hunk
dict | file
stringlengths 0
11.8M
| file_path
stringlengths 2
234
| label
int64 0
1
| commit_url
stringlengths 74
103
| dependency_score
sequencelengths 5
5
|
---|---|---|---|---|---|
{
"id": 0,
"code_window": [
"\n",
"\tcontainer, err := NewBuilder(runtime).Create(\n",
"\t\t&Config{\n",
"\t\t\tImage: GetTestImage(runtime).Id,\n",
"\t\t\tMemory: 33554432,\n",
"\t\t\tCmd: []string{\"/bin/cat\"},\n",
"\t\t\tOpenStdin: true,\n",
"\t\t},\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tCpuShares: 1024,\n"
],
"file_path": "commands_test.go",
"type": "add",
"edit_start_line_idx": 415
} | package docker
import (
"bufio"
"fmt"
"github.com/dotcloud/docker/rcli"
"io"
"io/ioutil"
"strings"
"testing"
"time"
)
func closeWrap(args ...io.Closer) error {
e := false
ret := fmt.Errorf("Error closing elements")
for _, c := range args {
if err := c.Close(); err != nil {
e = true
ret = fmt.Errorf("%s\n%s", ret, err)
}
}
if e {
return ret
}
return nil
}
func setTimeout(t *testing.T, msg string, d time.Duration, f func()) {
c := make(chan bool)
// Make sure we are not too long
go func() {
time.Sleep(d)
c <- true
}()
go func() {
f()
c <- false
}()
if <-c {
t.Fatal(msg)
}
}
func assertPipe(input, output string, r io.Reader, w io.Writer, count int) error {
for i := 0; i < count; i++ {
if _, err := w.Write([]byte(input)); err != nil {
return err
}
o, err := bufio.NewReader(r).ReadString('\n')
if err != nil {
return err
}
if strings.Trim(o, " \r\n") != output {
return fmt.Errorf("Unexpected output. Expected [%s], received [%s]", output, o)
}
}
return nil
}
func cmdWait(srv *Server, container *Container) error {
stdout, stdoutPipe := io.Pipe()
go func() {
srv.CmdWait(nil, stdoutPipe, container.Id)
}()
if _, err := bufio.NewReader(stdout).ReadString('\n'); err != nil {
return err
}
// Cleanup pipes
return closeWrap(stdout, stdoutPipe)
}
func cmdImages(srv *Server, args ...string) (string, error) {
stdout, stdoutPipe := io.Pipe()
go func() {
if err := srv.CmdImages(nil, stdoutPipe, args...); err != nil {
return
}
// force the pipe closed, so that the code below gets an EOF
stdoutPipe.Close()
}()
output, err := ioutil.ReadAll(stdout)
if err != nil {
return "", err
}
// Cleanup pipes
return string(output), closeWrap(stdout, stdoutPipe)
}
// TestImages checks that 'docker images' displays information correctly
func TestImages(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
srv := &Server{runtime: runtime}
output, err := cmdImages(srv)
if !strings.Contains(output, "REPOSITORY") {
t.Fatal("'images' should have a header")
}
if !strings.Contains(output, "docker-ut") {
t.Fatal("'images' should show the docker-ut image")
}
if !strings.Contains(output, "e9aa60c60128") {
t.Fatal("'images' should show the docker-ut image id")
}
output, err = cmdImages(srv, "-q")
if strings.Contains(output, "REPOSITORY") {
t.Fatal("'images -q' should not have a header")
}
if strings.Contains(output, "docker-ut") {
t.Fatal("'images' should not show the docker-ut image name")
}
if !strings.Contains(output, "e9aa60c60128") {
t.Fatal("'images' should show the docker-ut image id")
}
output, err = cmdImages(srv, "-viz")
if !strings.HasPrefix(output, "digraph docker {") {
t.Fatal("'images -v' should start with the dot header")
}
if !strings.HasSuffix(output, "}\n") {
t.Fatal("'images -v' should end with a '}'")
}
if !strings.Contains(output, "base -> \"e9aa60c60128\" [style=invis]") {
t.Fatal("'images -v' should have the docker-ut image id node")
}
// todo: add checks for -a
}
// TestRunHostname checks that 'docker run -h' correctly sets a custom hostname
func TestRunHostname(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
srv := &Server{runtime: runtime}
stdin, _ := io.Pipe()
stdout, stdoutPipe := io.Pipe()
c := make(chan struct{})
go func() {
if err := srv.CmdRun(stdin, rcli.NewDockerLocalConn(stdoutPipe), "-h", "foobar", GetTestImage(runtime).Id, "hostname"); err != nil {
t.Fatal(err)
}
close(c)
}()
cmdOutput, err := bufio.NewReader(stdout).ReadString('\n')
if err != nil {
t.Fatal(err)
}
if cmdOutput != "foobar\n" {
t.Fatalf("'hostname' should display '%s', not '%s'", "foobar\n", cmdOutput)
}
setTimeout(t, "CmdRun timed out", 2*time.Second, func() {
<-c
cmdWait(srv, srv.runtime.List()[0])
})
}
func TestRunExit(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
srv := &Server{runtime: runtime}
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
c1 := make(chan struct{})
go func() {
srv.CmdRun(stdin, rcli.NewDockerLocalConn(stdoutPipe), "-i", GetTestImage(runtime).Id, "/bin/cat")
close(c1)
}()
setTimeout(t, "Read/Write assertion timed out", 2*time.Second, func() {
if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 15); err != nil {
t.Fatal(err)
}
})
container := runtime.List()[0]
// Closing /bin/cat stdin, expect it to exit
p, err := container.StdinPipe()
if err != nil {
t.Fatal(err)
}
if err := p.Close(); err != nil {
t.Fatal(err)
}
// as the process exited, CmdRun must finish and unblock. Wait for it
setTimeout(t, "Waiting for CmdRun timed out", 2*time.Second, func() {
<-c1
cmdWait(srv, container)
})
// Make sure that the client has been disconnected
setTimeout(t, "The client should have been disconnected once the remote process exited.", 2*time.Second, func() {
// Expecting pipe i/o error, just check that read does not block
stdin.Read([]byte{})
})
// Cleanup pipes
if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil {
t.Fatal(err)
}
}
// Expected behaviour: the process dies when the client disconnects
func TestRunDisconnect(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
srv := &Server{runtime: runtime}
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
c1 := make(chan struct{})
go func() {
// We're simulating a disconnect so the return value doesn't matter. What matters is the
// fact that CmdRun returns.
srv.CmdRun(stdin, rcli.NewDockerLocalConn(stdoutPipe), "-i", GetTestImage(runtime).Id, "/bin/cat")
close(c1)
}()
setTimeout(t, "Read/Write assertion timed out", 2*time.Second, func() {
if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 15); err != nil {
t.Fatal(err)
}
})
// Close pipes (simulate disconnect)
if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil {
t.Fatal(err)
}
// as the pipes are close, we expect the process to die,
// therefore CmdRun to unblock. Wait for CmdRun
setTimeout(t, "Waiting for CmdRun timed out", 2*time.Second, func() {
<-c1
})
// Client disconnect after run -i should cause stdin to be closed, which should
// cause /bin/cat to exit.
setTimeout(t, "Waiting for /bin/cat to exit timed out", 2*time.Second, func() {
container := runtime.List()[0]
container.Wait()
if container.State.Running {
t.Fatalf("/bin/cat is still running after closing stdin")
}
})
}
// Expected behaviour: the process dies when the client disconnects
func TestRunDisconnectTty(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
srv := &Server{runtime: runtime}
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
c1 := make(chan struct{})
go func() {
// We're simulating a disconnect so the return value doesn't matter. What matters is the
// fact that CmdRun returns.
srv.CmdRun(stdin, rcli.NewDockerLocalConn(stdoutPipe), "-i", "-t", GetTestImage(runtime).Id, "/bin/cat")
close(c1)
}()
setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() {
for {
// Client disconnect after run -i should keep stdin out in TTY mode
l := runtime.List()
if len(l) == 1 && l[0].State.Running {
break
}
time.Sleep(10 * time.Millisecond)
}
})
// Client disconnect after run -i should keep stdin out in TTY mode
container := runtime.List()[0]
setTimeout(t, "Read/Write assertion timed out", 2*time.Second, func() {
if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 15); err != nil {
t.Fatal(err)
}
})
// Close pipes (simulate disconnect)
if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil {
t.Fatal(err)
}
// In tty mode, we expect the process to stay alive even after client's stdin closes.
// Do not wait for run to finish
// Give some time to monitor to do his thing
container.WaitTimeout(500 * time.Millisecond)
if !container.State.Running {
t.Fatalf("/bin/cat should still be running after closing stdin (tty mode)")
}
}
// TestAttachStdin checks attaching to stdin without stdout and stderr.
// 'docker run -i -a stdin' should sends the client's stdin to the command,
// then detach from it and print the container id.
func TestRunAttachStdin(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
srv := &Server{runtime: runtime}
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
ch := make(chan struct{})
go func() {
srv.CmdRun(stdin, rcli.NewDockerLocalConn(stdoutPipe), "-i", "-a", "stdin", GetTestImage(runtime).Id, "sh", "-c", "echo hello; cat")
close(ch)
}()
// Send input to the command, close stdin
setTimeout(t, "Write timed out", 2*time.Second, func() {
if _, err := stdinPipe.Write([]byte("hi there\n")); err != nil {
t.Fatal(err)
}
if err := stdinPipe.Close(); err != nil {
t.Fatal(err)
}
})
container := runtime.List()[0]
// Check output
cmdOutput, err := bufio.NewReader(stdout).ReadString('\n')
if err != nil {
t.Fatal(err)
}
if cmdOutput != container.ShortId()+"\n" {
t.Fatalf("Wrong output: should be '%s', not '%s'\n", container.ShortId()+"\n", cmdOutput)
}
// wait for CmdRun to return
setTimeout(t, "Waiting for CmdRun timed out", 2*time.Second, func() {
<-ch
})
setTimeout(t, "Waiting for command to exit timed out", 2*time.Second, func() {
container.Wait()
})
// Check logs
if cmdLogs, err := container.ReadLog("stdout"); err != nil {
t.Fatal(err)
} else {
if output, err := ioutil.ReadAll(cmdLogs); err != nil {
t.Fatal(err)
} else {
expectedLog := "hello\nhi there\n"
if string(output) != expectedLog {
t.Fatalf("Unexpected logs: should be '%s', not '%s'\n", expectedLog, output)
}
}
}
}
// Expected behaviour, the process stays alive when the client disconnects
func TestAttachDisconnect(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
srv := &Server{runtime: runtime}
container, err := NewBuilder(runtime).Create(
&Config{
Image: GetTestImage(runtime).Id,
Memory: 33554432,
Cmd: []string{"/bin/cat"},
OpenStdin: true,
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
// Start the process
if err := container.Start(); err != nil {
t.Fatal(err)
}
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
// Attach to it
c1 := make(chan struct{})
go func() {
// We're simulating a disconnect so the return value doesn't matter. What matters is the
// fact that CmdAttach returns.
srv.CmdAttach(stdin, rcli.NewDockerLocalConn(stdoutPipe), container.Id)
close(c1)
}()
setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() {
if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 15); err != nil {
t.Fatal(err)
}
})
// Close pipes (client disconnects)
if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil {
t.Fatal(err)
}
// Wait for attach to finish, the client disconnected, therefore, Attach finished his job
setTimeout(t, "Waiting for CmdAttach timed out", 2*time.Second, func() {
<-c1
})
// We closed stdin, expect /bin/cat to still be running
// Wait a little bit to make sure container.monitor() did his thing
err = container.WaitTimeout(500 * time.Millisecond)
if err == nil || !container.State.Running {
t.Fatalf("/bin/cat is not running after closing stdin")
}
// Try to avoid the timeoout in destroy. Best effort, don't check error
cStdin, _ := container.StdinPipe()
cStdin.Close()
container.Wait()
}
| commands_test.go | 1 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.9977356195449829,
0.1790858805179596,
0.00016245624283328652,
0.0012598945759236813,
0.3605245053768158
] |
{
"id": 0,
"code_window": [
"\n",
"\tcontainer, err := NewBuilder(runtime).Create(\n",
"\t\t&Config{\n",
"\t\t\tImage: GetTestImage(runtime).Id,\n",
"\t\t\tMemory: 33554432,\n",
"\t\t\tCmd: []string{\"/bin/cat\"},\n",
"\t\t\tOpenStdin: true,\n",
"\t\t},\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tCpuShares: 1024,\n"
],
"file_path": "commands_test.go",
"type": "add",
"edit_start_line_idx": 415
} | # -*- coding: utf-8 -*-
#
# Docker documentation build configuration file, created by
# sphinx-quickstart on Tue Mar 19 12:34:07 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinxcontrib.httpdomain']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
#disable the parmalinks on headers, I find them really annoying
html_add_permalinks = None
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Docker'
copyright = u'2013, Team Docker'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'docker'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
html_theme_path = ['../theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static_files']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Dockerdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Docker.tex', u'Docker Documentation',
u'Team Docker', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'docker', u'Docker Documentation',
[u'Team Docker'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Docker', u'Docker Documentation',
u'Team Docker', 'Docker', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| docs/sources/conf.py | 0 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.00105696776881814,
0.00022157412604428828,
0.0001644209260120988,
0.00016753177624195814,
0.0001882349024526775
] |
{
"id": 0,
"code_window": [
"\n",
"\tcontainer, err := NewBuilder(runtime).Create(\n",
"\t\t&Config{\n",
"\t\t\tImage: GetTestImage(runtime).Id,\n",
"\t\t\tMemory: 33554432,\n",
"\t\t\tCmd: []string{\"/bin/cat\"},\n",
"\t\t\tOpenStdin: true,\n",
"\t\t},\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tCpuShares: 1024,\n"
],
"file_path": "commands_test.go",
"type": "add",
"edit_start_line_idx": 415
} | Docker documentation and website
================================
Documentation
-------------
This is your definite place to contribute to the docker documentation. The documentation is generated from the
.rst files under sources.
The folder also contains the other files to create the http://docker.io website, but you can generally ignore
most of those.
Installation
------------
* Work in your own fork of the code, we accept pull requests.
* Install sphinx: ``pip install sphinx``
* Install sphinx httpdomain contrib package ``sphinxcontrib-httpdomain``
* If pip is not available you can probably install it using your favorite package manager as **python-pip**
Usage
-----
* change the .rst files with your favorite editor to your liking
* run *make docs* to clean up old files and generate new ones
* your static website can now be found in the _build dir
* to preview what you have generated, cd into _build/html and then run 'python -m SimpleHTTPServer 8000'
Working using github's file editor
----------------------------------
Alternatively, for small changes and typo's you might want to use github's built in file editor. It allows
you to preview your changes right online. Just be carefull not to create many commits.
Images
------
When you need to add images, try to make them as small as possible (e.g. as gif).
Notes
-----
* The index.html and gettingstarted.html files are copied from the source dir to the output dir without modification.
So changes to those pages should be made directly in html
* For the template the css is compiled from less. When changes are needed they can be compiled using
lessc ``lessc main.less`` or watched using watch-lessc ``watch-lessc -i main.less -o main.css``
Guides on using sphinx
----------------------
* To make links to certain pages create a link target like so:
```
.. _hello_world:
Hello world
===========
This is.. (etc.)
```
The ``_hello_world:`` will make it possible to link to this position (page and marker) from all other pages.
* Notes, warnings and alarms
```
# a note (use when something is important)
.. note::
# a warning (orange)
.. warning::
# danger (red, use sparsely)
.. danger::
* Code examples
Start without $, so it's easy to copy and paste. | docs/README.md | 0 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.00017443652905058116,
0.00016848917584866285,
0.0001653545186854899,
0.00016751736984588206,
0.0000031830134048504988
] |
{
"id": 0,
"code_window": [
"\n",
"\tcontainer, err := NewBuilder(runtime).Create(\n",
"\t\t&Config{\n",
"\t\t\tImage: GetTestImage(runtime).Id,\n",
"\t\t\tMemory: 33554432,\n",
"\t\t\tCmd: []string{\"/bin/cat\"},\n",
"\t\t\tOpenStdin: true,\n",
"\t\t},\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tCpuShares: 1024,\n"
],
"file_path": "commands_test.go",
"type": "add",
"edit_start_line_idx": 415
} | package docker
import (
"bytes"
"strconv"
"strings"
"syscall"
)
func getKernelVersion() (*KernelVersionInfo, error) {
var (
uts syscall.Utsname
flavor string
kernel, major, minor int
err error
)
if err := syscall.Uname(&uts); err != nil {
return nil, err
}
release := make([]byte, len(uts.Release))
i := 0
for _, c := range uts.Release {
release[i] = byte(c)
i++
}
// Remove the \x00 from the release for Atoi to parse correctly
release = release[:bytes.IndexByte(release, 0)]
tmp := strings.SplitN(string(release), "-", 2)
tmp2 := strings.SplitN(tmp[0], ".", 3)
if len(tmp2) > 0 {
kernel, err = strconv.Atoi(tmp2[0])
if err != nil {
return nil, err
}
}
if len(tmp2) > 1 {
major, err = strconv.Atoi(tmp2[1])
if err != nil {
return nil, err
}
}
if len(tmp2) > 2 {
minor, err = strconv.Atoi(tmp2[2])
if err != nil {
return nil, err
}
}
if len(tmp) == 2 {
flavor = tmp[1]
} else {
flavor = ""
}
return &KernelVersionInfo{
Kernel: kernel,
Major: major,
Minor: minor,
Flavor: flavor,
}, nil
}
| getKernelVersion_linux.go | 0 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.00017505134746897966,
0.00016947180847637355,
0.00016427658556494862,
0.00017034000484272838,
0.0000036576618640538072
] |
{
"id": 1,
"code_window": [
"\tHostname string\n",
"\tUser string\n",
"\tMemory int64 // Memory limit (in bytes)\n",
"\tMemorySwap int64 // Total memory usage (memory + swap); set `-1' to disable swap\n",
"\tAttachStdin bool\n",
"\tAttachStdout bool\n",
"\tAttachStderr bool\n",
"\tPortSpecs []string\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tCpuShares int64 // CPU shares (relative weight vs. other containers)\n"
],
"file_path": "container.go",
"type": "add",
"edit_start_line_idx": 58
} | package docker
import (
"encoding/json"
"fmt"
"github.com/dotcloud/docker/rcli"
"github.com/kr/pty"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"path"
"sort"
"strconv"
"strings"
"syscall"
"time"
)
type Container struct {
root string
Id string
Created time.Time
Path string
Args []string
Config *Config
State State
Image string
network *NetworkInterface
NetworkSettings *NetworkSettings
SysInitPath string
ResolvConfPath string
cmd *exec.Cmd
stdout *writeBroadcaster
stderr *writeBroadcaster
stdin io.ReadCloser
stdinPipe io.WriteCloser
ptyMaster io.Closer
runtime *Runtime
waitLock chan struct{}
Volumes map[string]string
}
type Config struct {
Hostname string
User string
Memory int64 // Memory limit (in bytes)
MemorySwap int64 // Total memory usage (memory + swap); set `-1' to disable swap
AttachStdin bool
AttachStdout bool
AttachStderr bool
PortSpecs []string
Tty bool // Attach standard streams to a tty, including stdin if it is not closed.
OpenStdin bool // Open stdin
StdinOnce bool // If true, close stdin after the 1 attached client disconnects.
Env []string
Cmd []string
Dns []string
Image string // Name of the image as it was passed by the operator (eg. could be symbolic)
Volumes map[string]struct{}
VolumesFrom string
}
func ParseRun(args []string, stdout io.Writer, capabilities *Capabilities) (*Config, error) {
cmd := rcli.Subcmd(stdout, "run", "[OPTIONS] IMAGE COMMAND [ARG...]", "Run a command in a new container")
if len(args) > 0 && args[0] != "--help" {
cmd.SetOutput(ioutil.Discard)
}
flHostname := cmd.String("h", "", "Container host name")
flUser := cmd.String("u", "", "Username or UID")
flDetach := cmd.Bool("d", false, "Detached mode: leave the container running in the background")
flAttach := NewAttachOpts()
cmd.Var(flAttach, "a", "Attach to stdin, stdout or stderr.")
flStdin := cmd.Bool("i", false, "Keep stdin open even if not attached")
flTty := cmd.Bool("t", false, "Allocate a pseudo-tty")
flMemory := cmd.Int64("m", 0, "Memory limit (in bytes)")
if *flMemory > 0 && !capabilities.MemoryLimit {
fmt.Fprintf(stdout, "WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
*flMemory = 0
}
var flPorts ListOpts
cmd.Var(&flPorts, "p", "Expose a container's port to the host (use 'docker port' to see the actual mapping)")
var flEnv ListOpts
cmd.Var(&flEnv, "e", "Set environment variables")
var flDns ListOpts
cmd.Var(&flDns, "dns", "Set custom dns servers")
flVolumes := NewPathOpts()
cmd.Var(flVolumes, "v", "Attach a data volume")
flVolumesFrom := cmd.String("volumes-from", "", "Mount volumes from the specified container")
if err := cmd.Parse(args); err != nil {
return nil, err
}
if *flDetach && len(flAttach) > 0 {
return nil, fmt.Errorf("Conflicting options: -a and -d")
}
// If neither -d or -a are set, attach to everything by default
if len(flAttach) == 0 && !*flDetach {
if !*flDetach {
flAttach.Set("stdout")
flAttach.Set("stderr")
if *flStdin {
flAttach.Set("stdin")
}
}
}
parsedArgs := cmd.Args()
runCmd := []string{}
image := ""
if len(parsedArgs) >= 1 {
image = cmd.Arg(0)
}
if len(parsedArgs) > 1 {
runCmd = parsedArgs[1:]
}
config := &Config{
Hostname: *flHostname,
PortSpecs: flPorts,
User: *flUser,
Tty: *flTty,
OpenStdin: *flStdin,
Memory: *flMemory,
AttachStdin: flAttach.Get("stdin"),
AttachStdout: flAttach.Get("stdout"),
AttachStderr: flAttach.Get("stderr"),
Env: flEnv,
Cmd: runCmd,
Dns: flDns,
Image: image,
Volumes: flVolumes,
VolumesFrom: *flVolumesFrom,
}
if *flMemory > 0 && !capabilities.SwapLimit {
fmt.Fprintf(stdout, "WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
config.MemorySwap = -1
}
// When allocating stdin in attached mode, close stdin at client disconnect
if config.OpenStdin && config.AttachStdin {
config.StdinOnce = true
}
return config, nil
}
type NetworkSettings struct {
IpAddress string
IpPrefixLen int
Gateway string
Bridge string
PortMapping map[string]string
}
// String returns a human-readable description of the port mapping defined in the settings
func (settings *NetworkSettings) PortMappingHuman() string {
var mapping []string
for private, public := range settings.PortMapping {
mapping = append(mapping, fmt.Sprintf("%s->%s", public, private))
}
sort.Strings(mapping)
return strings.Join(mapping, ", ")
}
// Inject the io.Reader at the given path. Note: do not close the reader
func (container *Container) Inject(file io.Reader, pth string) error {
// Make sure the directory exists
if err := os.MkdirAll(path.Join(container.rwPath(), path.Dir(pth)), 0755); err != nil {
return err
}
// FIXME: Handle permissions/already existing dest
dest, err := os.Create(path.Join(container.rwPath(), pth))
if err != nil {
return err
}
if _, err := io.Copy(dest, file); err != nil {
return err
}
return nil
}
func (container *Container) Cmd() *exec.Cmd {
return container.cmd
}
func (container *Container) When() time.Time {
return container.Created
}
func (container *Container) FromDisk() error {
data, err := ioutil.ReadFile(container.jsonPath())
if err != nil {
return err
}
// Load container settings
if err := json.Unmarshal(data, container); err != nil {
return err
}
return nil
}
func (container *Container) ToDisk() (err error) {
data, err := json.Marshal(container)
if err != nil {
return
}
return ioutil.WriteFile(container.jsonPath(), data, 0666)
}
func (container *Container) generateLXCConfig() error {
fo, err := os.Create(container.lxcConfigPath())
if err != nil {
return err
}
defer fo.Close()
if err := LxcTemplateCompiled.Execute(fo, container); err != nil {
return err
}
return nil
}
func (container *Container) startPty() error {
ptyMaster, ptySlave, err := pty.Open()
if err != nil {
return err
}
container.ptyMaster = ptyMaster
container.cmd.Stdout = ptySlave
container.cmd.Stderr = ptySlave
// Copy the PTYs to our broadcasters
go func() {
defer container.stdout.CloseWriters()
Debugf("[startPty] Begin of stdout pipe")
io.Copy(container.stdout, ptyMaster)
Debugf("[startPty] End of stdout pipe")
}()
// stdin
if container.Config.OpenStdin {
container.cmd.Stdin = ptySlave
container.cmd.SysProcAttr = &syscall.SysProcAttr{Setctty: true, Setsid: true}
go func() {
defer container.stdin.Close()
Debugf("[startPty] Begin of stdin pipe")
io.Copy(ptyMaster, container.stdin)
Debugf("[startPty] End of stdin pipe")
}()
}
if err := container.cmd.Start(); err != nil {
return err
}
ptySlave.Close()
return nil
}
// start launches the container's command without a terminal, wiring its
// stdout/stderr to the container's broadcasters and, when OpenStdin is set,
// pumping the container's stdin pipe into the process.
func (container *Container) start() error {
	container.cmd.Stdout = container.stdout
	container.cmd.Stderr = container.stderr
	if container.Config.OpenStdin {
		stdin, err := container.cmd.StdinPipe()
		if err != nil {
			return err
		}
		go func() {
			defer stdin.Close()
			Debugf("Begin of stdin pipe [start]")
			io.Copy(stdin, container.stdin)
			Debugf("End of stdin pipe [start]")
		}()
	}
	return container.cmd.Start()
}
// Attach connects the provided streams to the container's stdin, stdout and
// stderr pipes and returns a channel that yields the first error (or nil)
// once every attached copy job has finished.
//
// stdinCloser, when non-nil, is closed alongside stdin when output ends in
// StdinOnce mode, so the caller's input source is released as well.
func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, stdout io.Writer, stderr io.Writer) chan error {
	var cStdout, cStderr io.ReadCloser
	var nJobs int
	errors := make(chan error, 3)
	if stdin != nil && container.Config.OpenStdin {
		nJobs += 1
		if cStdin, err := container.StdinPipe(); err != nil {
			errors <- err
		} else {
			go func() {
				Debugf("[start] attach stdin\n")
				defer Debugf("[end] attach stdin\n")
				// No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr
				if cStdout != nil {
					defer cStdout.Close()
				}
				if cStderr != nil {
					defer cStderr.Close()
				}
				if container.Config.StdinOnce && !container.Config.Tty {
					defer cStdin.Close()
				}
				if container.Config.Tty {
					_, err = CopyEscapable(cStdin, stdin)
				} else {
					_, err = io.Copy(cStdin, stdin)
				}
				if err != nil {
					Debugf("[error] attach stdin: %s\n", err)
				}
				// Discard error, expecting pipe error
				errors <- nil
			}()
		}
	}
	if stdout != nil {
		nJobs += 1
		if p, err := container.StdoutPipe(); err != nil {
			errors <- err
		} else {
			cStdout = p
			go func() {
				Debugf("[start] attach stdout\n")
				defer Debugf("[end] attach stdout\n")
				// If we are in StdinOnce mode, then close stdin
				if container.Config.StdinOnce {
					if stdin != nil {
						defer stdin.Close()
					}
					if stdinCloser != nil {
						defer stdinCloser.Close()
					}
				}
				_, err := io.Copy(stdout, cStdout)
				if err != nil {
					Debugf("[error] attach stdout: %s\n", err)
				}
				errors <- err
			}()
		}
	}
	if stderr != nil {
		nJobs += 1
		if p, err := container.StderrPipe(); err != nil {
			errors <- err
		} else {
			cStderr = p
			go func() {
				Debugf("[start] attach stderr\n")
				defer Debugf("[end] attach stderr\n")
				// If we are in StdinOnce mode, then close stdin
				if container.Config.StdinOnce {
					if stdin != nil {
						defer stdin.Close()
					}
					if stdinCloser != nil {
						defer stdinCloser.Close()
					}
				}
				_, err := io.Copy(stderr, cStderr)
				if err != nil {
					Debugf("[error] attach stderr: %s\n", err)
				}
				errors <- err
			}()
		}
	}
	// Collect the results of all jobs in a background promise; the first
	// non-nil error aborts the wait and is delivered to the caller.
	return Go(func() error {
		if cStdout != nil {
			defer cStdout.Close()
		}
		if cStderr != nil {
			defer cStderr.Close()
		}
		// FIXME: how do clean up the stdin goroutine without the unwanted side effect
		// of closing the passed stdin? Add an intermediary io.Pipe?
		for i := 0; i < nJobs; i += 1 {
			Debugf("Waiting for job %d/%d\n", i+1, nJobs)
			if err := <-errors; err != nil {
				Debugf("Job %d returned error %s. Aborting all jobs\n", i+1, err)
				return err
			}
			Debugf("Job %d completed successfully\n", i+1)
		}
		Debugf("All jobs completed successfully\n")
		return nil
	})
}
// Start prepares and launches the container: it mounts the filesystem,
// allocates a network interface, creates the requested volumes, generates
// the LXC configuration and finally spawns lxc-start. On success the state
// is persisted to disk and a monitor goroutine is launched to reap the
// process.
func (container *Container) Start() error {
	container.State.lock()
	defer container.State.unlock()
	if container.State.Running {
		return fmt.Errorf("The container %s is already running.", container.Id)
	}
	if err := container.EnsureMounted(); err != nil {
		return err
	}
	if err := container.allocateNetwork(); err != nil {
		return err
	}
	// Make sure the config is compatible with the current kernel
	if container.Config.Memory > 0 && !container.runtime.capabilities.MemoryLimit {
		log.Printf("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
		container.Config.Memory = 0
	}
	if container.Config.Memory > 0 && !container.runtime.capabilities.SwapLimit {
		log.Printf("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
		container.Config.MemorySwap = -1
	}
	container.Volumes = make(map[string]string)
	// Create the requested volumes
	for volPath := range container.Config.Volumes {
		if c, err := container.runtime.volumes.Create(nil, container, "", "", nil); err != nil {
			return err
		} else {
			if err := os.MkdirAll(path.Join(container.RootfsPath(), volPath), 0755); err != nil {
				// BUGFIX: this previously returned nil, silently swallowing
				// the mkdir failure.
				return err
			}
			container.Volumes[volPath] = c.Id
		}
	}
	// Mount the volumes of another container if requested.
	if container.Config.VolumesFrom != "" {
		c := container.runtime.Get(container.Config.VolumesFrom)
		if c == nil {
			return fmt.Errorf("Container %s not found. Impossible to mount its volumes", container.Id)
		}
		for volPath, id := range c.Volumes {
			if _, exists := container.Volumes[volPath]; exists {
				return fmt.Errorf("The requested volume %s overlap one of the volume of the container %s", volPath, c.Id)
			}
			if err := os.MkdirAll(path.Join(container.RootfsPath(), volPath), 0755); err != nil {
				// BUGFIX: this previously returned nil, silently swallowing
				// the mkdir failure.
				return err
			}
			container.Volumes[volPath] = id
		}
	}
	if err := container.generateLXCConfig(); err != nil {
		return err
	}
	params := []string{
		"-n", container.Id,
		"-f", container.lxcConfigPath(),
		"--",
		"/sbin/init",
	}
	// Networking
	params = append(params, "-g", container.network.Gateway.String())
	// User
	if container.Config.User != "" {
		params = append(params, "-u", container.Config.User)
	}
	if container.Config.Tty {
		params = append(params, "-e", "TERM=xterm")
	}
	// Setup environment
	params = append(params,
		"-e", "HOME=/",
		"-e", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
	)
	for _, elem := range container.Config.Env {
		params = append(params, "-e", elem)
	}
	// Program
	params = append(params, "--", container.Path)
	params = append(params, container.Args...)
	container.cmd = exec.Command("lxc-start", params...)
	// Setup logging of stdout and stderr to disk
	if err := container.runtime.LogToDisk(container.stdout, container.logPath("stdout")); err != nil {
		return err
	}
	if err := container.runtime.LogToDisk(container.stderr, container.logPath("stderr")); err != nil {
		return err
	}
	var err error
	if container.Config.Tty {
		err = container.startPty()
	} else {
		err = container.start()
	}
	if err != nil {
		return err
	}
	// FIXME: save state on disk *first*, then converge
	// this way disk state is used as a journal, eg. we can restore after crash etc.
	container.State.setRunning(container.cmd.Process.Pid)
	// Init the lock
	container.waitLock = make(chan struct{})
	container.ToDisk()
	go container.monitor()
	return nil
}
// Run starts the container and blocks until it exits.
func (container *Container) Run() error {
	err := container.Start()
	if err == nil {
		container.Wait()
	}
	return err
}
// Output starts the container, waits for it to exit, and returns everything
// it wrote to stdout.
func (container *Container) Output() ([]byte, error) {
	stdout, err := container.StdoutPipe()
	if err != nil {
		return nil, err
	}
	defer stdout.Close()
	if err := container.Start(); err != nil {
		return nil, err
	}
	data, err := ioutil.ReadAll(stdout)
	container.Wait()
	return data, err
}
// StdinPipe returns a pipe connected to the standard input of the
// container's active process. All callers share the same underlying pipe;
// it is recreated by monitor() after exit when OpenStdin is set.
func (container *Container) StdinPipe() (io.WriteCloser, error) {
	return container.stdinPipe, nil
}
// StdoutPipe returns a new buffered reader attached to the container's
// stdout broadcaster.
func (container *Container) StdoutPipe() (io.ReadCloser, error) {
	r, w := io.Pipe()
	container.stdout.AddWriter(w)
	return newBufReader(r), nil
}
// StderrPipe returns a new buffered reader attached to the container's
// stderr broadcaster.
func (container *Container) StderrPipe() (io.ReadCloser, error) {
	r, w := io.Pipe()
	container.stderr.AddWriter(w)
	return newBufReader(r), nil
}
// allocateNetwork acquires a network interface from the runtime's network
// manager, sets up the requested port mappings and records the resulting
// network settings on the container.
func (container *Container) allocateNetwork() error {
	iface, err := container.runtime.networkManager.Allocate()
	if err != nil {
		return err
	}
	container.NetworkSettings.PortMapping = make(map[string]string)
	for _, spec := range container.Config.PortSpecs {
		if nat, err := iface.AllocatePort(spec); err != nil {
			// Give the interface back on failure so it can be reused.
			iface.Release()
			return err
		} else {
			container.NetworkSettings.PortMapping[strconv.Itoa(nat.Backend)] = strconv.Itoa(nat.Frontend)
		}
	}
	container.network = iface
	container.NetworkSettings.Bridge = container.runtime.networkManager.bridgeIface
	container.NetworkSettings.IpAddress = iface.IPNet.IP.String()
	container.NetworkSettings.IpPrefixLen, _ = iface.IPNet.Mask.Size()
	container.NetworkSettings.Gateway = iface.Gateway.String()
	return nil
}
// releaseNetwork returns the container's network interface to the network
// manager and resets the recorded network settings.
func (container *Container) releaseNetwork() {
	container.network.Release()
	container.network = nil
	container.NetworkSettings = &NetworkSettings{}
}
// FIXME: replace this with a control socket within docker-init
// waitLxc polls `lxc-info` every 500ms until the container is no longer
// reported as RUNNING, returning nil once it has stopped or the first
// error from lxc-info.
func (container *Container) waitLxc() error {
	for {
		output, err := exec.Command("lxc-info", "-n", container.Id).CombinedOutput()
		if err != nil {
			return err
		}
		if !strings.Contains(string(output), "RUNNING") {
			return nil
		}
		// Still running: poll again shortly.
		time.Sleep(500 * time.Millisecond)
	}
}
// monitor waits for the container's process to exit, then tears everything
// down: releases the network, closes the std streams, unmounts the
// filesystem, records the exit code and unblocks Wait() callers. It runs in
// its own goroutine, started by Start().
func (container *Container) monitor() {
	// Wait for the program to exit
	Debugf("Waiting for process")
	// If the command does not exists, try to wait via lxc
	if container.cmd == nil {
		if err := container.waitLxc(); err != nil {
			Debugf("%s: Process: %s", container.Id, err)
		}
	} else {
		if err := container.cmd.Wait(); err != nil {
			// Discard the error as any signals or non 0 returns will generate an error
			Debugf("%s: Process: %s", container.Id, err)
		}
	}
	Debugf("Process finished")
	// -1 is reported when the process was waited on via lxc and no exit
	// status is available.
	var exitCode int = -1
	if container.cmd != nil {
		exitCode = container.cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
	}
	// Cleanup
	container.releaseNetwork()
	if container.Config.OpenStdin {
		if err := container.stdin.Close(); err != nil {
			Debugf("%s: Error close stdin: %s", container.Id, err)
		}
	}
	if err := container.stdout.CloseWriters(); err != nil {
		Debugf("%s: Error close stdout: %s", container.Id, err)
	}
	if err := container.stderr.CloseWriters(); err != nil {
		Debugf("%s: Error close stderr: %s", container.Id, err)
	}
	if container.ptyMaster != nil {
		if err := container.ptyMaster.Close(); err != nil {
			Debugf("%s: Error closing Pty master: %s", container.Id, err)
		}
	}
	if err := container.Unmount(); err != nil {
		log.Printf("%v: Failed to umount filesystem: %v", container.Id, err)
	}
	// Re-create a brand new stdin pipe once the container exited
	if container.Config.OpenStdin {
		container.stdin, container.stdinPipe = io.Pipe()
	}
	// Report status back
	container.State.setStopped(exitCode)
	// Release the lock
	close(container.waitLock)
	if err := container.ToDisk(); err != nil {
		// FIXME: there is a race condition here which causes this to fail during the unit tests.
		// If another goroutine was waiting for Wait() to return before removing the container's root
		// from the filesystem... At this point it may already have done so.
		// This is because State.setStopped() has already been called, and has caused Wait()
		// to return.
		// FIXME: why are we serializing running state to disk in the first place?
		//log.Printf("%s: Failed to dump configuration to the disk: %s", container.Id, err)
	}
}
// kill forcibly terminates the container's process: first via lxc-kill with
// signal 9, then, if the process survives 10 seconds, by signalling it
// directly. It blocks until the container has actually stopped. No-op when
// the container is not running.
func (container *Container) kill() error {
	if !container.State.Running {
		return nil
	}
	// Sending SIGKILL to the process via lxc
	output, err := exec.Command("lxc-kill", "-n", container.Id, "9").CombinedOutput()
	if err != nil {
		log.Printf("error killing container %s (%s, %s)", container.Id, output, err)
	}
	// 2. Wait for the process to die, in last resort, try to kill the process directly
	if err := container.WaitTimeout(10 * time.Second); err != nil {
		if container.cmd == nil {
			return fmt.Errorf("lxc-kill failed, impossible to kill the container %s", container.Id)
		}
		log.Printf("Container %s failed to exit within 10 seconds of lxc SIGKILL - trying direct SIGKILL", container.Id)
		if err := container.cmd.Process.Kill(); err != nil {
			return err
		}
	}
	// Wait for the container to be actually stopped
	container.Wait()
	return nil
}
// Kill force-kills the container's process and waits for it to stop.
// It is a no-op when the container is not running.
func (container *Container) Kill() error {
	container.State.lock()
	defer container.State.unlock()
	if container.State.Running {
		return container.kill()
	}
	return nil
}
// Stop gracefully stops the container: it sends SIGTERM through lxc-kill,
// waits up to `seconds` for the process to exit on its own, and falls back
// to a hard kill if it does not. No-op when the container is not running.
func (container *Container) Stop(seconds int) error {
	container.State.lock()
	defer container.State.unlock()
	if !container.State.Running {
		return nil
	}
	// 1. Send a SIGTERM
	if output, err := exec.Command("lxc-kill", "-n", container.Id, "15").CombinedOutput(); err != nil {
		log.Print(string(output))
		log.Print("Failed to send SIGTERM to the process, force killing")
		if err := container.kill(); err != nil {
			return err
		}
	}
	// 2. Wait for the process to exit on its own
	if err := container.WaitTimeout(time.Duration(seconds) * time.Second); err != nil {
		log.Printf("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.Id, seconds)
		if err := container.kill(); err != nil {
			return err
		}
	}
	return nil
}
// Restart stops the container (allowing up to `seconds` for a clean
// shutdown) and then starts it again.
func (container *Container) Restart(seconds int) error {
	if err := container.Stop(seconds); err != nil {
		return err
	}
	return container.Start()
}
// Wait blocks until the container stops running, then returns its exit code.
// It relies on waitLock, which monitor() closes once the process has exited.
func (container *Container) Wait() int {
	<-container.waitLock
	return container.State.ExitCode
}
// ExportRw returns an uncompressed tar archive of the container's
// read-write layer only (the changes made on top of the image).
func (container *Container) ExportRw() (Archive, error) {
	return Tar(container.rwPath(), Uncompressed)
}
// RwChecksum returns a hash of the container's read-write layer, computed
// over an xz-compressed tar of its contents.
func (container *Container) RwChecksum() (string, error) {
	archive, err := Tar(container.rwPath(), Xz)
	if err != nil {
		return "", err
	}
	return HashData(archive)
}
// Export returns an uncompressed tar archive of the container's entire
// root filesystem, mounting it first if necessary.
func (container *Container) Export() (Archive, error) {
	err := container.EnsureMounted()
	if err != nil {
		return nil, err
	}
	return Tar(container.RootfsPath(), Uncompressed)
}
// WaitTimeout blocks until the container stops running or the given timeout
// elapses, returning an error in the latter case.
func (container *Container) WaitTimeout(timeout time.Duration) error {
	done := make(chan bool)
	go func() {
		container.Wait()
		done <- true
	}()
	// Both select arms return, so nothing can follow the select: the
	// original trailing panic("unreachable") was dead code and is removed.
	select {
	case <-time.After(timeout):
		return fmt.Errorf("Timed Out")
	case <-done:
		return nil
	}
}
// EnsureMounted mounts the container's filesystem unless it is already
// mounted.
func (container *Container) EnsureMounted() error {
	mounted, err := container.Mounted()
	if err != nil {
		return err
	}
	if mounted {
		return nil
	}
	return container.Mount()
}
// Mount overlays the container's image onto its rootfs path, with the
// read-write layer on top.
func (container *Container) Mount() error {
	img, err := container.GetImage()
	if err != nil {
		return err
	}
	return img.Mount(container.RootfsPath(), container.rwPath())
}
// Changes computes the filesystem changes in the container's read-write
// layer relative to its image.
func (container *Container) Changes() ([]Change, error) {
	img, err := container.GetImage()
	if err != nil {
		return nil, err
	}
	return img.Changes(container.rwPath())
}
// GetImage returns the image the container was created from. It fails when
// the container is not registered with a runtime.
func (container *Container) GetImage() (*Image, error) {
	if container.runtime == nil {
		return nil, fmt.Errorf("Can't get image of unregistered container")
	}
	return container.runtime.graph.Get(container.Image)
}
// Mounted reports whether the container's root filesystem is currently
// mounted.
func (container *Container) Mounted() (bool, error) {
	return Mounted(container.RootfsPath())
}
// Unmount detaches the container's root filesystem.
func (container *Container) Unmount() error {
	return Unmount(container.RootfsPath())
}
// ShortId returns a shorthand version of the container's id for convenience.
// A collision with other container shorthands is very unlikely, but possible.
// In case of a collision a lookup with Runtime.Get() will fail, and the caller
// will need to use a longer prefix, or the full-length container Id.
func (container *Container) ShortId() string {
	return TruncateId(container.Id)
}
// logPath returns the on-disk location of the given log stream
// (e.g. "stdout" or "stderr") for this container.
func (container *Container) logPath(name string) string {
	return path.Join(container.root, fmt.Sprintf("%s-%s.log", container.Id, name))
}
// ReadLog opens the container's log file for the given stream name.
func (container *Container) ReadLog(name string) (io.Reader, error) {
	return os.Open(container.logPath(name))
}
// jsonPath returns the location of the container's persisted configuration.
func (container *Container) jsonPath() string {
	return path.Join(container.root, "config.json")
}
// lxcConfigPath returns the location of the generated LXC configuration file.
func (container *Container) lxcConfigPath() string {
	return path.Join(container.root, "config.lxc")
}
// RootfsPath returns the mount point of the container's root filesystem.
// This method must be exported to be used from the lxc template.
func (container *Container) RootfsPath() string {
	return path.Join(container.root, "rootfs")
}
// GetVolumes maps each of the container's volume mount points to the host
// directory backing it (the "layer" directory of the backing volume).
func (container *Container) GetVolumes() (map[string]string, error) {
	mapping := make(map[string]string)
	for volPath, id := range container.Volumes {
		volume, err := container.runtime.volumes.Get(id)
		if err != nil {
			return nil, err
		}
		volRoot, err := volume.root()
		if err != nil {
			return nil, err
		}
		mapping[volPath] = path.Join(volRoot, "layer")
	}
	return mapping, nil
}
// rwPath returns the location of the container's writable layer.
func (container *Container) rwPath() string {
	return path.Join(container.root, "rw")
}
// validateId rejects empty container/image ids.
func validateId(id string) error {
	if len(id) == 0 {
		return fmt.Errorf("Invalid empty id")
	}
	return nil
}
| container.go | 1 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.9961612224578857,
0.049245405942201614,
0.0001609969767741859,
0.0001718254789011553,
0.18910323083400726
] |
{
"id": 1,
"code_window": [
"\tHostname string\n",
"\tUser string\n",
"\tMemory int64 // Memory limit (in bytes)\n",
"\tMemorySwap int64 // Total memory usage (memory + swap); set `-1' to disable swap\n",
"\tAttachStdin bool\n",
"\tAttachStdout bool\n",
"\tAttachStderr bool\n",
"\tPortSpecs []string\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tCpuShares int64 // CPU shares (relative weight vs. other containers)\n"
],
"file_path": "container.go",
"type": "add",
"edit_start_line_idx": 58
} | =========================================
``tag`` -- Tag an image into a repository
=========================================
::
Usage: docker tag [OPTIONS] IMAGE REPOSITORY [TAG]
Tag an image into a repository
-f=false: Force
| docs/sources/commandline/command/tag.rst | 0 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.00017295966972596943,
0.0001714530517347157,
0.00016994643374346197,
0.0001714530517347157,
0.0000015066179912537336
] |
{
"id": 1,
"code_window": [
"\tHostname string\n",
"\tUser string\n",
"\tMemory int64 // Memory limit (in bytes)\n",
"\tMemorySwap int64 // Total memory usage (memory + swap); set `-1' to disable swap\n",
"\tAttachStdin bool\n",
"\tAttachStdout bool\n",
"\tAttachStderr bool\n",
"\tPortSpecs []string\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tCpuShares int64 // CPU shares (relative weight vs. other containers)\n"
],
"file_path": "container.go",
"type": "add",
"edit_start_line_idx": 58
} |
/* ==========================================================================
Author's custom styles
========================================================================== */
@import "variables.less";
@red: crimson;
@lightblue: #118;
@lightred: #e11;
@darkblue: #292E33;
@borderGray: #888;
.red {
background-color: red;
}
.blue {
background-color: blue;
}
.orange {
background-color: orange;
}
.gray {
background-color: grey;
}
body {
padding-top: 58px;
font-family: Arial, Helvetica, sans-serif;
}
h1, h2, h3, h4 {
font-family: Arial, Helvetica, sans-serif;
// font-weight: bold;
font-weight: 900;
}
/* ===================
Top navigation
===================== */
.navbar {
z-index: 999;
.nav {
// float: right;
li a{
padding: 22px 15px 22px;
}
}
.brand {
padding: 13px 10px 13px 28px ;
// padding-left: 30px;
}
background-color: white;
}
.navbar-dotcloud .container {
border-bottom: 2px @black solid;
}
/*
* Responsive YouTube, Vimeo, Embed, and HTML5 Videos with CSS
* http://www.jonsuh.com
*
* Copyright (c) 2012 Jonathan Suh
* Free to use under the MIT license.
* http://www.opensource.org/licenses/mit-license.php
*/
.js-video {
height: 0;
padding-top: 25px;
padding-bottom: 67.5%;
margin-bottom: 10px;
position: relative;
overflow: hidden;
}
.js-video.vimeo {
padding-top: 0;
}
.js-video.widescreen {
padding-bottom: 57.25%;
}
.js-video embed, .js-video iframe, .js-video object, .js-video video {
top: 0;
left: 0;
width: 100%;
height: 100%;
position: absolute;
}
/* Responsive */
@media (max-width: 767px) {
.js-video {
padding-top: 0;
}
}
/* button style from http://charliepark.org/bootstrap_buttons/ */
.btn-custom {
background-color: hsl(0, 0%, 16%) !important;
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr="#515151", endColorstr="#282828");
background-image: -khtml-gradient(linear, left top, left bottom, from(#515151), to(#282828));
background-image: -moz-linear-gradient(top, #515151, #282828);
background-image: -ms-linear-gradient(top, #515151, #282828);
background-image: -webkit-gradient(linear, left top, left bottom, color-stop(0%, #515151), color-stop(100%, #282828));
background-image: -webkit-linear-gradient(top, #515151, #282828);
background-image: -o-linear-gradient(top, #515151, #282828);
background-image: linear-gradient(#515151, #282828);
border-color: #282828 #282828 hsl(0, 0%, 12%);
color: #fff !important;
text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.26);
-webkit-font-smoothing: antialiased;
}
/* ===================
Page title bar
===================== */
h1.pageheader {
color: @white;
font-size: 20px;
font-family: "Arial Black", Tahoma, sans-serif;
margin: 8px;
margin-left: 22px;
}
/* ===================
Hero unit
===================== */
section.header {
margin-top:0;
}
.hero-unit {
background-color: @darkblue;
h5 {
color: @white;
}
.subtitle {
}
}
/* ===================
Main content layout
===================== */
.contentblock {
margin-top: 20px;
border-width: 3px;
// border-color: #E00;
// border-style:solid;
// border-color: @borderGray;
// box-sizing: border-box;
background-color: @grayLighter;
box-sizing: content-box;
padding: 20px;
}
.section img {
margin: 15px 15px 15px 0;
border: 2px solid gray;
}
.admonition {
padding: 10px;
border: 1px solid grey;
margin-bottom: 10px;
margin-top: 10px;
-webkit-border-radius: 4px;
-moz-border-radius: 4px;
border-radius: 4px;
}
.admonition .admonition-title {
font-weight: bold;
}
.admonition.note {
background-color: rgb(241, 235, 186);
}
.admonition.warning {
background-color: rgb(238, 217, 175);
}
.admonition.danger {
background-color: rgb(233, 188, 171);
}
/* ===================
left navigation
===================== */
.dotcloudsidebar {
// background-color: #ee3;
// border: 1px red dotted;
float: left;
height: 100%;
top: 0px;
bottom: 0px;
position: relative;
// margin: 0px;
min-height: 100%;
margin-top: 78px;
margin-bottom: 22px;
}
.sidebar {
// font-family: "Maven Pro";
font-weight: normal;
// margin-top: 38px;
float: left;
// width: 220px;
min-height: 475px;
// margin-bottom: 28px;
// padding-bottom: 120px;
background: #ececec;
border-left: 1px solid #bbbbbb;
border-right: 1px solid #cccccc;
position: relative;
ul {
padding: 0px;
li {
font-size: 14px;
// list-style: none;
list-style-type: none;
list-style-position: outside;
list-style-image: none;
margin-left: -25px;
padding: 0px;
a {
display: block;
color: #443331;
outline: 1px solid #dddddd;
padding: 12px 12px 10px 12px;
margin-top: 1px;
background-color: #d2d2d2;
}
.toctree-l1, .toctree-l2 {
}
.toctree-l1 {
font-size: larger;
a {
background-color: rgb(223, 223, 223);
}
.current {
font-weight: bold;
}
// margin-left: -25px;
}
.toctree-l2 {
a {
padding-left: 18px;
background-color: rgb(255, 255, 255);
}
.current {
font-weight: bold;
}
}
.toctree-l3 {
font-size: smaller;
a {
padding-left: 36px;
background-color: rgb(255, 255, 255);
}
.current {
font-weight: bold;
}
}
}
}
}
.brand img {
height: 38px;
margin-left: -6px;
}
.border-box {
box-sizing: border-box;
padding: 20px;
background-color: @lightblue;
color: white;
}
.titlebar {
background-color: @black;
margin-top: 0px;
margin-bottom: 20px;
min-height: 40px;
color: white;
// box-sizing: border-box;
padding-top: 8px;
padding-bottom: 8px;
}
.footer {
border-top: 2px solid black;
// background-color: #d2d2d2;
margin-top: 15px;
margin-bottom: 20px;
min-height: 40px;
padding-left: 8px;
padding-top: 8px;
padding-bottom: 8px;
}
/* This is the default */
.span6.with-padding {
background-color: @lightblue;
height: 200px;
color: white;
padding: 10px;
}
#global {
min-height: 500px;
}
/* =======================
Row size
======================= */
.row1 {
background-color: @grayLight;
height: 100%;
position: relative;
}
/* =======================
Social footer
======================= */
.social .twitter, .social .github, .social .googleplus {
background: url("../img/footer-links.png") no-repeat transparent;
display: inline-block;
height: 35px;
overflow: hidden;
text-indent: 9999px;
width: 35px;
margin-right: 10px;
}
.social .twitter {
background-position: 0px 2px;
}
.social .github {
background-position: -59px 2px;
}
/* =======================
Media size overrides
======================= */
/* Large desktop */
@media (min-width: 1200px) {
.span6.with-padding {
background-color: @red;
width: (@gridColumnWidth1200 * 6) + (@gridGutterWidth1200 * 5) - @gridGutterWidth1200;
padding: @gridGutterWidth1200/2;
}
}
/* Normal desktop */
@media (min-width: 980px) and (max-width: 1199px) {
.span6.with-padding {
background-color: @lightred;
width: (@gridColumnWidth * 6) + (@gridGutterWidth * 5) - @gridGutterWidth;
padding: @gridGutterWidth/2;
}
}
/* Portrait tablet to landscape and desktop */
@media (min-width: 768px) and (max-width: 979px) {
body {
padding-top: 0px;
}
.span6.with-padding {
background-color: @darkblue;
width: (@gridColumnWidth768 * 6) + (@gridGutterWidth768 * 5) - @gridGutterWidth768;
padding: @gridGutterWidth768/2;
}
}
/* Landscape phone to portrait tablet */
@media (max-width: 767px) {
body {
padding-top: 0px;
}
#global {
/* TODO: Fix this to be relative to the navigation size */
padding-top: 600px;
}
}
/* Landscape phones and down */
@media (max-width: 480px) {
} | docs/theme/docker/static/css/main.less | 0 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.0001778463483788073,
0.00017324829241260886,
0.00016353235696442425,
0.0001736994308885187,
0.000003182724640282686
] |
{
"id": 1,
"code_window": [
"\tHostname string\n",
"\tUser string\n",
"\tMemory int64 // Memory limit (in bytes)\n",
"\tMemorySwap int64 // Total memory usage (memory + swap); set `-1' to disable swap\n",
"\tAttachStdin bool\n",
"\tAttachStdout bool\n",
"\tAttachStderr bool\n",
"\tPortSpecs []string\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tCpuShares int64 // CPU shares (relative weight vs. other containers)\n"
],
"file_path": "container.go",
"type": "add",
"edit_start_line_idx": 58
} | # -*- mode: ruby -*-
# vi: set ft=ruby :
$BUILDBOT_IP = '192.168.33.31'
def v10(config)
config.vm.box = 'debian'
config.vm.share_folder 'v-data', '/data/docker', File.dirname(__FILE__) + '/../..'
config.vm.network :hostonly, $BUILDBOT_IP
# Install debian packaging dependencies and create debian packages
config.vm.provision :shell, :inline => 'apt-get -qq update; apt-get install -y debhelper autotools-dev golang'
config.vm.provision :shell, :inline => 'cd /data/docker/packaging/debian; make debian'
end
Vagrant::VERSION < '1.1.0' and Vagrant::Config.run do |config|
v10(config)
end
Vagrant::VERSION >= '1.1.0' and Vagrant.configure('1') do |config|
v10(config)
end
| packaging/debian/Vagrantfile | 0 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.0001747136702761054,
0.0001714849058771506,
0.0001681230787653476,
0.00017161799769382924,
0.0000026922427878162125
] |
{
"id": 2,
"code_window": [
"\tif *flMemory > 0 && !capabilities.MemoryLimit {\n",
"\t\tfmt.Fprintf(stdout, \"WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\\n\")\n",
"\t\t*flMemory = 0\n",
"\t}\n",
"\n",
"\tvar flPorts ListOpts\n",
"\tcmd.Var(&flPorts, \"p\", \"Expose a container's port to the host (use 'docker port' to see the actual mapping)\")\n",
"\n",
"\tvar flEnv ListOpts\n",
"\tcmd.Var(&flEnv, \"e\", \"Set environment variables\")\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tflCpuShares := cmd.Int64(\"c\", 1024, \"CPU shares (relative weight)\")\n",
"\n"
],
"file_path": "container.go",
"type": "add",
"edit_start_line_idx": 93
} | package docker
import (
"bytes"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"github.com/dotcloud/docker/rcli"
"index/suffixarray"
"io"
"io/ioutil"
"net/http"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"sync"
"time"
)
// Go is a basic promise implementation: it wraps calls a function in a goroutine,
// and returns a channel which will later return the function's return value.
func Go(f func() error) chan error {
ch := make(chan error)
go func() {
ch <- f()
}()
return ch
}
// Request a given URL and return an io.Reader
func Download(url string, stderr io.Writer) (*http.Response, error) {
var resp *http.Response
var err error = nil
if resp, err = http.Get(url); err != nil {
return nil, err
}
if resp.StatusCode >= 400 {
return nil, errors.New("Got HTTP status code >= 400: " + resp.Status)
}
return resp, nil
}
// Debug function, if the debug flag is set, then display. Do nothing otherwise
// If Docker is in damon mode, also send the debug info on the socket
func Debugf(format string, a ...interface{}) {
if os.Getenv("DEBUG") != "" {
// Retrieve the stack infos
_, file, line, ok := runtime.Caller(1)
if !ok {
file = "<unknown>"
line = -1
} else {
file = file[strings.LastIndex(file, "/")+1:]
}
fmt.Fprintf(os.Stderr, fmt.Sprintf("[debug] %s:%d %s\n", file, line, format), a...)
if rcli.CLIENT_SOCKET != nil {
fmt.Fprintf(rcli.CLIENT_SOCKET, fmt.Sprintf("[debug] %s:%d %s\n", file, line, format), a...)
}
}
}
// Reader with progress bar
type progressReader struct {
reader io.ReadCloser // Stream to read from
output io.Writer // Where to send progress bar to
readTotal int // Expected stream length (bytes)
readProgress int // How much has been read so far (bytes)
lastUpdate int // How many bytes read at least update
template string // Template to print. Default "%v/%v (%v)"
}
func (r *progressReader) Read(p []byte) (n int, err error) {
read, err := io.ReadCloser(r.reader).Read(p)
r.readProgress += read
updateEvery := 4096
if r.readTotal > 0 {
// Only update progress for every 1% read
if increment := int(0.01 * float64(r.readTotal)); increment > updateEvery {
updateEvery = increment
}
}
if r.readProgress-r.lastUpdate > updateEvery || err != nil {
if r.readTotal > 0 {
fmt.Fprintf(r.output, r.template+"\r", r.readProgress, r.readTotal, fmt.Sprintf("%.0f%%", float64(r.readProgress)/float64(r.readTotal)*100))
} else {
fmt.Fprintf(r.output, r.template+"\r", r.readProgress, "?", "n/a")
}
r.lastUpdate = r.readProgress
}
// Send newline when complete
if err != nil {
fmt.Fprintf(r.output, "\n")
}
return read, err
}
func (r *progressReader) Close() error {
return io.ReadCloser(r.reader).Close()
}
func ProgressReader(r io.ReadCloser, size int, output io.Writer, template string) *progressReader {
if template == "" {
template = "%v/%v (%v)"
}
return &progressReader{r, output, size, 0, 0, template}
}
// HumanDuration returns a human-readable approximation of a duration
// (eg. "About a minute", "4 hours ago", etc.)
func HumanDuration(d time.Duration) string {
if seconds := int(d.Seconds()); seconds < 1 {
return "Less than a second"
} else if seconds < 60 {
return fmt.Sprintf("%d seconds", seconds)
} else if minutes := int(d.Minutes()); minutes == 1 {
return "About a minute"
} else if minutes < 60 {
return fmt.Sprintf("%d minutes", minutes)
} else if hours := int(d.Hours()); hours == 1 {
return "About an hour"
} else if hours < 48 {
return fmt.Sprintf("%d hours", hours)
} else if hours < 24*7*2 {
return fmt.Sprintf("%d days", hours/24)
} else if hours < 24*30*3 {
return fmt.Sprintf("%d weeks", hours/24/7)
} else if hours < 24*365*2 {
return fmt.Sprintf("%d months", hours/24/30)
}
return fmt.Sprintf("%d years", d.Hours()/24/365)
}
func Trunc(s string, maxlen int) string {
if len(s) <= maxlen {
return s
}
return s[:maxlen]
}
// Figure out the absolute path of our own binary
func SelfPath() string {
path, err := exec.LookPath(os.Args[0])
if err != nil {
panic(err)
}
path, err = filepath.Abs(path)
if err != nil {
panic(err)
}
return path
}
type nopWriter struct {
}
func (w *nopWriter) Write(buf []byte) (int, error) {
return len(buf), nil
}
type nopWriteCloser struct {
io.Writer
}
func (w *nopWriteCloser) Close() error { return nil }
func NopWriteCloser(w io.Writer) io.WriteCloser {
return &nopWriteCloser{w}
}
type bufReader struct {
buf *bytes.Buffer
reader io.Reader
err error
l sync.Mutex
wait sync.Cond
}
func newBufReader(r io.Reader) *bufReader {
reader := &bufReader{
buf: &bytes.Buffer{},
reader: r,
}
reader.wait.L = &reader.l
go reader.drain()
return reader
}
func (r *bufReader) drain() {
buf := make([]byte, 1024)
for {
n, err := r.reader.Read(buf)
r.l.Lock()
if err != nil {
r.err = err
} else {
r.buf.Write(buf[0:n])
}
r.wait.Signal()
r.l.Unlock()
if err != nil {
break
}
}
}
func (r *bufReader) Read(p []byte) (n int, err error) {
r.l.Lock()
defer r.l.Unlock()
for {
n, err = r.buf.Read(p)
if n > 0 {
return n, err
}
if r.err != nil {
return 0, r.err
}
r.wait.Wait()
}
panic("unreachable")
}
func (r *bufReader) Close() error {
closer, ok := r.reader.(io.ReadCloser)
if !ok {
return nil
}
return closer.Close()
}
type writeBroadcaster struct {
mu sync.Mutex
writers map[io.WriteCloser]struct{}
}
func (w *writeBroadcaster) AddWriter(writer io.WriteCloser) {
w.mu.Lock()
w.writers[writer] = struct{}{}
w.mu.Unlock()
}
// FIXME: Is that function used?
// FIXME: This relies on the concrete writer type used having equality operator
func (w *writeBroadcaster) RemoveWriter(writer io.WriteCloser) {
w.mu.Lock()
delete(w.writers, writer)
w.mu.Unlock()
}
func (w *writeBroadcaster) Write(p []byte) (n int, err error) {
w.mu.Lock()
defer w.mu.Unlock()
for writer := range w.writers {
if n, err := writer.Write(p); err != nil || n != len(p) {
// On error, evict the writer
delete(w.writers, writer)
}
}
return len(p), nil
}
func (w *writeBroadcaster) CloseWriters() error {
w.mu.Lock()
defer w.mu.Unlock()
for writer := range w.writers {
writer.Close()
}
w.writers = make(map[io.WriteCloser]struct{})
return nil
}
func newWriteBroadcaster() *writeBroadcaster {
return &writeBroadcaster{writers: make(map[io.WriteCloser]struct{})}
}
func getTotalUsedFds() int {
if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
Debugf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
} else {
return len(fds)
}
return -1
}
// TruncIndex allows the retrieval of string identifiers by any of their unique prefixes.
// This is used to retrieve image and container IDs by more convenient shorthand prefixes.
type TruncIndex struct {
index *suffixarray.Index
ids map[string]bool
bytes []byte
}
func NewTruncIndex() *TruncIndex {
return &TruncIndex{
index: suffixarray.New([]byte{' '}),
ids: make(map[string]bool),
bytes: []byte{' '},
}
}
func (idx *TruncIndex) Add(id string) error {
if strings.Contains(id, " ") {
return fmt.Errorf("Illegal character: ' '")
}
if _, exists := idx.ids[id]; exists {
return fmt.Errorf("Id already exists: %s", id)
}
idx.ids[id] = true
idx.bytes = append(idx.bytes, []byte(id+" ")...)
idx.index = suffixarray.New(idx.bytes)
return nil
}
func (idx *TruncIndex) Delete(id string) error {
if _, exists := idx.ids[id]; !exists {
return fmt.Errorf("No such id: %s", id)
}
before, after, err := idx.lookup(id)
if err != nil {
return err
}
delete(idx.ids, id)
idx.bytes = append(idx.bytes[:before], idx.bytes[after:]...)
idx.index = suffixarray.New(idx.bytes)
return nil
}
func (idx *TruncIndex) lookup(s string) (int, int, error) {
offsets := idx.index.Lookup([]byte(" "+s), -1)
//log.Printf("lookup(%s): %v (index bytes: '%s')\n", s, offsets, idx.index.Bytes())
if offsets == nil || len(offsets) == 0 || len(offsets) > 1 {
return -1, -1, fmt.Errorf("No such id: %s", s)
}
offsetBefore := offsets[0] + 1
offsetAfter := offsetBefore + strings.Index(string(idx.bytes[offsetBefore:]), " ")
return offsetBefore, offsetAfter, nil
}
func (idx *TruncIndex) Get(s string) (string, error) {
before, after, err := idx.lookup(s)
//log.Printf("Get(%s) bytes=|%s| before=|%d| after=|%d|\n", s, idx.bytes, before, after)
if err != nil {
return "", err
}
return string(idx.bytes[before:after]), err
}
// TruncateId returns a shorthand version of a string identifier for convenience.
// A collision with other shorthands is very unlikely, but possible.
// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller
// will need to use a langer prefix, or the full-length Id.
func TruncateId(id string) string {
shortLen := 12
if len(id) < shortLen {
shortLen = len(id)
}
return id[:shortLen]
}
// Code c/c from io.Copy() modified to handle escape sequence
func CopyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error) {
buf := make([]byte, 32*1024)
for {
nr, er := src.Read(buf)
if nr > 0 {
// ---- Docker addition
// char 16 is C-p
if nr == 1 && buf[0] == 16 {
nr, er = src.Read(buf)
// char 17 is C-q
if nr == 1 && buf[0] == 17 {
if err := src.Close(); err != nil {
return 0, err
}
return 0, io.EOF
}
}
// ---- End of docker
nw, ew := dst.Write(buf[0:nr])
if nw > 0 {
written += int64(nw)
}
if ew != nil {
err = ew
break
}
if nr != nw {
err = io.ErrShortWrite
break
}
}
if er == io.EOF {
break
}
if er != nil {
err = er
break
}
}
return written, err
}
func HashData(src io.Reader) (string, error) {
h := sha256.New()
if _, err := io.Copy(h, src); err != nil {
return "", err
}
return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
}
type KernelVersionInfo struct {
Kernel int
Major int
Minor int
Flavor string
}
// FIXME: this doens't build on Darwin
func GetKernelVersion() (*KernelVersionInfo, error) {
return getKernelVersion()
}
func (k *KernelVersionInfo) String() string {
return fmt.Sprintf("%d.%d.%d-%s", k.Kernel, k.Major, k.Minor, k.Flavor)
}
// Compare two KernelVersionInfo struct.
// Returns -1 if a < b, = if a == b, 1 it a > b
func CompareKernelVersion(a, b *KernelVersionInfo) int {
if a.Kernel < b.Kernel {
return -1
} else if a.Kernel > b.Kernel {
return 1
}
if a.Major < b.Major {
return -1
} else if a.Major > b.Major {
return 1
}
if a.Minor < b.Minor {
return -1
} else if a.Minor > b.Minor {
return 1
}
return 0
}
func FindCgroupMountpoint(cgroupType string) (string, error) {
output, err := ioutil.ReadFile("/proc/mounts")
if err != nil {
return "", err
}
// /proc/mounts has 6 fields per line, one mount per line, e.g.
// cgroup /sys/fs/cgroup/devices cgroup rw,relatime,devices 0 0
for _, line := range strings.Split(string(output), "\n") {
parts := strings.Split(line, " ")
if len(parts) == 6 && parts[2] == "cgroup" {
for _, opt := range strings.Split(parts[3], ",") {
if opt == cgroupType {
return parts[1], nil
}
}
}
}
return "", fmt.Errorf("cgroup mountpoint not found for %s", cgroupType)
}
// Compare two Config struct. Do not compare the "Image" nor "Hostname" fields
// If OpenStdin is set, then it differs
func CompareConfig(a, b *Config) bool {
if a == nil || b == nil ||
a.OpenStdin || b.OpenStdin {
return false
}
if a.AttachStdout != b.AttachStdout ||
a.AttachStderr != b.AttachStderr ||
a.User != b.User ||
a.Memory != b.Memory ||
a.MemorySwap != b.MemorySwap ||
a.OpenStdin != b.OpenStdin ||
a.Tty != b.Tty {
return false
}
if len(a.Cmd) != len(b.Cmd) ||
len(a.Dns) != len(b.Dns) ||
len(a.Env) != len(b.Env) ||
len(a.PortSpecs) != len(b.PortSpecs) {
return false
}
for i := 0; i < len(a.Cmd); i++ {
if a.Cmd[i] != b.Cmd[i] {
return false
}
}
for i := 0; i < len(a.Dns); i++ {
if a.Dns[i] != b.Dns[i] {
return false
}
}
for i := 0; i < len(a.Env); i++ {
if a.Env[i] != b.Env[i] {
return false
}
}
for i := 0; i < len(a.PortSpecs); i++ {
if a.PortSpecs[i] != b.PortSpecs[i] {
return false
}
}
return true
}
| utils.go | 1 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.0010029342956840992,
0.00020323645730968565,
0.0001611152256373316,
0.00017043392290361226,
0.00013955074246041477
] |
{
"id": 2,
"code_window": [
"\tif *flMemory > 0 && !capabilities.MemoryLimit {\n",
"\t\tfmt.Fprintf(stdout, \"WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\\n\")\n",
"\t\t*flMemory = 0\n",
"\t}\n",
"\n",
"\tvar flPorts ListOpts\n",
"\tcmd.Var(&flPorts, \"p\", \"Expose a container's port to the host (use 'docker port' to see the actual mapping)\")\n",
"\n",
"\tvar flEnv ListOpts\n",
"\tcmd.Var(&flEnv, \"e\", \"Set environment variables\")\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tflCpuShares := cmd.Int64(\"c\", 1024, \"CPU shares (relative weight)\")\n",
"\n"
],
"file_path": "container.go",
"type": "add",
"edit_start_line_idx": 93
} | package auth
import (
"testing"
)
func TestEncodeAuth(t *testing.T) {
newAuthConfig := &AuthConfig{Username: "ken", Password: "test", Email: "[email protected]"}
authStr := EncodeAuth(newAuthConfig)
decAuthConfig, err := DecodeAuth(authStr)
if err != nil {
t.Fatal(err)
}
if newAuthConfig.Username != decAuthConfig.Username {
t.Fatal("Encode Username doesn't match decoded Username")
}
if newAuthConfig.Password != decAuthConfig.Password {
t.Fatal("Encode Password doesn't match decoded Password")
}
if authStr != "a2VuOnRlc3Q=" {
t.Fatal("AuthString encoding isn't correct.")
}
}
| auth/auth_test.go | 0 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.00017227594798896462,
0.00017072285118047148,
0.00016968653653748333,
0.00017020605446305126,
0.0000011185014727743692
] |
{
"id": 2,
"code_window": [
"\tif *flMemory > 0 && !capabilities.MemoryLimit {\n",
"\t\tfmt.Fprintf(stdout, \"WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\\n\")\n",
"\t\t*flMemory = 0\n",
"\t}\n",
"\n",
"\tvar flPorts ListOpts\n",
"\tcmd.Var(&flPorts, \"p\", \"Expose a container's port to the host (use 'docker port' to see the actual mapping)\")\n",
"\n",
"\tvar flEnv ListOpts\n",
"\tcmd.Var(&flEnv, \"e\", \"Set environment variables\")\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tflCpuShares := cmd.Int64(\"c\", 1024, \"CPU shares (relative weight)\")\n",
"\n"
],
"file_path": "container.go",
"type": "add",
"edit_start_line_idx": 93
} | 8
| packaging/ubuntu/compat | 0 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.00017143989680334926,
0.00017143989680334926,
0.00017143989680334926,
0.00017143989680334926,
0
] |
{
"id": 2,
"code_window": [
"\tif *flMemory > 0 && !capabilities.MemoryLimit {\n",
"\t\tfmt.Fprintf(stdout, \"WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\\n\")\n",
"\t\t*flMemory = 0\n",
"\t}\n",
"\n",
"\tvar flPorts ListOpts\n",
"\tcmd.Var(&flPorts, \"p\", \"Expose a container's port to the host (use 'docker port' to see the actual mapping)\")\n",
"\n",
"\tvar flEnv ListOpts\n",
"\tcmd.Var(&flEnv, \"e\", \"Set environment variables\")\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tflCpuShares := cmd.Int64(\"c\", 1024, \"CPU shares (relative weight)\")\n",
"\n"
],
"file_path": "container.go",
"type": "add",
"edit_start_line_idx": 93
} | :title: docker Registry documentation
:description: Documentation for docker Registry and Registry API
:keywords: docker, registry, api, index
Registry
========
Contents:
.. toctree::
:maxdepth: 2
api
| docs/sources/registry/index.rst | 0 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.00017380544159095734,
0.00017320725601166487,
0.00017260908498428762,
0.00017320725601166487,
5.98178303334862e-7
] |
{
"id": 3,
"code_window": [
"\t\tPortSpecs: flPorts,\n",
"\t\tUser: *flUser,\n",
"\t\tTty: *flTty,\n",
"\t\tOpenStdin: *flStdin,\n",
"\t\tMemory: *flMemory,\n",
"\t\tAttachStdin: flAttach.Get(\"stdin\"),\n",
"\t\tAttachStdout: flAttach.Get(\"stdout\"),\n",
"\t\tAttachStderr: flAttach.Get(\"stderr\"),\n",
"\t\tEnv: flEnv,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tCpuShares: *flCpuShares,\n"
],
"file_path": "container.go",
"type": "add",
"edit_start_line_idx": 139
} | package docker
import (
"bytes"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"github.com/dotcloud/docker/rcli"
"index/suffixarray"
"io"
"io/ioutil"
"net/http"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"sync"
"time"
)
// Go is a basic promise implementation: it wraps calls a function in a goroutine,
// and returns a channel which will later return the function's return value.
func Go(f func() error) chan error {
ch := make(chan error)
go func() {
ch <- f()
}()
return ch
}
// Request a given URL and return an io.Reader
func Download(url string, stderr io.Writer) (*http.Response, error) {
var resp *http.Response
var err error = nil
if resp, err = http.Get(url); err != nil {
return nil, err
}
if resp.StatusCode >= 400 {
return nil, errors.New("Got HTTP status code >= 400: " + resp.Status)
}
return resp, nil
}
// Debug function, if the debug flag is set, then display. Do nothing otherwise
// If Docker is in damon mode, also send the debug info on the socket
func Debugf(format string, a ...interface{}) {
if os.Getenv("DEBUG") != "" {
// Retrieve the stack infos
_, file, line, ok := runtime.Caller(1)
if !ok {
file = "<unknown>"
line = -1
} else {
file = file[strings.LastIndex(file, "/")+1:]
}
fmt.Fprintf(os.Stderr, fmt.Sprintf("[debug] %s:%d %s\n", file, line, format), a...)
if rcli.CLIENT_SOCKET != nil {
fmt.Fprintf(rcli.CLIENT_SOCKET, fmt.Sprintf("[debug] %s:%d %s\n", file, line, format), a...)
}
}
}
// Reader with progress bar
type progressReader struct {
reader io.ReadCloser // Stream to read from
output io.Writer // Where to send progress bar to
readTotal int // Expected stream length (bytes)
readProgress int // How much has been read so far (bytes)
lastUpdate int // How many bytes read at least update
template string // Template to print. Default "%v/%v (%v)"
}
func (r *progressReader) Read(p []byte) (n int, err error) {
read, err := io.ReadCloser(r.reader).Read(p)
r.readProgress += read
updateEvery := 4096
if r.readTotal > 0 {
// Only update progress for every 1% read
if increment := int(0.01 * float64(r.readTotal)); increment > updateEvery {
updateEvery = increment
}
}
if r.readProgress-r.lastUpdate > updateEvery || err != nil {
if r.readTotal > 0 {
fmt.Fprintf(r.output, r.template+"\r", r.readProgress, r.readTotal, fmt.Sprintf("%.0f%%", float64(r.readProgress)/float64(r.readTotal)*100))
} else {
fmt.Fprintf(r.output, r.template+"\r", r.readProgress, "?", "n/a")
}
r.lastUpdate = r.readProgress
}
// Send newline when complete
if err != nil {
fmt.Fprintf(r.output, "\n")
}
return read, err
}
func (r *progressReader) Close() error {
return io.ReadCloser(r.reader).Close()
}
func ProgressReader(r io.ReadCloser, size int, output io.Writer, template string) *progressReader {
if template == "" {
template = "%v/%v (%v)"
}
return &progressReader{r, output, size, 0, 0, template}
}
// HumanDuration returns a human-readable approximation of a duration
// (eg. "About a minute", "4 hours ago", etc.)
func HumanDuration(d time.Duration) string {
if seconds := int(d.Seconds()); seconds < 1 {
return "Less than a second"
} else if seconds < 60 {
return fmt.Sprintf("%d seconds", seconds)
} else if minutes := int(d.Minutes()); minutes == 1 {
return "About a minute"
} else if minutes < 60 {
return fmt.Sprintf("%d minutes", minutes)
} else if hours := int(d.Hours()); hours == 1 {
return "About an hour"
} else if hours < 48 {
return fmt.Sprintf("%d hours", hours)
} else if hours < 24*7*2 {
return fmt.Sprintf("%d days", hours/24)
} else if hours < 24*30*3 {
return fmt.Sprintf("%d weeks", hours/24/7)
} else if hours < 24*365*2 {
return fmt.Sprintf("%d months", hours/24/30)
}
return fmt.Sprintf("%d years", d.Hours()/24/365)
}
func Trunc(s string, maxlen int) string {
if len(s) <= maxlen {
return s
}
return s[:maxlen]
}
// Figure out the absolute path of our own binary
func SelfPath() string {
path, err := exec.LookPath(os.Args[0])
if err != nil {
panic(err)
}
path, err = filepath.Abs(path)
if err != nil {
panic(err)
}
return path
}
type nopWriter struct {
}
func (w *nopWriter) Write(buf []byte) (int, error) {
return len(buf), nil
}
type nopWriteCloser struct {
io.Writer
}
func (w *nopWriteCloser) Close() error { return nil }
func NopWriteCloser(w io.Writer) io.WriteCloser {
return &nopWriteCloser{w}
}
type bufReader struct {
buf *bytes.Buffer
reader io.Reader
err error
l sync.Mutex
wait sync.Cond
}
func newBufReader(r io.Reader) *bufReader {
reader := &bufReader{
buf: &bytes.Buffer{},
reader: r,
}
reader.wait.L = &reader.l
go reader.drain()
return reader
}
func (r *bufReader) drain() {
buf := make([]byte, 1024)
for {
n, err := r.reader.Read(buf)
r.l.Lock()
if err != nil {
r.err = err
} else {
r.buf.Write(buf[0:n])
}
r.wait.Signal()
r.l.Unlock()
if err != nil {
break
}
}
}
func (r *bufReader) Read(p []byte) (n int, err error) {
r.l.Lock()
defer r.l.Unlock()
for {
n, err = r.buf.Read(p)
if n > 0 {
return n, err
}
if r.err != nil {
return 0, r.err
}
r.wait.Wait()
}
panic("unreachable")
}
func (r *bufReader) Close() error {
closer, ok := r.reader.(io.ReadCloser)
if !ok {
return nil
}
return closer.Close()
}
type writeBroadcaster struct {
mu sync.Mutex
writers map[io.WriteCloser]struct{}
}
func (w *writeBroadcaster) AddWriter(writer io.WriteCloser) {
w.mu.Lock()
w.writers[writer] = struct{}{}
w.mu.Unlock()
}
// FIXME: Is that function used?
// FIXME: This relies on the concrete writer type used having equality operator
func (w *writeBroadcaster) RemoveWriter(writer io.WriteCloser) {
w.mu.Lock()
delete(w.writers, writer)
w.mu.Unlock()
}
func (w *writeBroadcaster) Write(p []byte) (n int, err error) {
w.mu.Lock()
defer w.mu.Unlock()
for writer := range w.writers {
if n, err := writer.Write(p); err != nil || n != len(p) {
// On error, evict the writer
delete(w.writers, writer)
}
}
return len(p), nil
}
func (w *writeBroadcaster) CloseWriters() error {
w.mu.Lock()
defer w.mu.Unlock()
for writer := range w.writers {
writer.Close()
}
w.writers = make(map[io.WriteCloser]struct{})
return nil
}
func newWriteBroadcaster() *writeBroadcaster {
return &writeBroadcaster{writers: make(map[io.WriteCloser]struct{})}
}
func getTotalUsedFds() int {
if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
Debugf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
} else {
return len(fds)
}
return -1
}
// TruncIndex allows the retrieval of string identifiers by any of their unique prefixes.
// This is used to retrieve image and container IDs by more convenient shorthand prefixes.
type TruncIndex struct {
index *suffixarray.Index
ids map[string]bool
bytes []byte
}
func NewTruncIndex() *TruncIndex {
return &TruncIndex{
index: suffixarray.New([]byte{' '}),
ids: make(map[string]bool),
bytes: []byte{' '},
}
}
func (idx *TruncIndex) Add(id string) error {
if strings.Contains(id, " ") {
return fmt.Errorf("Illegal character: ' '")
}
if _, exists := idx.ids[id]; exists {
return fmt.Errorf("Id already exists: %s", id)
}
idx.ids[id] = true
idx.bytes = append(idx.bytes, []byte(id+" ")...)
idx.index = suffixarray.New(idx.bytes)
return nil
}
func (idx *TruncIndex) Delete(id string) error {
if _, exists := idx.ids[id]; !exists {
return fmt.Errorf("No such id: %s", id)
}
before, after, err := idx.lookup(id)
if err != nil {
return err
}
delete(idx.ids, id)
idx.bytes = append(idx.bytes[:before], idx.bytes[after:]...)
idx.index = suffixarray.New(idx.bytes)
return nil
}
func (idx *TruncIndex) lookup(s string) (int, int, error) {
offsets := idx.index.Lookup([]byte(" "+s), -1)
//log.Printf("lookup(%s): %v (index bytes: '%s')\n", s, offsets, idx.index.Bytes())
if offsets == nil || len(offsets) == 0 || len(offsets) > 1 {
return -1, -1, fmt.Errorf("No such id: %s", s)
}
offsetBefore := offsets[0] + 1
offsetAfter := offsetBefore + strings.Index(string(idx.bytes[offsetBefore:]), " ")
return offsetBefore, offsetAfter, nil
}
func (idx *TruncIndex) Get(s string) (string, error) {
before, after, err := idx.lookup(s)
//log.Printf("Get(%s) bytes=|%s| before=|%d| after=|%d|\n", s, idx.bytes, before, after)
if err != nil {
return "", err
}
return string(idx.bytes[before:after]), err
}
// TruncateId returns a shorthand version of a string identifier for convenience.
// A collision with other shorthands is very unlikely, but possible.
// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller
// will need to use a langer prefix, or the full-length Id.
func TruncateId(id string) string {
shortLen := 12
if len(id) < shortLen {
shortLen = len(id)
}
return id[:shortLen]
}
// Code c/c from io.Copy() modified to handle escape sequence
func CopyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error) {
buf := make([]byte, 32*1024)
for {
nr, er := src.Read(buf)
if nr > 0 {
// ---- Docker addition
// char 16 is C-p
if nr == 1 && buf[0] == 16 {
nr, er = src.Read(buf)
// char 17 is C-q
if nr == 1 && buf[0] == 17 {
if err := src.Close(); err != nil {
return 0, err
}
return 0, io.EOF
}
}
// ---- End of docker
nw, ew := dst.Write(buf[0:nr])
if nw > 0 {
written += int64(nw)
}
if ew != nil {
err = ew
break
}
if nr != nw {
err = io.ErrShortWrite
break
}
}
if er == io.EOF {
break
}
if er != nil {
err = er
break
}
}
return written, err
}
func HashData(src io.Reader) (string, error) {
h := sha256.New()
if _, err := io.Copy(h, src); err != nil {
return "", err
}
return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
}
type KernelVersionInfo struct {
Kernel int
Major int
Minor int
Flavor string
}
// FIXME: this doens't build on Darwin
func GetKernelVersion() (*KernelVersionInfo, error) {
return getKernelVersion()
}
func (k *KernelVersionInfo) String() string {
return fmt.Sprintf("%d.%d.%d-%s", k.Kernel, k.Major, k.Minor, k.Flavor)
}
// Compare two KernelVersionInfo struct.
// Returns -1 if a < b, = if a == b, 1 it a > b
func CompareKernelVersion(a, b *KernelVersionInfo) int {
if a.Kernel < b.Kernel {
return -1
} else if a.Kernel > b.Kernel {
return 1
}
if a.Major < b.Major {
return -1
} else if a.Major > b.Major {
return 1
}
if a.Minor < b.Minor {
return -1
} else if a.Minor > b.Minor {
return 1
}
return 0
}
func FindCgroupMountpoint(cgroupType string) (string, error) {
output, err := ioutil.ReadFile("/proc/mounts")
if err != nil {
return "", err
}
// /proc/mounts has 6 fields per line, one mount per line, e.g.
// cgroup /sys/fs/cgroup/devices cgroup rw,relatime,devices 0 0
for _, line := range strings.Split(string(output), "\n") {
parts := strings.Split(line, " ")
if len(parts) == 6 && parts[2] == "cgroup" {
for _, opt := range strings.Split(parts[3], ",") {
if opt == cgroupType {
return parts[1], nil
}
}
}
}
return "", fmt.Errorf("cgroup mountpoint not found for %s", cgroupType)
}
// Compare two Config struct. Do not compare the "Image" nor "Hostname" fields
// If OpenStdin is set, then it differs
func CompareConfig(a, b *Config) bool {
if a == nil || b == nil ||
a.OpenStdin || b.OpenStdin {
return false
}
if a.AttachStdout != b.AttachStdout ||
a.AttachStderr != b.AttachStderr ||
a.User != b.User ||
a.Memory != b.Memory ||
a.MemorySwap != b.MemorySwap ||
a.OpenStdin != b.OpenStdin ||
a.Tty != b.Tty {
return false
}
if len(a.Cmd) != len(b.Cmd) ||
len(a.Dns) != len(b.Dns) ||
len(a.Env) != len(b.Env) ||
len(a.PortSpecs) != len(b.PortSpecs) {
return false
}
for i := 0; i < len(a.Cmd); i++ {
if a.Cmd[i] != b.Cmd[i] {
return false
}
}
for i := 0; i < len(a.Dns); i++ {
if a.Dns[i] != b.Dns[i] {
return false
}
}
for i := 0; i < len(a.Env); i++ {
if a.Env[i] != b.Env[i] {
return false
}
}
for i := 0; i < len(a.PortSpecs); i++ {
if a.PortSpecs[i] != b.PortSpecs[i] {
return false
}
}
return true
}
| utils.go | 1 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.006526533048599958,
0.00033596545108594,
0.00016050557314883918,
0.00017060643585864455,
0.000875251367688179
] |
{
"id": 3,
"code_window": [
"\t\tPortSpecs: flPorts,\n",
"\t\tUser: *flUser,\n",
"\t\tTty: *flTty,\n",
"\t\tOpenStdin: *flStdin,\n",
"\t\tMemory: *flMemory,\n",
"\t\tAttachStdin: flAttach.Get(\"stdin\"),\n",
"\t\tAttachStdout: flAttach.Get(\"stdout\"),\n",
"\t\tAttachStderr: flAttach.Get(\"stderr\"),\n",
"\t\tEnv: flEnv,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tCpuShares: *flCpuShares,\n"
],
"file_path": "container.go",
"type": "add",
"edit_start_line_idx": 139
} | #!/bin/sh
# Start docker
/sbin/start docker
| packaging/ubuntu/lxc-docker.postinst | 0 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.0001715460093691945,
0.0001715460093691945,
0.0001715460093691945,
0.0001715460093691945,
0
] |
{
"id": 3,
"code_window": [
"\t\tPortSpecs: flPorts,\n",
"\t\tUser: *flUser,\n",
"\t\tTty: *flTty,\n",
"\t\tOpenStdin: *flStdin,\n",
"\t\tMemory: *flMemory,\n",
"\t\tAttachStdin: flAttach.Get(\"stdin\"),\n",
"\t\tAttachStdout: flAttach.Get(\"stdout\"),\n",
"\t\tAttachStderr: flAttach.Get(\"stderr\"),\n",
"\t\tEnv: flEnv,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tCpuShares: *flCpuShares,\n"
],
"file_path": "container.go",
"type": "add",
"edit_start_line_idx": 139
} | FAQ
===
Most frequently asked questions.
--------------------------------
1. **How much does Docker cost?**
Docker is 100% free, it is open source, so you can use it without paying.
2. **What open source license are you using?**
We are using the Apache License Version 2.0, see it here: https://github.com/dotcloud/docker/blob/master/LICENSE
3. **Does Docker run on Mac OS X or Windows?**
Not at this time, Docker currently only runs on Linux, but you can use VirtualBox to run Docker in a virtual machine on your box, and get the best of both worlds. Check out the MacOSX_ and Windows_ intallation guides.
4. **How do containers compare to virtual machines?**
They are complementary. VMs are best used to allocate chunks of hardware resources. Containers operate at the process level, which makes them very lightweight and perfect as a unit of software delivery.
5. **Can I help by adding some questions and answers?**
Definitely! You can fork `the repo`_ and edit the documentation sources.
42. **Where can I find more answers?**
You can find more answers on:
* `IRC: docker on freenode`_
* `Github`_
* `Ask questions on Stackoverflow`_
* `Join the conversation on Twitter`_
.. _Windows: ../documentation/installation/windows.html
.. _MacOSX: ../documentation/installation/macos.html
.. _the repo: http://www.github.com/dotcloud/docker
.. _IRC\: docker on freenode: irc://chat.freenode.net#docker
.. _Github: http://www.github.com/dotcloud/docker
.. _Ask questions on Stackoverflow: http://stackoverflow.com/search?q=docker
.. _Join the conversation on Twitter: http://twitter.com/getdocker
Looking for something else to read? Checkout the :ref:`hello_world` example.
| docs/sources/faq.rst | 0 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.0001758542057359591,
0.00017231100355274975,
0.00016940331261139363,
0.0001711255026748404,
0.0000023676882392464904
] |
{
"id": 3,
"code_window": [
"\t\tPortSpecs: flPorts,\n",
"\t\tUser: *flUser,\n",
"\t\tTty: *flTty,\n",
"\t\tOpenStdin: *flStdin,\n",
"\t\tMemory: *flMemory,\n",
"\t\tAttachStdin: flAttach.Get(\"stdin\"),\n",
"\t\tAttachStdout: flAttach.Get(\"stdout\"),\n",
"\t\tAttachStderr: flAttach.Get(\"stderr\"),\n",
"\t\tEnv: flEnv,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tCpuShares: *flCpuShares,\n"
],
"file_path": "container.go",
"type": "add",
"edit_start_line_idx": 139
} | Static files dir
================
Files you put in /sources/static_files/ will be copied to the web visible /_static/
Be careful not to override pre-existing static files from the template.
Generally, layout related files should go in the /theme directory.
If you want to add images to your particular documentation page. Just put them next to
your .rst source file and reference them relatively. | docs/sources/static_files/README.md | 0 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.00017570720228832215,
0.00017206996562890708,
0.00016843274352140725,
0.00017206996562890708,
0.000003637229383457452
] |
{
"id": 4,
"code_window": [
"\tcontainer, err := NewBuilder(runtime).Create(\n",
"\t\t&Config{\n",
"\t\t\tImage: GetTestImage(runtime).Id,\n",
"\t\t\tMemory: 33554432,\n",
"\t\t\tCmd: []string{\"/bin/cat\"},\n",
"\t\t\tOpenStdin: true,\n",
"\t\t},\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tCpuShares: 1000,\n"
],
"file_path": "container_test.go",
"type": "add",
"edit_start_line_idx": 392
} | package docker
import (
"encoding/json"
"fmt"
"github.com/dotcloud/docker/rcli"
"github.com/kr/pty"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"path"
"sort"
"strconv"
"strings"
"syscall"
"time"
)
type Container struct {
root string
Id string
Created time.Time
Path string
Args []string
Config *Config
State State
Image string
network *NetworkInterface
NetworkSettings *NetworkSettings
SysInitPath string
ResolvConfPath string
cmd *exec.Cmd
stdout *writeBroadcaster
stderr *writeBroadcaster
stdin io.ReadCloser
stdinPipe io.WriteCloser
ptyMaster io.Closer
runtime *Runtime
waitLock chan struct{}
Volumes map[string]string
}
type Config struct {
Hostname string
User string
Memory int64 // Memory limit (in bytes)
MemorySwap int64 // Total memory usage (memory + swap); set `-1' to disable swap
AttachStdin bool
AttachStdout bool
AttachStderr bool
PortSpecs []string
Tty bool // Attach standard streams to a tty, including stdin if it is not closed.
OpenStdin bool // Open stdin
StdinOnce bool // If true, close stdin after the 1 attached client disconnects.
Env []string
Cmd []string
Dns []string
Image string // Name of the image as it was passed by the operator (eg. could be symbolic)
Volumes map[string]struct{}
VolumesFrom string
}
func ParseRun(args []string, stdout io.Writer, capabilities *Capabilities) (*Config, error) {
cmd := rcli.Subcmd(stdout, "run", "[OPTIONS] IMAGE COMMAND [ARG...]", "Run a command in a new container")
if len(args) > 0 && args[0] != "--help" {
cmd.SetOutput(ioutil.Discard)
}
flHostname := cmd.String("h", "", "Container host name")
flUser := cmd.String("u", "", "Username or UID")
flDetach := cmd.Bool("d", false, "Detached mode: leave the container running in the background")
flAttach := NewAttachOpts()
cmd.Var(flAttach, "a", "Attach to stdin, stdout or stderr.")
flStdin := cmd.Bool("i", false, "Keep stdin open even if not attached")
flTty := cmd.Bool("t", false, "Allocate a pseudo-tty")
flMemory := cmd.Int64("m", 0, "Memory limit (in bytes)")
if *flMemory > 0 && !capabilities.MemoryLimit {
fmt.Fprintf(stdout, "WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
*flMemory = 0
}
var flPorts ListOpts
cmd.Var(&flPorts, "p", "Expose a container's port to the host (use 'docker port' to see the actual mapping)")
var flEnv ListOpts
cmd.Var(&flEnv, "e", "Set environment variables")
var flDns ListOpts
cmd.Var(&flDns, "dns", "Set custom dns servers")
flVolumes := NewPathOpts()
cmd.Var(flVolumes, "v", "Attach a data volume")
flVolumesFrom := cmd.String("volumes-from", "", "Mount volumes from the specified container")
if err := cmd.Parse(args); err != nil {
return nil, err
}
if *flDetach && len(flAttach) > 0 {
return nil, fmt.Errorf("Conflicting options: -a and -d")
}
// If neither -d or -a are set, attach to everything by default
if len(flAttach) == 0 && !*flDetach {
if !*flDetach {
flAttach.Set("stdout")
flAttach.Set("stderr")
if *flStdin {
flAttach.Set("stdin")
}
}
}
parsedArgs := cmd.Args()
runCmd := []string{}
image := ""
if len(parsedArgs) >= 1 {
image = cmd.Arg(0)
}
if len(parsedArgs) > 1 {
runCmd = parsedArgs[1:]
}
config := &Config{
Hostname: *flHostname,
PortSpecs: flPorts,
User: *flUser,
Tty: *flTty,
OpenStdin: *flStdin,
Memory: *flMemory,
AttachStdin: flAttach.Get("stdin"),
AttachStdout: flAttach.Get("stdout"),
AttachStderr: flAttach.Get("stderr"),
Env: flEnv,
Cmd: runCmd,
Dns: flDns,
Image: image,
Volumes: flVolumes,
VolumesFrom: *flVolumesFrom,
}
if *flMemory > 0 && !capabilities.SwapLimit {
fmt.Fprintf(stdout, "WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
config.MemorySwap = -1
}
// When allocating stdin in attached mode, close stdin at client disconnect
if config.OpenStdin && config.AttachStdin {
config.StdinOnce = true
}
return config, nil
}
type NetworkSettings struct {
IpAddress string
IpPrefixLen int
Gateway string
Bridge string
PortMapping map[string]string
}
// PortMappingHuman returns a human-readable description of the port
// mappings defined in the settings, sorted for stable output.
func (settings *NetworkSettings) PortMappingHuman() string {
	var pairs []string
	for private, public := range settings.PortMapping {
		pairs = append(pairs, fmt.Sprintf("%s->%s", public, private))
	}
	sort.Strings(pairs)
	return strings.Join(pairs, ", ")
}
// Inject copies the contents of file into the container's read-write
// layer at pth, creating any missing parent directories.
// Note: the caller keeps ownership of the reader; it is not closed here.
func (container *Container) Inject(file io.Reader, pth string) error {
	// Make sure the directory exists
	if err := os.MkdirAll(path.Join(container.rwPath(), path.Dir(pth)), 0755); err != nil {
		return err
	}
	// FIXME: Handle permissions/already existing dest
	dest, err := os.Create(path.Join(container.rwPath(), pth))
	if err != nil {
		return err
	}
	// BUGFIX: close the destination file; the original leaked the file
	// descriptor on both the success and the error path.
	defer dest.Close()
	if _, err := io.Copy(dest, file); err != nil {
		return err
	}
	return nil
}
// Cmd returns the exec.Cmd driving the container's process (nil if not started).
func (container *Container) Cmd() *exec.Cmd {
	return container.cmd
}
// When returns the container's creation time.
func (container *Container) When() time.Time {
	return container.Created
}
// FromDisk reloads the container's settings from its on-disk JSON file.
func (container *Container) FromDisk() error {
	raw, err := ioutil.ReadFile(container.jsonPath())
	if err != nil {
		return err
	}
	// Load container settings
	return json.Unmarshal(raw, container)
}
// ToDisk serializes the container's settings to its on-disk JSON file.
func (container *Container) ToDisk() error {
	serialized, err := json.Marshal(container)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(container.jsonPath(), serialized, 0666)
}
// generateLXCConfig renders the LXC configuration template for this
// container into its config file on disk.
func (container *Container) generateLXCConfig() error {
	out, err := os.Create(container.lxcConfigPath())
	if err != nil {
		return err
	}
	defer out.Close()
	return LxcTemplateCompiled.Execute(out, container)
}
// startPty launches the container's command with a pseudo-terminal:
// stdout/stderr (and optionally stdin) of the child are wired to the
// pty slave, while goroutines shuttle data between the pty master and
// the container's stream broadcasters.
func (container *Container) startPty() error {
	ptyMaster, ptySlave, err := pty.Open()
	if err != nil {
		return err
	}
	container.ptyMaster = ptyMaster
	container.cmd.Stdout = ptySlave
	container.cmd.Stderr = ptySlave
	// Copy the PTYs to our broadcasters
	go func() {
		defer container.stdout.CloseWriters()
		Debugf("[startPty] Begin of stdout pipe")
		io.Copy(container.stdout, ptyMaster)
		Debugf("[startPty] End of stdout pipe")
	}()
	// stdin
	if container.Config.OpenStdin {
		container.cmd.Stdin = ptySlave
		// Give the child its own session with the pty as controlling terminal.
		container.cmd.SysProcAttr = &syscall.SysProcAttr{Setctty: true, Setsid: true}
		go func() {
			defer container.stdin.Close()
			Debugf("[startPty] Begin of stdin pipe")
			io.Copy(ptyMaster, container.stdin)
			Debugf("[startPty] End of stdin pipe")
		}()
	}
	if err := container.cmd.Start(); err != nil {
		// NOTE(review): ptyMaster/ptySlave are not closed on this path —
		// possible fd leak; confirm before changing, monitor() closes ptyMaster.
		return err
	}
	// The parent no longer needs the slave end once the child holds it.
	ptySlave.Close()
	return nil
}
// start launches the container's command without a pty, connecting
// stdout/stderr directly to the broadcasters and, if OpenStdin is set,
// pumping the container's stdin stream into the child's stdin pipe.
func (container *Container) start() error {
	container.cmd.Stdout = container.stdout
	container.cmd.Stderr = container.stderr
	if container.Config.OpenStdin {
		stdin, err := container.cmd.StdinPipe()
		if err != nil {
			return err
		}
		go func() {
			defer stdin.Close()
			Debugf("Begin of stdin pipe [start]")
			io.Copy(stdin, container.stdin)
			Debugf("End of stdin pipe [start]")
		}()
	}
	return container.cmd.Start()
}
// Attach connects the given streams to the container's stdin, stdout and
// stderr. Each non-nil stream spawns a copy goroutine; the returned channel
// yields a single error (or nil) once all copy jobs have finished.
// stdinCloser, when non-nil, is closed alongside stdin in StdinOnce mode.
func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, stdout io.Writer, stderr io.Writer) chan error {
	var cStdout, cStderr io.ReadCloser

	var nJobs int
	errors := make(chan error, 3)
	if stdin != nil && container.Config.OpenStdin {
		nJobs += 1
		if cStdin, err := container.StdinPipe(); err != nil {
			errors <- err
		} else {
			go func() {
				Debugf("[start] attach stdin\n")
				defer Debugf("[end] attach stdin\n")
				// No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr
				if cStdout != nil {
					defer cStdout.Close()
				}
				if cStderr != nil {
					defer cStderr.Close()
				}
				if container.Config.StdinOnce && !container.Config.Tty {
					defer cStdin.Close()
				}
				if container.Config.Tty {
					_, err = CopyEscapable(cStdin, stdin)
				} else {
					_, err = io.Copy(cStdin, stdin)
				}
				if err != nil {
					Debugf("[error] attach stdin: %s\n", err)
				}
				// Discard error, expecting pipe error
				errors <- nil
			}()
		}
	}
	if stdout != nil {
		nJobs += 1
		if p, err := container.StdoutPipe(); err != nil {
			errors <- err
		} else {
			cStdout = p
			go func() {
				Debugf("[start] attach stdout\n")
				defer Debugf("[end] attach stdout\n")
				// If we are in StdinOnce mode, then close stdin
				if container.Config.StdinOnce {
					if stdin != nil {
						defer stdin.Close()
					}
					if stdinCloser != nil {
						defer stdinCloser.Close()
					}
				}
				_, err := io.Copy(stdout, cStdout)
				if err != nil {
					Debugf("[error] attach stdout: %s\n", err)
				}
				errors <- err
			}()
		}
	}
	if stderr != nil {
		nJobs += 1
		if p, err := container.StderrPipe(); err != nil {
			errors <- err
		} else {
			cStderr = p
			go func() {
				Debugf("[start] attach stderr\n")
				defer Debugf("[end] attach stderr\n")
				// If we are in StdinOnce mode, then close stdin
				if container.Config.StdinOnce {
					if stdin != nil {
						defer stdin.Close()
					}
					if stdinCloser != nil {
						defer stdinCloser.Close()
					}
				}
				_, err := io.Copy(stderr, cStderr)
				if err != nil {
					Debugf("[error] attach stderr: %s\n", err)
				}
				errors <- err
			}()
		}
	}
	// The returned goroutine waits for exactly nJobs results and aborts on
	// the first non-nil error (stdin errors were already mapped to nil above).
	return Go(func() error {
		if cStdout != nil {
			defer cStdout.Close()
		}
		if cStderr != nil {
			defer cStderr.Close()
		}
		// FIXME: how do clean up the stdin goroutine without the unwanted side effect
		// of closing the passed stdin? Add an intermediary io.Pipe?
		for i := 0; i < nJobs; i += 1 {
			Debugf("Waiting for job %d/%d\n", i+1, nJobs)
			if err := <-errors; err != nil {
				Debugf("Job %d returned error %s. Aborting all jobs\n", i+1, err)
				return err
			}
			Debugf("Job %d completed successfully\n", i+1)
		}
		Debugf("All jobs completed successfully\n")
		return nil
	})
}
// Start boots the container: it mounts the filesystem, allocates a network
// interface, downgrades config options the kernel cannot honor, creates the
// requested volumes, generates the LXC configuration, wires stdout/stderr to
// disk logs and finally launches the process via lxc-start (with or without
// a pty). It fails if the container is already running.
func (container *Container) Start() error {
	container.State.lock()
	defer container.State.unlock()

	if container.State.Running {
		return fmt.Errorf("The container %s is already running.", container.Id)
	}
	if err := container.EnsureMounted(); err != nil {
		return err
	}
	if err := container.allocateNetwork(); err != nil {
		return err
	}

	// Make sure the config is compatible with the current kernel
	if container.Config.Memory > 0 && !container.runtime.capabilities.MemoryLimit {
		log.Printf("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
		container.Config.Memory = 0
	}
	if container.Config.Memory > 0 && !container.runtime.capabilities.SwapLimit {
		log.Printf("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
		container.Config.MemorySwap = -1
	}

	container.Volumes = make(map[string]string)

	// Create the requested volumes
	for volPath := range container.Config.Volumes {
		if c, err := container.runtime.volumes.Create(nil, container, "", "", nil); err != nil {
			return err
		} else {
			if err := os.MkdirAll(path.Join(container.RootfsPath(), volPath), 0755); err != nil {
				// BUGFIX: was "return nil", silently swallowing the error.
				return err
			}
			container.Volumes[volPath] = c.Id
		}
	}

	if container.Config.VolumesFrom != "" {
		c := container.runtime.Get(container.Config.VolumesFrom)
		if c == nil {
			// BUGFIX: report the id that could not be found, not our own id.
			return fmt.Errorf("Container %s not found. Impossible to mount its volumes", container.Config.VolumesFrom)
		}
		for volPath, id := range c.Volumes {
			if _, exists := container.Volumes[volPath]; exists {
				return fmt.Errorf("The requested volume %s overlap one of the volume of the container %s", volPath, c.Id)
			}
			if err := os.MkdirAll(path.Join(container.RootfsPath(), volPath), 0755); err != nil {
				// BUGFIX: was "return nil", silently swallowing the error.
				return err
			}
			container.Volumes[volPath] = id
		}
	}

	if err := container.generateLXCConfig(); err != nil {
		return err
	}
	params := []string{
		"-n", container.Id,
		"-f", container.lxcConfigPath(),
		"--",
		"/sbin/init",
	}

	// Networking
	params = append(params, "-g", container.network.Gateway.String())

	// User
	if container.Config.User != "" {
		params = append(params, "-u", container.Config.User)
	}

	if container.Config.Tty {
		params = append(params, "-e", "TERM=xterm")
	}

	// Setup environment
	params = append(params,
		"-e", "HOME=/",
		"-e", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
	)

	for _, elem := range container.Config.Env {
		params = append(params, "-e", elem)
	}

	// Program
	params = append(params, "--", container.Path)
	params = append(params, container.Args...)

	container.cmd = exec.Command("lxc-start", params...)

	// Setup logging of stdout and stderr to disk
	if err := container.runtime.LogToDisk(container.stdout, container.logPath("stdout")); err != nil {
		return err
	}
	if err := container.runtime.LogToDisk(container.stderr, container.logPath("stderr")); err != nil {
		return err
	}

	var err error
	if container.Config.Tty {
		err = container.startPty()
	} else {
		err = container.start()
	}
	if err != nil {
		return err
	}
	// FIXME: save state on disk *first*, then converge
	// this way disk state is used as a journal, eg. we can restore after crash etc.
	container.State.setRunning(container.cmd.Process.Pid)

	// Init the lock
	container.waitLock = make(chan struct{})

	container.ToDisk()
	go container.monitor()
	return nil
}
// Run starts the container and blocks until it exits. The container's
// exit code is not reported; only startup errors are returned.
func (container *Container) Run() error {
	err := container.Start()
	if err == nil {
		container.Wait()
	}
	return err
}
// Output starts the container, waits for it to exit, and returns
// everything it wrote to stdout. The read error (if any) from draining
// the pipe is returned alongside whatever output was captured.
func (container *Container) Output() (output []byte, err error) {
	pipe, err := container.StdoutPipe()
	if err != nil {
		return nil, err
	}
	defer pipe.Close()
	if err := container.Start(); err != nil {
		return nil, err
	}
	output, err = ioutil.ReadAll(pipe)
	container.Wait()
	return output, err
}
// StdinPipe() returns a pipe connected to the standard input of the container's
// active process.
//
func (container *Container) StdinPipe() (io.WriteCloser, error) {
	// All callers share the single stdin pipe created for the container.
	return container.stdinPipe, nil
}
// StdoutPipe returns a fresh reader attached to the container's stdout
// broadcaster; each call registers an independent pipe writer.
func (container *Container) StdoutPipe() (io.ReadCloser, error) {
	r, w := io.Pipe()
	container.stdout.AddWriter(w)
	return newBufReader(r), nil
}
// StderrPipe returns a fresh reader attached to the container's stderr
// broadcaster; each call registers an independent pipe writer.
func (container *Container) StderrPipe() (io.ReadCloser, error) {
	r, w := io.Pipe()
	container.stderr.AddWriter(w)
	return newBufReader(r), nil
}
// allocateNetwork acquires a network interface from the runtime's
// network manager, maps every requested port spec, and records the
// resulting addresses in container.NetworkSettings. On a port
// allocation failure the interface is released before returning.
func (container *Container) allocateNetwork() error {
	iface, err := container.runtime.networkManager.Allocate()
	if err != nil {
		return err
	}
	container.NetworkSettings.PortMapping = make(map[string]string)
	for _, spec := range container.Config.PortSpecs {
		if nat, err := iface.AllocatePort(spec); err != nil {
			iface.Release()
			return err
		} else {
			container.NetworkSettings.PortMapping[strconv.Itoa(nat.Backend)] = strconv.Itoa(nat.Frontend)
		}
	}
	container.network = iface
	container.NetworkSettings.Bridge = container.runtime.networkManager.bridgeIface
	container.NetworkSettings.IpAddress = iface.IPNet.IP.String()
	container.NetworkSettings.IpPrefixLen, _ = iface.IPNet.Mask.Size()
	container.NetworkSettings.Gateway = iface.Gateway.String()
	return nil
}
// releaseNetwork returns the container's interface to the network
// manager and resets the recorded network settings.
func (container *Container) releaseNetwork() {
	container.network.Release()
	container.network = nil
	container.NetworkSettings = &NetworkSettings{}
}
// FIXME: replace this with a control socket within docker-init
//
// waitLxc polls `lxc-info` every 500ms until the container is no longer
// reported as RUNNING, returning any error from running lxc-info itself.
func (container *Container) waitLxc() error {
	for {
		output, err := exec.Command("lxc-info", "-n", container.Id).CombinedOutput()
		if err != nil {
			return err
		}
		if !strings.Contains(string(output), "RUNNING") {
			return nil
		}
		time.Sleep(500 * time.Millisecond)
	}
	// BUGFIX: removed the unreachable trailing "return nil" after the
	// infinite loop (flagged by go vet) and flattened the if/else chain.
}
// monitor runs in its own goroutine for the lifetime of the container
// process. It waits for the process to exit (via cmd.Wait, or lxc-info
// polling when no cmd is attached), then tears everything down in order:
// network, stdio streams, pty, mount — and finally publishes the exit
// code and unblocks Wait() by closing waitLock.
func (container *Container) monitor() {
	// Wait for the program to exit
	Debugf("Waiting for process")

	// If the command does not exists, try to wait via lxc
	if container.cmd == nil {
		if err := container.waitLxc(); err != nil {
			Debugf("%s: Process: %s", container.Id, err)
		}
	} else {
		if err := container.cmd.Wait(); err != nil {
			// Discard the error as any signals or non 0 returns will generate an error
			Debugf("%s: Process: %s", container.Id, err)
		}
	}
	Debugf("Process finished")

	// -1 is reported when we could not observe a real exit status.
	var exitCode int = -1
	if container.cmd != nil {
		exitCode = container.cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
	}

	// Cleanup
	container.releaseNetwork()
	if container.Config.OpenStdin {
		if err := container.stdin.Close(); err != nil {
			Debugf("%s: Error close stdin: %s", container.Id, err)
		}
	}
	if err := container.stdout.CloseWriters(); err != nil {
		Debugf("%s: Error close stdout: %s", container.Id, err)
	}
	if err := container.stderr.CloseWriters(); err != nil {
		Debugf("%s: Error close stderr: %s", container.Id, err)
	}

	if container.ptyMaster != nil {
		if err := container.ptyMaster.Close(); err != nil {
			Debugf("%s: Error closing Pty master: %s", container.Id, err)
		}
	}

	if err := container.Unmount(); err != nil {
		log.Printf("%v: Failed to umount filesystem: %v", container.Id, err)
	}

	// Re-create a brand new stdin pipe once the container exited
	if container.Config.OpenStdin {
		container.stdin, container.stdinPipe = io.Pipe()
	}

	// Report status back
	container.State.setStopped(exitCode)

	// Release the lock
	close(container.waitLock)

	if err := container.ToDisk(); err != nil {
		// FIXME: there is a race condition here which causes this to fail during the unit tests.
		// If another goroutine was waiting for Wait() to return before removing the container's root
		// from the filesystem... At this point it may already have done so.
		// This is because State.setStopped() has already been called, and has caused Wait()
		// to return.
		// FIXME: why are we serializing running state to disk in the first place?

		//log.Printf("%s: Failed to dump configuration to the disk: %s", container.Id, err)
	}
}
// kill forcibly terminates a running container: first SIGKILL via
// lxc-kill, then — if the process survives 10 seconds — a direct
// SIGKILL on the process, finally blocking until the container stops.
// Callers must hold the state lock (see Kill).
func (container *Container) kill() error {
	if !container.State.Running {
		return nil
	}

	// Sending SIGKILL to the process via lxc
	output, err := exec.Command("lxc-kill", "-n", container.Id, "9").CombinedOutput()
	if err != nil {
		log.Printf("error killing container %s (%s, %s)", container.Id, output, err)
	}

	// 2. Wait for the process to die, in last resort, try to kill the process directly
	if err := container.WaitTimeout(10 * time.Second); err != nil {
		if container.cmd == nil {
			return fmt.Errorf("lxc-kill failed, impossible to kill the container %s", container.Id)
		}
		log.Printf("Container %s failed to exit within 10 seconds of lxc SIGKILL - trying direct SIGKILL", container.Id)
		if err := container.cmd.Process.Kill(); err != nil {
			return err
		}
	}

	// Wait for the container to be actually stopped
	container.Wait()
	return nil
}
// Kill immediately terminates the container with SIGKILL. It is a
// no-op when the container is not running.
func (container *Container) Kill() error {
	container.State.lock()
	defer container.State.unlock()
	if container.State.Running {
		return container.kill()
	}
	return nil
}
// Stop gracefully stops the container: it sends SIGTERM via lxc-kill,
// waits up to the given number of seconds for the process to exit on
// its own, and falls back to a hard kill if it does not. It is a no-op
// when the container is not running.
func (container *Container) Stop(seconds int) error {
	container.State.lock()
	defer container.State.unlock()
	if !container.State.Running {
		return nil
	}

	// 1. Send a SIGTERM
	if output, err := exec.Command("lxc-kill", "-n", container.Id, "15").CombinedOutput(); err != nil {
		log.Print(string(output))
		log.Print("Failed to send SIGTERM to the process, force killing")
		if err := container.kill(); err != nil {
			return err
		}
	}

	// 2. Wait for the process to exit on its own
	if err := container.WaitTimeout(time.Duration(seconds) * time.Second); err != nil {
		log.Printf("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.Id, seconds)
		if err := container.kill(); err != nil {
			return err
		}
	}
	return nil
}
// Restart stops the container (graceful, with the given timeout in
// seconds) and starts it again.
func (container *Container) Restart(seconds int) error {
	err := container.Stop(seconds)
	if err == nil {
		err = container.Start()
	}
	return err
}
// Wait blocks until the container stops running, then returns its exit code.
func (container *Container) Wait() int {
	// waitLock is closed by monitor() once the process has exited.
	<-container.waitLock

	return container.State.ExitCode
}
// ExportRw returns an uncompressed tar archive of the container's
// read-write layer only.
func (container *Container) ExportRw() (Archive, error) {
	return Tar(container.rwPath(), Uncompressed)
}
// RwChecksum returns a checksum of the container's read-write layer,
// computed over an Xz-compressed tar of that layer.
func (container *Container) RwChecksum() (string, error) {
	archive, err := Tar(container.rwPath(), Xz)
	if err != nil {
		return "", err
	}
	return HashData(archive)
}
// Export returns an uncompressed tar archive of the container's full
// root filesystem, mounting it first if necessary.
func (container *Container) Export() (Archive, error) {
	if err := container.EnsureMounted(); err != nil {
		return nil, err
	}
	return Tar(container.RootfsPath(), Uncompressed)
}
// WaitTimeout blocks until the container stops or the timeout elapses,
// returning an error in the timeout case.
func (container *Container) WaitTimeout(timeout time.Duration) error {
	// BUGFIX: close the channel instead of sending on an unbuffered one,
	// so the waiter goroutine can finish after a timeout instead of
	// blocking forever on the send (goroutine leak).
	done := make(chan struct{})
	go func() {
		container.Wait()
		close(done)
	}()

	select {
	case <-time.After(timeout):
		return fmt.Errorf("Timed Out")
	case <-done:
		return nil
	}
	// BUGFIX: removed the unreachable panic("unreachable") after the
	// select (both cases return; go vet flags the dead statement).
}
// EnsureMounted mounts the container's filesystem unless it is already
// mounted.
func (container *Container) EnsureMounted() error {
	mounted, err := container.Mounted()
	if err != nil {
		return err
	}
	if mounted {
		return nil
	}
	return container.Mount()
}
// Mount mounts the container's image at its rootfs path, layering the
// read-write directory on top.
func (container *Container) Mount() error {
	image, err := container.GetImage()
	if err != nil {
		return err
	}
	return image.Mount(container.RootfsPath(), container.rwPath())
}
// Changes returns the filesystem changes recorded in the container's
// read-write layer relative to its image.
func (container *Container) Changes() ([]Change, error) {
	image, err := container.GetImage()
	if err != nil {
		return nil, err
	}
	return image.Changes(container.rwPath())
}
// GetImage looks up the container's image in the runtime's graph.
// It fails for containers not registered with a runtime.
func (container *Container) GetImage() (*Image, error) {
	if container.runtime == nil {
		return nil, fmt.Errorf("Can't get image of unregistered container")
	}
	return container.runtime.graph.Get(container.Image)
}
// Mounted reports whether the container's rootfs is currently mounted.
func (container *Container) Mounted() (bool, error) {
	return Mounted(container.RootfsPath())
}
// Unmount unmounts the container's rootfs.
func (container *Container) Unmount() error {
	return Unmount(container.RootfsPath())
}
// ShortId returns a shorthand version of the container's id for convenience.
// A collision with other container shorthands is very unlikely, but possible.
// In case of a collision a lookup with Runtime.Get() will fail, and the caller
// will need to use a longer prefix, or the full-length container Id.
func (container *Container) ShortId() string {
	return TruncateId(container.Id)
}
// logPath returns the on-disk path of the named log stream
// (e.g. "stdout", "stderr") for this container.
func (container *Container) logPath(name string) string {
	return path.Join(container.root, fmt.Sprintf("%s-%s.log", container.Id, name))
}
// ReadLog opens the named log stream ("stdout"/"stderr") for reading.
func (container *Container) ReadLog(name string) (io.Reader, error) {
	return os.Open(container.logPath(name))
}
// jsonPath returns the path of the container's serialized configuration.
func (container *Container) jsonPath() string {
	return path.Join(container.root, "config.json")
}
// lxcConfigPath returns the path of the generated LXC configuration file.
func (container *Container) lxcConfigPath() string {
	return path.Join(container.root, "config.lxc")
}
// RootfsPath returns the mount point of the container's root filesystem.
// This method must be exported to be used from the lxc template.
func (container *Container) RootfsPath() string {
	return path.Join(container.root, "rootfs")
}
// GetVolumes resolves the container's volume ids to host paths,
// returning a map of container path -> host "layer" directory.
func (container *Container) GetVolumes() (map[string]string, error) {
	ret := make(map[string]string)
	for volPath, id := range container.Volumes {
		volume, err := container.runtime.volumes.Get(id)
		if err != nil {
			return nil, err
		}
		root, err := volume.root()
		if err != nil {
			return nil, err
		}
		ret[volPath] = path.Join(root, "layer")
	}
	return ret, nil
}
// rwPath returns the path of the container's read-write layer.
func (container *Container) rwPath() string {
	return path.Join(container.root, "rw")
}
// validateId rejects empty container ids; any non-empty id is accepted.
func validateId(id string) error {
	if len(id) == 0 {
		return fmt.Errorf("Invalid empty id")
	}
	return nil
}
| container.go | 1 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.9851524829864502,
0.1871778964996338,
0.0001628866739338264,
0.001335539505816996,
0.35262978076934814
] |
{
"id": 4,
"code_window": [
"\tcontainer, err := NewBuilder(runtime).Create(\n",
"\t\t&Config{\n",
"\t\t\tImage: GetTestImage(runtime).Id,\n",
"\t\t\tMemory: 33554432,\n",
"\t\t\tCmd: []string{\"/bin/cat\"},\n",
"\t\t\tOpenStdin: true,\n",
"\t\t},\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tCpuShares: 1000,\n"
],
"file_path": "container_test.go",
"type": "add",
"edit_start_line_idx": 392
} | /*!
* Bootstrap Responsive v2.3.0
*
* Copyright 2012 Twitter, Inc
* Licensed under the Apache License v2.0
* http://www.apache.org/licenses/LICENSE-2.0
*
* Designed and built with all the love in the world @twitter by @mdo and @fat.
*/
.clearfix {
*zoom: 1;
}
.clearfix:before,
.clearfix:after {
display: table;
line-height: 0;
content: "";
}
.clearfix:after {
clear: both;
}
.hide-text {
font: 0/0 a;
color: transparent;
text-shadow: none;
background-color: transparent;
border: 0;
}
.input-block-level {
display: block;
width: 100%;
min-height: 30px;
-webkit-box-sizing: border-box;
-moz-box-sizing: border-box;
box-sizing: border-box;
}
@-ms-viewport {
width: device-width;
}
.hidden {
display: none;
visibility: hidden;
}
.visible-phone {
display: none !important;
}
.visible-tablet {
display: none !important;
}
.hidden-desktop {
display: none !important;
}
.visible-desktop {
display: inherit !important;
}
@media (min-width: 768px) and (max-width: 979px) {
.hidden-desktop {
display: inherit !important;
}
.visible-desktop {
display: none !important ;
}
.visible-tablet {
display: inherit !important;
}
.hidden-tablet {
display: none !important;
}
}
@media (max-width: 767px) {
.hidden-desktop {
display: inherit !important;
}
.visible-desktop {
display: none !important;
}
.visible-phone {
display: inherit !important;
}
.hidden-phone {
display: none !important;
}
}
.visible-print {
display: none !important;
}
@media print {
.visible-print {
display: inherit !important;
}
.hidden-print {
display: none !important;
}
}
@media (min-width: 1200px) {
.row {
margin-left: -30px;
*zoom: 1;
}
.row:before,
.row:after {
display: table;
line-height: 0;
content: "";
}
.row:after {
clear: both;
}
[class*="span"] {
float: left;
min-height: 1px;
margin-left: 30px;
}
.container,
.navbar-static-top .container,
.navbar-fixed-top .container,
.navbar-fixed-bottom .container {
width: 1170px;
}
.span12 {
width: 1170px;
}
.span11 {
width: 1070px;
}
.span10 {
width: 970px;
}
.span9 {
width: 870px;
}
.span8 {
width: 770px;
}
.span7 {
width: 670px;
}
.span6 {
width: 570px;
}
.span5 {
width: 470px;
}
.span4 {
width: 370px;
}
.span3 {
width: 270px;
}
.span2 {
width: 170px;
}
.span1 {
width: 70px;
}
.offset12 {
margin-left: 1230px;
}
.offset11 {
margin-left: 1130px;
}
.offset10 {
margin-left: 1030px;
}
.offset9 {
margin-left: 930px;
}
.offset8 {
margin-left: 830px;
}
.offset7 {
margin-left: 730px;
}
.offset6 {
margin-left: 630px;
}
.offset5 {
margin-left: 530px;
}
.offset4 {
margin-left: 430px;
}
.offset3 {
margin-left: 330px;
}
.offset2 {
margin-left: 230px;
}
.offset1 {
margin-left: 130px;
}
.row-fluid {
width: 100%;
*zoom: 1;
}
.row-fluid:before,
.row-fluid:after {
display: table;
line-height: 0;
content: "";
}
.row-fluid:after {
clear: both;
}
.row-fluid [class*="span"] {
display: block;
float: left;
width: 100%;
min-height: 30px;
margin-left: 2.564102564102564%;
*margin-left: 2.5109110747408616%;
-webkit-box-sizing: border-box;
-moz-box-sizing: border-box;
box-sizing: border-box;
}
.row-fluid [class*="span"]:first-child {
margin-left: 0;
}
.row-fluid .controls-row [class*="span"] + [class*="span"] {
margin-left: 2.564102564102564%;
}
.row-fluid .span12 {
width: 100%;
*width: 99.94680851063829%;
}
.row-fluid .span11 {
width: 91.45299145299145%;
*width: 91.39979996362975%;
}
.row-fluid .span10 {
width: 82.90598290598291%;
*width: 82.8527914166212%;
}
.row-fluid .span9 {
width: 74.35897435897436%;
*width: 74.30578286961266%;
}
.row-fluid .span8 {
width: 65.81196581196582%;
*width: 65.75877432260411%;
}
.row-fluid .span7 {
width: 57.26495726495726%;
*width: 57.21176577559556%;
}
.row-fluid .span6 {
width: 48.717948717948715%;
*width: 48.664757228587014%;
}
.row-fluid .span5 {
width: 40.17094017094017%;
*width: 40.11774868157847%;
}
.row-fluid .span4 {
width: 31.623931623931625%;
*width: 31.570740134569924%;
}
.row-fluid .span3 {
width: 23.076923076923077%;
*width: 23.023731587561375%;
}
.row-fluid .span2 {
width: 14.52991452991453%;
*width: 14.476723040552828%;
}
.row-fluid .span1 {
width: 5.982905982905983%;
*width: 5.929714493544281%;
}
.row-fluid .offset12 {
margin-left: 105.12820512820512%;
*margin-left: 105.02182214948171%;
}
.row-fluid .offset12:first-child {
margin-left: 102.56410256410257%;
*margin-left: 102.45771958537915%;
}
.row-fluid .offset11 {
margin-left: 96.58119658119658%;
*margin-left: 96.47481360247316%;
}
.row-fluid .offset11:first-child {
margin-left: 94.01709401709402%;
*margin-left: 93.91071103837061%;
}
.row-fluid .offset10 {
margin-left: 88.03418803418803%;
*margin-left: 87.92780505546462%;
}
.row-fluid .offset10:first-child {
margin-left: 85.47008547008548%;
*margin-left: 85.36370249136206%;
}
.row-fluid .offset9 {
margin-left: 79.48717948717949%;
*margin-left: 79.38079650845607%;
}
.row-fluid .offset9:first-child {
margin-left: 76.92307692307693%;
*margin-left: 76.81669394435352%;
}
.row-fluid .offset8 {
margin-left: 70.94017094017094%;
*margin-left: 70.83378796144753%;
}
.row-fluid .offset8:first-child {
margin-left: 68.37606837606839%;
*margin-left: 68.26968539734497%;
}
.row-fluid .offset7 {
margin-left: 62.393162393162385%;
*margin-left: 62.28677941443899%;
}
.row-fluid .offset7:first-child {
margin-left: 59.82905982905982%;
*margin-left: 59.72267685033642%;
}
.row-fluid .offset6 {
margin-left: 53.84615384615384%;
*margin-left: 53.739770867430444%;
}
.row-fluid .offset6:first-child {
margin-left: 51.28205128205128%;
*margin-left: 51.175668303327875%;
}
.row-fluid .offset5 {
margin-left: 45.299145299145295%;
*margin-left: 45.1927623204219%;
}
.row-fluid .offset5:first-child {
margin-left: 42.73504273504273%;
*margin-left: 42.62865975631933%;
}
.row-fluid .offset4 {
margin-left: 36.75213675213675%;
*margin-left: 36.645753773413354%;
}
.row-fluid .offset4:first-child {
margin-left: 34.18803418803419%;
*margin-left: 34.081651209310785%;
}
.row-fluid .offset3 {
margin-left: 28.205128205128204%;
*margin-left: 28.0987452264048%;
}
.row-fluid .offset3:first-child {
margin-left: 25.641025641025642%;
*margin-left: 25.53464266230224%;
}
.row-fluid .offset2 {
margin-left: 19.65811965811966%;
*margin-left: 19.551736679396257%;
}
.row-fluid .offset2:first-child {
margin-left: 17.094017094017094%;
*margin-left: 16.98763411529369%;
}
.row-fluid .offset1 {
margin-left: 11.11111111111111%;
*margin-left: 11.004728132387708%;
}
.row-fluid .offset1:first-child {
margin-left: 8.547008547008547%;
*margin-left: 8.440625568285142%;
}
input,
textarea,
.uneditable-input {
margin-left: 0;
}
.controls-row [class*="span"] + [class*="span"] {
margin-left: 30px;
}
input.span12,
textarea.span12,
.uneditable-input.span12 {
width: 1156px;
}
input.span11,
textarea.span11,
.uneditable-input.span11 {
width: 1056px;
}
input.span10,
textarea.span10,
.uneditable-input.span10 {
width: 956px;
}
input.span9,
textarea.span9,
.uneditable-input.span9 {
width: 856px;
}
input.span8,
textarea.span8,
.uneditable-input.span8 {
width: 756px;
}
input.span7,
textarea.span7,
.uneditable-input.span7 {
width: 656px;
}
input.span6,
textarea.span6,
.uneditable-input.span6 {
width: 556px;
}
input.span5,
textarea.span5,
.uneditable-input.span5 {
width: 456px;
}
input.span4,
textarea.span4,
.uneditable-input.span4 {
width: 356px;
}
input.span3,
textarea.span3,
.uneditable-input.span3 {
width: 256px;
}
input.span2,
textarea.span2,
.uneditable-input.span2 {
width: 156px;
}
input.span1,
textarea.span1,
.uneditable-input.span1 {
width: 56px;
}
.thumbnails {
margin-left: -30px;
}
.thumbnails > li {
margin-left: 30px;
}
.row-fluid .thumbnails {
margin-left: 0;
}
}
@media (min-width: 768px) and (max-width: 979px) {
.row {
margin-left: -20px;
*zoom: 1;
}
.row:before,
.row:after {
display: table;
line-height: 0;
content: "";
}
.row:after {
clear: both;
}
[class*="span"] {
float: left;
min-height: 1px;
margin-left: 20px;
}
.container,
.navbar-static-top .container,
.navbar-fixed-top .container,
.navbar-fixed-bottom .container {
width: 724px;
}
.span12 {
width: 724px;
}
.span11 {
width: 662px;
}
.span10 {
width: 600px;
}
.span9 {
width: 538px;
}
.span8 {
width: 476px;
}
.span7 {
width: 414px;
}
.span6 {
width: 352px;
}
.span5 {
width: 290px;
}
.span4 {
width: 228px;
}
.span3 {
width: 166px;
}
.span2 {
width: 104px;
}
.span1 {
width: 42px;
}
.offset12 {
margin-left: 764px;
}
.offset11 {
margin-left: 702px;
}
.offset10 {
margin-left: 640px;
}
.offset9 {
margin-left: 578px;
}
.offset8 {
margin-left: 516px;
}
.offset7 {
margin-left: 454px;
}
.offset6 {
margin-left: 392px;
}
.offset5 {
margin-left: 330px;
}
.offset4 {
margin-left: 268px;
}
.offset3 {
margin-left: 206px;
}
.offset2 {
margin-left: 144px;
}
.offset1 {
margin-left: 82px;
}
.row-fluid {
width: 100%;
*zoom: 1;
}
.row-fluid:before,
.row-fluid:after {
display: table;
line-height: 0;
content: "";
}
.row-fluid:after {
clear: both;
}
.row-fluid [class*="span"] {
display: block;
float: left;
width: 100%;
min-height: 30px;
margin-left: 2.7624309392265194%;
*margin-left: 2.709239449864817%;
-webkit-box-sizing: border-box;
-moz-box-sizing: border-box;
box-sizing: border-box;
}
.row-fluid [class*="span"]:first-child {
margin-left: 0;
}
.row-fluid .controls-row [class*="span"] + [class*="span"] {
margin-left: 2.7624309392265194%;
}
.row-fluid .span12 {
width: 100%;
*width: 99.94680851063829%;
}
.row-fluid .span11 {
width: 91.43646408839778%;
*width: 91.38327259903608%;
}
.row-fluid .span10 {
width: 82.87292817679558%;
*width: 82.81973668743387%;
}
.row-fluid .span9 {
width: 74.30939226519337%;
*width: 74.25620077583166%;
}
.row-fluid .span8 {
width: 65.74585635359117%;
*width: 65.69266486422946%;
}
.row-fluid .span7 {
width: 57.18232044198895%;
*width: 57.12912895262725%;
}
.row-fluid .span6 {
width: 48.61878453038674%;
*width: 48.56559304102504%;
}
.row-fluid .span5 {
width: 40.05524861878453%;
*width: 40.00205712942283%;
}
.row-fluid .span4 {
width: 31.491712707182323%;
*width: 31.43852121782062%;
}
.row-fluid .span3 {
width: 22.92817679558011%;
*width: 22.87498530621841%;
}
.row-fluid .span2 {
width: 14.3646408839779%;
*width: 14.311449394616199%;
}
.row-fluid .span1 {
width: 5.801104972375691%;
*width: 5.747913483013988%;
}
.row-fluid .offset12 {
margin-left: 105.52486187845304%;
*margin-left: 105.41847889972962%;
}
.row-fluid .offset12:first-child {
margin-left: 102.76243093922652%;
*margin-left: 102.6560479605031%;
}
.row-fluid .offset11 {
margin-left: 96.96132596685082%;
*margin-left: 96.8549429881274%;
}
.row-fluid .offset11:first-child {
margin-left: 94.1988950276243%;
*margin-left: 94.09251204890089%;
}
.row-fluid .offset10 {
margin-left: 88.39779005524862%;
*margin-left: 88.2914070765252%;
}
.row-fluid .offset10:first-child {
margin-left: 85.6353591160221%;
*margin-left: 85.52897613729868%;
}
.row-fluid .offset9 {
margin-left: 79.8342541436464%;
*margin-left: 79.72787116492299%;
}
.row-fluid .offset9:first-child {
margin-left: 77.07182320441989%;
*margin-left: 76.96544022569647%;
}
.row-fluid .offset8 {
margin-left: 71.2707182320442%;
*margin-left: 71.16433525332079%;
}
.row-fluid .offset8:first-child {
margin-left: 68.50828729281768%;
*margin-left: 68.40190431409427%;
}
.row-fluid .offset7 {
margin-left: 62.70718232044199%;
*margin-left: 62.600799341718584%;
}
.row-fluid .offset7:first-child {
margin-left: 59.94475138121547%;
*margin-left: 59.838368402492065%;
}
.row-fluid .offset6 {
margin-left: 54.14364640883978%;
*margin-left: 54.037263430116376%;
}
.row-fluid .offset6:first-child {
margin-left: 51.38121546961326%;
*margin-left: 51.27483249088986%;
}
.row-fluid .offset5 {
margin-left: 45.58011049723757%;
*margin-left: 45.47372751851417%;
}
.row-fluid .offset5:first-child {
margin-left: 42.81767955801105%;
*margin-left: 42.71129657928765%;
}
.row-fluid .offset4 {
margin-left: 37.01657458563536%;
*margin-left: 36.91019160691196%;
}
.row-fluid .offset4:first-child {
margin-left: 34.25414364640884%;
*margin-left: 34.14776066768544%;
}
.row-fluid .offset3 {
margin-left: 28.45303867403315%;
*margin-left: 28.346655695309746%;
}
.row-fluid .offset3:first-child {
margin-left: 25.69060773480663%;
*margin-left: 25.584224756083227%;
}
.row-fluid .offset2 {
margin-left: 19.88950276243094%;
*margin-left: 19.783119783707537%;
}
.row-fluid .offset2:first-child {
margin-left: 17.12707182320442%;
*margin-left: 17.02068884448102%;
}
.row-fluid .offset1 {
margin-left: 11.32596685082873%;
*margin-left: 11.219583872105325%;
}
.row-fluid .offset1:first-child {
margin-left: 8.56353591160221%;
*margin-left: 8.457152932878806%;
}
input,
textarea,
.uneditable-input {
margin-left: 0;
}
.controls-row [class*="span"] + [class*="span"] {
margin-left: 20px;
}
input.span12,
textarea.span12,
.uneditable-input.span12 {
width: 710px;
}
input.span11,
textarea.span11,
.uneditable-input.span11 {
width: 648px;
}
input.span10,
textarea.span10,
.uneditable-input.span10 {
width: 586px;
}
input.span9,
textarea.span9,
.uneditable-input.span9 {
width: 524px;
}
input.span8,
textarea.span8,
.uneditable-input.span8 {
width: 462px;
}
input.span7,
textarea.span7,
.uneditable-input.span7 {
width: 400px;
}
input.span6,
textarea.span6,
.uneditable-input.span6 {
width: 338px;
}
input.span5,
textarea.span5,
.uneditable-input.span5 {
width: 276px;
}
input.span4,
textarea.span4,
.uneditable-input.span4 {
width: 214px;
}
input.span3,
textarea.span3,
.uneditable-input.span3 {
width: 152px;
}
input.span2,
textarea.span2,
.uneditable-input.span2 {
width: 90px;
}
input.span1,
textarea.span1,
.uneditable-input.span1 {
width: 28px;
}
}
@media (max-width: 767px) {
body {
padding-right: 20px;
padding-left: 20px;
}
.navbar-fixed-top,
.navbar-fixed-bottom,
.navbar-static-top {
margin-right: -20px;
margin-left: -20px;
}
.container-fluid {
padding: 0;
}
.dl-horizontal dt {
float: none;
width: auto;
clear: none;
text-align: left;
}
.dl-horizontal dd {
margin-left: 0;
}
.container {
width: auto;
}
.row-fluid {
width: 100%;
}
.row,
.thumbnails {
margin-left: 0;
}
.thumbnails > li {
float: none;
margin-left: 0;
}
[class*="span"],
.uneditable-input[class*="span"],
.row-fluid [class*="span"] {
display: block;
float: none;
width: 100%;
margin-left: 0;
-webkit-box-sizing: border-box;
-moz-box-sizing: border-box;
box-sizing: border-box;
}
.span12,
.row-fluid .span12 {
width: 100%;
-webkit-box-sizing: border-box;
-moz-box-sizing: border-box;
box-sizing: border-box;
}
.row-fluid [class*="offset"]:first-child {
margin-left: 0;
}
.input-large,
.input-xlarge,
.input-xxlarge,
input[class*="span"],
select[class*="span"],
textarea[class*="span"],
.uneditable-input {
display: block;
width: 100%;
min-height: 30px;
-webkit-box-sizing: border-box;
-moz-box-sizing: border-box;
box-sizing: border-box;
}
.input-prepend input,
.input-append input,
.input-prepend input[class*="span"],
.input-append input[class*="span"] {
display: inline-block;
width: auto;
}
.controls-row [class*="span"] + [class*="span"] {
margin-left: 0;
}
.modal {
position: fixed;
top: 20px;
right: 20px;
left: 20px;
width: auto;
margin: 0;
}
.modal.fade {
top: -100px;
}
.modal.fade.in {
top: 20px;
}
}
@media (max-width: 480px) {
.nav-collapse {
-webkit-transform: translate3d(0, 0, 0);
}
.page-header h1 small {
display: block;
line-height: 20px;
}
input[type="checkbox"],
input[type="radio"] {
border: 1px solid #ccc;
}
.form-horizontal .control-label {
float: none;
width: auto;
padding-top: 0;
text-align: left;
}
.form-horizontal .controls {
margin-left: 0;
}
.form-horizontal .control-list {
padding-top: 0;
}
.form-horizontal .form-actions {
padding-right: 10px;
padding-left: 10px;
}
.media .pull-left,
.media .pull-right {
display: block;
float: none;
margin-bottom: 10px;
}
.media-object {
margin-right: 0;
margin-left: 0;
}
.modal {
top: 10px;
right: 10px;
left: 10px;
}
.modal-header .close {
padding: 10px;
margin: -10px;
}
.carousel-caption {
position: static;
}
}
@media (max-width: 979px) {
body {
padding-top: 0;
}
.navbar-fixed-top,
.navbar-fixed-bottom {
position: static;
}
.navbar-fixed-top {
margin-bottom: 20px;
}
.navbar-fixed-bottom {
margin-top: 20px;
}
.navbar-fixed-top .navbar-inner,
.navbar-fixed-bottom .navbar-inner {
padding: 5px;
}
.navbar .container {
width: auto;
padding: 0;
}
.navbar .brand {
padding-right: 10px;
padding-left: 10px;
margin: 0 0 0 -5px;
}
.nav-collapse {
clear: both;
}
.nav-collapse .nav {
float: none;
margin: 0 0 10px;
}
.nav-collapse .nav > li {
float: none;
}
.nav-collapse .nav > li > a {
margin-bottom: 2px;
}
.nav-collapse .nav > .divider-vertical {
display: none;
}
.nav-collapse .nav .nav-header {
color: #777777;
text-shadow: none;
}
.nav-collapse .nav > li > a,
.nav-collapse .dropdown-menu a {
padding: 9px 15px;
font-weight: bold;
color: #777777;
-webkit-border-radius: 3px;
-moz-border-radius: 3px;
border-radius: 3px;
}
.nav-collapse .btn {
padding: 4px 10px 4px;
font-weight: normal;
-webkit-border-radius: 4px;
-moz-border-radius: 4px;
border-radius: 4px;
}
.nav-collapse .dropdown-menu li + li a {
margin-bottom: 2px;
}
.nav-collapse .nav > li > a:hover,
.nav-collapse .nav > li > a:focus,
.nav-collapse .dropdown-menu a:hover,
.nav-collapse .dropdown-menu a:focus {
background-color: #f2f2f2;
}
.navbar-inverse .nav-collapse .nav > li > a,
.navbar-inverse .nav-collapse .dropdown-menu a {
color: #999999;
}
.navbar-inverse .nav-collapse .nav > li > a:hover,
.navbar-inverse .nav-collapse .nav > li > a:focus,
.navbar-inverse .nav-collapse .dropdown-menu a:hover,
.navbar-inverse .nav-collapse .dropdown-menu a:focus {
background-color: #111111;
}
.nav-collapse.in .btn-group {
padding: 0;
margin-top: 5px;
}
.nav-collapse .dropdown-menu {
position: static;
top: auto;
left: auto;
display: none;
float: none;
max-width: none;
padding: 0;
margin: 0 15px;
background-color: transparent;
border: none;
-webkit-border-radius: 0;
-moz-border-radius: 0;
border-radius: 0;
-webkit-box-shadow: none;
-moz-box-shadow: none;
box-shadow: none;
}
.nav-collapse .open > .dropdown-menu {
display: block;
}
.nav-collapse .dropdown-menu:before,
.nav-collapse .dropdown-menu:after {
display: none;
}
.nav-collapse .dropdown-menu .divider {
display: none;
}
.nav-collapse .nav > li > .dropdown-menu:before,
.nav-collapse .nav > li > .dropdown-menu:after {
display: none;
}
.nav-collapse .navbar-form,
.nav-collapse .navbar-search {
float: none;
padding: 10px 15px;
margin: 10px 0;
border-top: 1px solid #f2f2f2;
border-bottom: 1px solid #f2f2f2;
-webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.1);
-moz-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.1);
box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.1);
}
.navbar-inverse .nav-collapse .navbar-form,
.navbar-inverse .nav-collapse .navbar-search {
border-top-color: #111111;
border-bottom-color: #111111;
}
.navbar .nav-collapse .nav.pull-right {
float: none;
margin-left: 0;
}
.nav-collapse,
.nav-collapse.collapse {
height: 0;
overflow: hidden;
}
.navbar .btn-navbar {
display: block;
}
.navbar-static .navbar-inner {
padding-right: 10px;
padding-left: 10px;
}
}
@media (min-width: 980px) {
.nav-collapse.collapse {
height: auto !important;
overflow: visible !important;
}
}
| docs/theme/docker/static/css/bootstrap-responsive.min.css | 0 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.0002314647426828742,
0.00017511924670543522,
0.0001674480881774798,
0.0001748920331010595,
0.000005793595846625976
] |
{
"id": 4,
"code_window": [
"\tcontainer, err := NewBuilder(runtime).Create(\n",
"\t\t&Config{\n",
"\t\t\tImage: GetTestImage(runtime).Id,\n",
"\t\t\tMemory: 33554432,\n",
"\t\t\tCmd: []string{\"/bin/cat\"},\n",
"\t\t\tOpenStdin: true,\n",
"\t\t},\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tCpuShares: 1000,\n"
],
"file_path": "container_test.go",
"type": "add",
"edit_start_line_idx": 392
} | #!/usr/bin/make -f
# -*- makefile -*-
# Sample debian/rules that uses debhelper.
# This file was originally written by Joey Hess and Craig Small.
# As a special exception, when this file is copied by dh-make into a
# dh-make output file, you may use that output file without restriction.
# This special exception was added by Craig Small in version 0.37 of dh-make.
# Uncomment this to turn on verbose mode.
#export DH_VERBOSE=1
%:
dh $@
| packaging/ubuntu/rules | 0 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.00017707355436868966,
0.00017326023953501135,
0.00016944692470133305,
0.00017326023953501135,
0.000003813314833678305
] |
{
"id": 4,
"code_window": [
"\tcontainer, err := NewBuilder(runtime).Create(\n",
"\t\t&Config{\n",
"\t\t\tImage: GetTestImage(runtime).Id,\n",
"\t\t\tMemory: 33554432,\n",
"\t\t\tCmd: []string{\"/bin/cat\"},\n",
"\t\t\tOpenStdin: true,\n",
"\t\t},\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tCpuShares: 1000,\n"
],
"file_path": "container_test.go",
"type": "add",
"edit_start_line_idx": 392
} | #!/usr/bin/env python
'''Trigger buildbot docker test build
post-commit git hook designed to automatically trigger buildbot on
the provided vagrant docker VM.'''
import requests
USERNAME = 'buildbot'
PASSWORD = 'docker'
BASE_URL = 'http://localhost:8010'
path = lambda s: BASE_URL + '/' + s
try:
session = requests.session()
session.post(path('login'),data={'username':USERNAME,'passwd':PASSWORD})
session.post(path('builders/docker/force'),
data={'forcescheduler':'trigger','reason':'Test commit'})
except:
pass
| hack/environment/post-commit | 0 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.00017386811668984592,
0.00017075682990252972,
0.00016834620328154415,
0.0001700561842881143,
0.0000023081111066858284
] |
{
"id": 5,
"code_window": [
"\t// Memory is allocated randomly for testing\n",
"\trand.Seed(time.Now().UTC().UnixNano())\n",
"\tmemMin := 33554432\n",
"\tmemMax := 536870912\n",
"\tmem := memMin + rand.Intn(memMax-memMin)\n",
"\tcontainer, err := NewBuilder(runtime).Create(&Config{\n",
"\t\tImage: GetTestImage(runtime).Id,\n",
"\t\tCmd: []string{\"/bin/true\"},\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// CPU shares as well\n",
"\tcpuMin := 100\n",
"\tcpuMax := 10000\n",
"\tcpu := cpuMin + rand.Intn(cpuMax-cpuMin)\n"
],
"file_path": "container_test.go",
"type": "add",
"edit_start_line_idx": 1061
} | package docker
import (
"bufio"
"fmt"
"io"
"io/ioutil"
"math/rand"
"os"
"regexp"
"sort"
"strings"
"testing"
"time"
)
func TestIdFormat(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
container1, err := NewBuilder(runtime).Create(
&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"/bin/sh", "-c", "echo hello world"},
},
)
if err != nil {
t.Fatal(err)
}
match, err := regexp.Match("^[0-9a-f]{64}$", []byte(container1.Id))
if err != nil {
t.Fatal(err)
}
if !match {
t.Fatalf("Invalid container ID: %s", container1.Id)
}
}
func TestMultipleAttachRestart(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
container, err := NewBuilder(runtime).Create(
&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"/bin/sh", "-c",
"i=1; while [ $i -le 5 ]; do i=`expr $i + 1`; echo hello; done"},
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
// Simulate 3 client attaching to the container and stop/restart
stdout1, err := container.StdoutPipe()
if err != nil {
t.Fatal(err)
}
stdout2, err := container.StdoutPipe()
if err != nil {
t.Fatal(err)
}
stdout3, err := container.StdoutPipe()
if err != nil {
t.Fatal(err)
}
if err := container.Start(); err != nil {
t.Fatal(err)
}
l1, err := bufio.NewReader(stdout1).ReadString('\n')
if err != nil {
t.Fatal(err)
}
if strings.Trim(l1, " \r\n") != "hello" {
t.Fatalf("Unexpected output. Expected [%s], received [%s]", "hello", l1)
}
l2, err := bufio.NewReader(stdout2).ReadString('\n')
if err != nil {
t.Fatal(err)
}
if strings.Trim(l2, " \r\n") != "hello" {
t.Fatalf("Unexpected output. Expected [%s], received [%s]", "hello", l2)
}
l3, err := bufio.NewReader(stdout3).ReadString('\n')
if err != nil {
t.Fatal(err)
}
if strings.Trim(l3, " \r\n") != "hello" {
t.Fatalf("Unexpected output. Expected [%s], received [%s]", "hello", l3)
}
if err := container.Stop(10); err != nil {
t.Fatal(err)
}
stdout1, err = container.StdoutPipe()
if err != nil {
t.Fatal(err)
}
stdout2, err = container.StdoutPipe()
if err != nil {
t.Fatal(err)
}
stdout3, err = container.StdoutPipe()
if err != nil {
t.Fatal(err)
}
if err := container.Start(); err != nil {
t.Fatal(err)
}
setTimeout(t, "Timeout reading from the process", 3*time.Second, func() {
l1, err = bufio.NewReader(stdout1).ReadString('\n')
if err != nil {
t.Fatal(err)
}
if strings.Trim(l1, " \r\n") != "hello" {
t.Fatalf("Unexpected output. Expected [%s], received [%s]", "hello", l1)
}
l2, err = bufio.NewReader(stdout2).ReadString('\n')
if err != nil {
t.Fatal(err)
}
if strings.Trim(l2, " \r\n") != "hello" {
t.Fatalf("Unexpected output. Expected [%s], received [%s]", "hello", l2)
}
l3, err = bufio.NewReader(stdout3).ReadString('\n')
if err != nil {
t.Fatal(err)
}
if strings.Trim(l3, " \r\n") != "hello" {
t.Fatalf("Unexpected output. Expected [%s], received [%s]", "hello", l3)
}
})
container.Wait()
}
func TestDiff(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
builder := NewBuilder(runtime)
// Create a container and remove a file
container1, err := builder.Create(
&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"/bin/rm", "/etc/passwd"},
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container1)
if err := container1.Run(); err != nil {
t.Fatal(err)
}
// Check the changelog
c, err := container1.Changes()
if err != nil {
t.Fatal(err)
}
success := false
for _, elem := range c {
if elem.Path == "/etc/passwd" && elem.Kind == 2 {
success = true
}
}
if !success {
t.Fatalf("/etc/passwd as been removed but is not present in the diff")
}
// Commit the container
rwTar, err := container1.ExportRw()
if err != nil {
t.Error(err)
}
img, err := runtime.graph.Create(rwTar, container1, "unit test commited image - diff", "", nil)
if err != nil {
t.Error(err)
}
// Create a new container from the commited image
container2, err := builder.Create(
&Config{
Image: img.Id,
Cmd: []string{"cat", "/etc/passwd"},
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container2)
if err := container2.Run(); err != nil {
t.Fatal(err)
}
// Check the changelog
c, err = container2.Changes()
if err != nil {
t.Fatal(err)
}
for _, elem := range c {
if elem.Path == "/etc/passwd" {
t.Fatalf("/etc/passwd should not be present in the diff after commit.")
}
}
}
func TestCommitAutoRun(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
builder := NewBuilder(runtime)
container1, err := builder.Create(
&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"/bin/sh", "-c", "echo hello > /world"},
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container1)
if container1.State.Running {
t.Errorf("Container shouldn't be running")
}
if err := container1.Run(); err != nil {
t.Fatal(err)
}
if container1.State.Running {
t.Errorf("Container shouldn't be running")
}
rwTar, err := container1.ExportRw()
if err != nil {
t.Error(err)
}
img, err := runtime.graph.Create(rwTar, container1, "unit test commited image", "", &Config{Cmd: []string{"cat", "/world"}})
if err != nil {
t.Error(err)
}
// FIXME: Make a TestCommit that stops here and check docker.root/layers/img.id/world
container2, err := builder.Create(
&Config{
Image: img.Id,
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container2)
stdout, err := container2.StdoutPipe()
if err != nil {
t.Fatal(err)
}
stderr, err := container2.StderrPipe()
if err != nil {
t.Fatal(err)
}
if err := container2.Start(); err != nil {
t.Fatal(err)
}
container2.Wait()
output, err := ioutil.ReadAll(stdout)
if err != nil {
t.Fatal(err)
}
output2, err := ioutil.ReadAll(stderr)
if err != nil {
t.Fatal(err)
}
if err := stdout.Close(); err != nil {
t.Fatal(err)
}
if err := stderr.Close(); err != nil {
t.Fatal(err)
}
if string(output) != "hello\n" {
t.Fatalf("Unexpected output. Expected %s, received: %s (err: %s)", "hello\n", output, output2)
}
}
func TestCommitRun(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
builder := NewBuilder(runtime)
container1, err := builder.Create(
&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"/bin/sh", "-c", "echo hello > /world"},
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container1)
if container1.State.Running {
t.Errorf("Container shouldn't be running")
}
if err := container1.Run(); err != nil {
t.Fatal(err)
}
if container1.State.Running {
t.Errorf("Container shouldn't be running")
}
rwTar, err := container1.ExportRw()
if err != nil {
t.Error(err)
}
img, err := runtime.graph.Create(rwTar, container1, "unit test commited image", "", nil)
if err != nil {
t.Error(err)
}
// FIXME: Make a TestCommit that stops here and check docker.root/layers/img.id/world
container2, err := builder.Create(
&Config{
Image: img.Id,
Cmd: []string{"cat", "/world"},
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container2)
stdout, err := container2.StdoutPipe()
if err != nil {
t.Fatal(err)
}
stderr, err := container2.StderrPipe()
if err != nil {
t.Fatal(err)
}
if err := container2.Start(); err != nil {
t.Fatal(err)
}
container2.Wait()
output, err := ioutil.ReadAll(stdout)
if err != nil {
t.Fatal(err)
}
output2, err := ioutil.ReadAll(stderr)
if err != nil {
t.Fatal(err)
}
if err := stdout.Close(); err != nil {
t.Fatal(err)
}
if err := stderr.Close(); err != nil {
t.Fatal(err)
}
if string(output) != "hello\n" {
t.Fatalf("Unexpected output. Expected %s, received: %s (err: %s)", "hello\n", output, output2)
}
}
func TestStart(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
container, err := NewBuilder(runtime).Create(
&Config{
Image: GetTestImage(runtime).Id,
Memory: 33554432,
Cmd: []string{"/bin/cat"},
OpenStdin: true,
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
if err := container.Start(); err != nil {
t.Fatal(err)
}
// Give some time to the process to start
container.WaitTimeout(500 * time.Millisecond)
if !container.State.Running {
t.Errorf("Container should be running")
}
if err := container.Start(); err == nil {
t.Fatalf("A running containter should be able to be started")
}
// Try to avoid the timeoout in destroy. Best effort, don't check error
cStdin, _ := container.StdinPipe()
cStdin.Close()
container.WaitTimeout(2 * time.Second)
}
func TestRun(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
container, err := NewBuilder(runtime).Create(
&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"ls", "-al"},
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
if container.State.Running {
t.Errorf("Container shouldn't be running")
}
if err := container.Run(); err != nil {
t.Fatal(err)
}
if container.State.Running {
t.Errorf("Container shouldn't be running")
}
}
func TestOutput(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
container, err := NewBuilder(runtime).Create(
&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"echo", "-n", "foobar"},
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
output, err := container.Output()
if err != nil {
t.Fatal(err)
}
if string(output) != "foobar" {
t.Error(string(output))
}
}
func TestKillDifferentUser(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
container, err := NewBuilder(runtime).Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"tail", "-f", "/etc/resolv.conf"},
User: "daemon",
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
if container.State.Running {
t.Errorf("Container shouldn't be running")
}
if err := container.Start(); err != nil {
t.Fatal(err)
}
// Give some time to lxc to spawn the process (setuid might take some time)
container.WaitTimeout(500 * time.Millisecond)
if !container.State.Running {
t.Errorf("Container should be running")
}
if err := container.Kill(); err != nil {
t.Fatal(err)
}
if container.State.Running {
t.Errorf("Container shouldn't be running")
}
container.Wait()
if container.State.Running {
t.Errorf("Container shouldn't be running")
}
// Try stopping twice
if err := container.Kill(); err != nil {
t.Fatal(err)
}
}
func TestKill(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
container, err := NewBuilder(runtime).Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"cat", "/dev/zero"},
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
if container.State.Running {
t.Errorf("Container shouldn't be running")
}
if err := container.Start(); err != nil {
t.Fatal(err)
}
// Give some time to lxc to spawn the process
container.WaitTimeout(500 * time.Millisecond)
if !container.State.Running {
t.Errorf("Container should be running")
}
if err := container.Kill(); err != nil {
t.Fatal(err)
}
if container.State.Running {
t.Errorf("Container shouldn't be running")
}
container.Wait()
if container.State.Running {
t.Errorf("Container shouldn't be running")
}
// Try stopping twice
if err := container.Kill(); err != nil {
t.Fatal(err)
}
}
func TestExitCode(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
builder := NewBuilder(runtime)
trueContainer, err := builder.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"/bin/true", ""},
})
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(trueContainer)
if err := trueContainer.Run(); err != nil {
t.Fatal(err)
}
if trueContainer.State.ExitCode != 0 {
t.Errorf("Unexpected exit code %d (expected 0)", trueContainer.State.ExitCode)
}
falseContainer, err := builder.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"/bin/false", ""},
})
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(falseContainer)
if err := falseContainer.Run(); err != nil {
t.Fatal(err)
}
if falseContainer.State.ExitCode != 1 {
t.Errorf("Unexpected exit code %d (expected 1)", falseContainer.State.ExitCode)
}
}
func TestRestart(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
container, err := NewBuilder(runtime).Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"echo", "-n", "foobar"},
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
output, err := container.Output()
if err != nil {
t.Fatal(err)
}
if string(output) != "foobar" {
t.Error(string(output))
}
// Run the container again and check the output
output, err = container.Output()
if err != nil {
t.Fatal(err)
}
if string(output) != "foobar" {
t.Error(string(output))
}
}
func TestRestartStdin(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
container, err := NewBuilder(runtime).Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"cat"},
OpenStdin: true,
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
stdin, err := container.StdinPipe()
if err != nil {
t.Fatal(err)
}
stdout, err := container.StdoutPipe()
if err != nil {
t.Fatal(err)
}
if err := container.Start(); err != nil {
t.Fatal(err)
}
if _, err := io.WriteString(stdin, "hello world"); err != nil {
t.Fatal(err)
}
if err := stdin.Close(); err != nil {
t.Fatal(err)
}
container.Wait()
output, err := ioutil.ReadAll(stdout)
if err != nil {
t.Fatal(err)
}
if err := stdout.Close(); err != nil {
t.Fatal(err)
}
if string(output) != "hello world" {
t.Fatalf("Unexpected output. Expected %s, received: %s", "hello world", string(output))
}
// Restart and try again
stdin, err = container.StdinPipe()
if err != nil {
t.Fatal(err)
}
stdout, err = container.StdoutPipe()
if err != nil {
t.Fatal(err)
}
if err := container.Start(); err != nil {
t.Fatal(err)
}
if _, err := io.WriteString(stdin, "hello world #2"); err != nil {
t.Fatal(err)
}
if err := stdin.Close(); err != nil {
t.Fatal(err)
}
container.Wait()
output, err = ioutil.ReadAll(stdout)
if err != nil {
t.Fatal(err)
}
if err := stdout.Close(); err != nil {
t.Fatal(err)
}
if string(output) != "hello world #2" {
t.Fatalf("Unexpected output. Expected %s, received: %s", "hello world #2", string(output))
}
}
func TestUser(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
builder := NewBuilder(runtime)
// Default user must be root
container, err := builder.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"id"},
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
output, err := container.Output()
if err != nil {
t.Fatal(err)
}
if !strings.Contains(string(output), "uid=0(root) gid=0(root)") {
t.Error(string(output))
}
// Set a username
container, err = builder.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"id"},
User: "root",
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
output, err = container.Output()
if err != nil || container.State.ExitCode != 0 {
t.Fatal(err)
}
if !strings.Contains(string(output), "uid=0(root) gid=0(root)") {
t.Error(string(output))
}
// Set a UID
container, err = builder.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"id"},
User: "0",
},
)
if err != nil || container.State.ExitCode != 0 {
t.Fatal(err)
}
defer runtime.Destroy(container)
output, err = container.Output()
if err != nil || container.State.ExitCode != 0 {
t.Fatal(err)
}
if !strings.Contains(string(output), "uid=0(root) gid=0(root)") {
t.Error(string(output))
}
// Set a different user by uid
container, err = builder.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"id"},
User: "1",
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
output, err = container.Output()
if err != nil {
t.Fatal(err)
} else if container.State.ExitCode != 0 {
t.Fatalf("Container exit code is invalid: %d\nOutput:\n%s\n", container.State.ExitCode, output)
}
if !strings.Contains(string(output), "uid=1(daemon) gid=1(daemon)") {
t.Error(string(output))
}
// Set a different user by username
container, err = builder.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"id"},
User: "daemon",
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
output, err = container.Output()
if err != nil || container.State.ExitCode != 0 {
t.Fatal(err)
}
if !strings.Contains(string(output), "uid=1(daemon) gid=1(daemon)") {
t.Error(string(output))
}
}
func TestMultipleContainers(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
builder := NewBuilder(runtime)
container1, err := builder.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"cat", "/dev/zero"},
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container1)
container2, err := builder.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"cat", "/dev/zero"},
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container2)
// Start both containers
if err := container1.Start(); err != nil {
t.Fatal(err)
}
if err := container2.Start(); err != nil {
t.Fatal(err)
}
// Make sure they are running before trying to kill them
container1.WaitTimeout(250 * time.Millisecond)
container2.WaitTimeout(250 * time.Millisecond)
// If we are here, both containers should be running
if !container1.State.Running {
t.Fatal("Container not running")
}
if !container2.State.Running {
t.Fatal("Container not running")
}
// Kill them
if err := container1.Kill(); err != nil {
t.Fatal(err)
}
if err := container2.Kill(); err != nil {
t.Fatal(err)
}
}
func TestStdin(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
container, err := NewBuilder(runtime).Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"cat"},
OpenStdin: true,
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
stdin, err := container.StdinPipe()
if err != nil {
t.Fatal(err)
}
stdout, err := container.StdoutPipe()
if err != nil {
t.Fatal(err)
}
if err := container.Start(); err != nil {
t.Fatal(err)
}
defer stdin.Close()
defer stdout.Close()
if _, err := io.WriteString(stdin, "hello world"); err != nil {
t.Fatal(err)
}
if err := stdin.Close(); err != nil {
t.Fatal(err)
}
container.Wait()
output, err := ioutil.ReadAll(stdout)
if err != nil {
t.Fatal(err)
}
if string(output) != "hello world" {
t.Fatalf("Unexpected output. Expected %s, received: %s", "hello world", string(output))
}
}
func TestTty(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
container, err := NewBuilder(runtime).Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"cat"},
OpenStdin: true,
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
stdin, err := container.StdinPipe()
if err != nil {
t.Fatal(err)
}
stdout, err := container.StdoutPipe()
if err != nil {
t.Fatal(err)
}
if err := container.Start(); err != nil {
t.Fatal(err)
}
defer stdin.Close()
defer stdout.Close()
if _, err := io.WriteString(stdin, "hello world"); err != nil {
t.Fatal(err)
}
if err := stdin.Close(); err != nil {
t.Fatal(err)
}
container.Wait()
output, err := ioutil.ReadAll(stdout)
if err != nil {
t.Fatal(err)
}
if string(output) != "hello world" {
t.Fatalf("Unexpected output. Expected %s, received: %s", "hello world", string(output))
}
}
func TestEnv(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
container, err := NewBuilder(runtime).Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"/usr/bin/env"},
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
stdout, err := container.StdoutPipe()
if err != nil {
t.Fatal(err)
}
defer stdout.Close()
if err := container.Start(); err != nil {
t.Fatal(err)
}
container.Wait()
output, err := ioutil.ReadAll(stdout)
if err != nil {
t.Fatal(err)
}
actualEnv := strings.Split(string(output), "\n")
if actualEnv[len(actualEnv)-1] == "" {
actualEnv = actualEnv[:len(actualEnv)-1]
}
sort.Strings(actualEnv)
goodEnv := []string{
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"HOME=/",
}
sort.Strings(goodEnv)
if len(goodEnv) != len(actualEnv) {
t.Fatalf("Wrong environment: should be %d variables, not: '%s'\n", len(goodEnv), strings.Join(actualEnv, ", "))
}
for i := range goodEnv {
if actualEnv[i] != goodEnv[i] {
t.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i])
}
}
}
func grepFile(t *testing.T, path string, pattern string) {
f, err := os.Open(path)
if err != nil {
t.Fatal(err)
}
defer f.Close()
r := bufio.NewReader(f)
var (
line string
)
err = nil
for err == nil {
line, err = r.ReadString('\n')
if strings.Contains(line, pattern) == true {
return
}
}
t.Fatalf("grepFile: pattern \"%s\" not found in \"%s\"", pattern, path)
}
func TestLXCConfig(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
// Memory is allocated randomly for testing
rand.Seed(time.Now().UTC().UnixNano())
memMin := 33554432
memMax := 536870912
mem := memMin + rand.Intn(memMax-memMin)
container, err := NewBuilder(runtime).Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"/bin/true"},
Hostname: "foobar",
Memory: int64(mem),
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
container.generateLXCConfig()
grepFile(t, container.lxcConfigPath(), "lxc.utsname = foobar")
grepFile(t, container.lxcConfigPath(),
fmt.Sprintf("lxc.cgroup.memory.limit_in_bytes = %d", mem))
grepFile(t, container.lxcConfigPath(),
fmt.Sprintf("lxc.cgroup.memory.memsw.limit_in_bytes = %d", mem*2))
}
func BenchmarkRunSequencial(b *testing.B) {
runtime, err := newTestRuntime()
if err != nil {
b.Fatal(err)
}
defer nuke(runtime)
for i := 0; i < b.N; i++ {
container, err := NewBuilder(runtime).Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"echo", "-n", "foo"},
},
)
if err != nil {
b.Fatal(err)
}
defer runtime.Destroy(container)
output, err := container.Output()
if err != nil {
b.Fatal(err)
}
if string(output) != "foo" {
b.Fatalf("Unexpected output: %s", output)
}
if err := runtime.Destroy(container); err != nil {
b.Fatal(err)
}
}
}
func BenchmarkRunParallel(b *testing.B) {
runtime, err := newTestRuntime()
if err != nil {
b.Fatal(err)
}
defer nuke(runtime)
var tasks []chan error
for i := 0; i < b.N; i++ {
complete := make(chan error)
tasks = append(tasks, complete)
go func(i int, complete chan error) {
container, err := NewBuilder(runtime).Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"echo", "-n", "foo"},
},
)
if err != nil {
complete <- err
return
}
defer runtime.Destroy(container)
if err := container.Start(); err != nil {
complete <- err
return
}
if err := container.WaitTimeout(15 * time.Second); err != nil {
complete <- err
return
}
// if string(output) != "foo" {
// complete <- fmt.Errorf("Unexecpted output: %v", string(output))
// }
if err := runtime.Destroy(container); err != nil {
complete <- err
return
}
complete <- nil
}(i, complete)
}
var errors []error
for _, task := range tasks {
err := <-task
if err != nil {
errors = append(errors, err)
}
}
if len(errors) > 0 {
b.Fatal(errors)
}
}
| container_test.go | 1 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.998063862323761,
0.5865391492843628,
0.00016335112741217017,
0.9765823483467102,
0.45986995100975037
] |
{
"id": 5,
"code_window": [
"\t// Memory is allocated randomly for testing\n",
"\trand.Seed(time.Now().UTC().UnixNano())\n",
"\tmemMin := 33554432\n",
"\tmemMax := 536870912\n",
"\tmem := memMin + rand.Intn(memMax-memMin)\n",
"\tcontainer, err := NewBuilder(runtime).Create(&Config{\n",
"\t\tImage: GetTestImage(runtime).Id,\n",
"\t\tCmd: []string{\"/bin/true\"},\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// CPU shares as well\n",
"\tcpuMin := 100\n",
"\tcpuMax := 10000\n",
"\tcpu := cpuMin + rand.Intn(cpuMax-cpuMin)\n"
],
"file_path": "container_test.go",
"type": "add",
"edit_start_line_idx": 1061
} | # Vagrant-docker
This is a placeholder for the official vagrant-docker, a plugin for Vagrant (http://vagrantup.com) which exposes Docker as a provider.
| contrib/vagrant-docker/README.md | 0 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.0001788694498827681,
0.0001788694498827681,
0.0001788694498827681,
0.0001788694498827681,
0
] |
{
"id": 5,
"code_window": [
"\t// Memory is allocated randomly for testing\n",
"\trand.Seed(time.Now().UTC().UnixNano())\n",
"\tmemMin := 33554432\n",
"\tmemMax := 536870912\n",
"\tmem := memMin + rand.Intn(memMax-memMin)\n",
"\tcontainer, err := NewBuilder(runtime).Create(&Config{\n",
"\t\tImage: GetTestImage(runtime).Id,\n",
"\t\tCmd: []string{\"/bin/true\"},\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// CPU shares as well\n",
"\tcpuMin := 100\n",
"\tcpuMax := 10000\n",
"\tcpu := cpuMin + rand.Intn(cpuMax-cpuMin)\n"
],
"file_path": "container_test.go",
"type": "add",
"edit_start_line_idx": 1061
} | FAQ
===
Most frequently asked questions.
--------------------------------
1. **How much does Docker cost?**
Docker is 100% free, it is open source, so you can use it without paying.
2. **What open source license are you using?**
We are using the Apache License Version 2.0, see it here: https://github.com/dotcloud/docker/blob/master/LICENSE
3. **Does Docker run on Mac OS X or Windows?**
Not at this time, Docker currently only runs on Linux, but you can use VirtualBox to run Docker in a virtual machine on your box, and get the best of both worlds. Check out the MacOSX_ and Windows_ intallation guides.
4. **How do containers compare to virtual machines?**
They are complementary. VMs are best used to allocate chunks of hardware resources. Containers operate at the process level, which makes them very lightweight and perfect as a unit of software delivery.
5. **Can I help by adding some questions and answers?**
Definitely! You can fork `the repo`_ and edit the documentation sources.
42. **Where can I find more answers?**
You can find more answers on:
* `IRC: docker on freenode`_
* `Github`_
* `Ask questions on Stackoverflow`_
* `Join the conversation on Twitter`_
.. _Windows: ../documentation/installation/windows.html
.. _MacOSX: ../documentation/installation/macos.html
.. _the repo: http://www.github.com/dotcloud/docker
.. _IRC\: docker on freenode: irc://chat.freenode.net#docker
.. _Github: http://www.github.com/dotcloud/docker
.. _Ask questions on Stackoverflow: http://stackoverflow.com/search?q=docker
.. _Join the conversation on Twitter: http://twitter.com/getdocker
Looking for something else to read? Checkout the :ref:`hello_world` example.
| docs/sources/faq.rst | 0 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.00017708621453493834,
0.0001728151401039213,
0.0001670049678068608,
0.0001748471549944952,
0.00000382282814825885
] |
{
"id": 5,
"code_window": [
"\t// Memory is allocated randomly for testing\n",
"\trand.Seed(time.Now().UTC().UnixNano())\n",
"\tmemMin := 33554432\n",
"\tmemMax := 536870912\n",
"\tmem := memMin + rand.Intn(memMax-memMin)\n",
"\tcontainer, err := NewBuilder(runtime).Create(&Config{\n",
"\t\tImage: GetTestImage(runtime).Id,\n",
"\t\tCmd: []string{\"/bin/true\"},\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// CPU shares as well\n",
"\tcpuMin := 100\n",
"\tcpuMax := 10000\n",
"\tcpu := cpuMin + rand.Intn(cpuMax-cpuMin)\n"
],
"file_path": "container_test.go",
"type": "add",
"edit_start_line_idx": 1061
} | docs/theme/docker/static/css/variables.css | 0 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.00017131979984696954,
0.00017131979984696954,
0.00017131979984696954,
0.00017131979984696954,
0
] |
|
{
"id": 6,
"code_window": [
"\tcontainer, err := NewBuilder(runtime).Create(&Config{\n",
"\t\tImage: GetTestImage(runtime).Id,\n",
"\t\tCmd: []string{\"/bin/true\"},\n",
"\n",
"\t\tHostname: \"foobar\",\n",
"\t\tMemory: int64(mem),\n",
"\t},\n",
"\t)\n",
"\tif err != nil {\n",
"\t\tt.Fatal(err)\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tHostname: \"foobar\",\n",
"\t\tMemory: int64(mem),\n",
"\t\tCpuShares: int64(cpu),\n"
],
"file_path": "container_test.go",
"type": "replace",
"edit_start_line_idx": 1065
} | package docker
import (
"bufio"
"fmt"
"io"
"io/ioutil"
"math/rand"
"os"
"regexp"
"sort"
"strings"
"testing"
"time"
)
func TestIdFormat(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
container1, err := NewBuilder(runtime).Create(
&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"/bin/sh", "-c", "echo hello world"},
},
)
if err != nil {
t.Fatal(err)
}
match, err := regexp.Match("^[0-9a-f]{64}$", []byte(container1.Id))
if err != nil {
t.Fatal(err)
}
if !match {
t.Fatalf("Invalid container ID: %s", container1.Id)
}
}
func TestMultipleAttachRestart(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
container, err := NewBuilder(runtime).Create(
&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"/bin/sh", "-c",
"i=1; while [ $i -le 5 ]; do i=`expr $i + 1`; echo hello; done"},
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
// Simulate 3 client attaching to the container and stop/restart
stdout1, err := container.StdoutPipe()
if err != nil {
t.Fatal(err)
}
stdout2, err := container.StdoutPipe()
if err != nil {
t.Fatal(err)
}
stdout3, err := container.StdoutPipe()
if err != nil {
t.Fatal(err)
}
if err := container.Start(); err != nil {
t.Fatal(err)
}
l1, err := bufio.NewReader(stdout1).ReadString('\n')
if err != nil {
t.Fatal(err)
}
if strings.Trim(l1, " \r\n") != "hello" {
t.Fatalf("Unexpected output. Expected [%s], received [%s]", "hello", l1)
}
l2, err := bufio.NewReader(stdout2).ReadString('\n')
if err != nil {
t.Fatal(err)
}
if strings.Trim(l2, " \r\n") != "hello" {
t.Fatalf("Unexpected output. Expected [%s], received [%s]", "hello", l2)
}
l3, err := bufio.NewReader(stdout3).ReadString('\n')
if err != nil {
t.Fatal(err)
}
if strings.Trim(l3, " \r\n") != "hello" {
t.Fatalf("Unexpected output. Expected [%s], received [%s]", "hello", l3)
}
if err := container.Stop(10); err != nil {
t.Fatal(err)
}
stdout1, err = container.StdoutPipe()
if err != nil {
t.Fatal(err)
}
stdout2, err = container.StdoutPipe()
if err != nil {
t.Fatal(err)
}
stdout3, err = container.StdoutPipe()
if err != nil {
t.Fatal(err)
}
if err := container.Start(); err != nil {
t.Fatal(err)
}
setTimeout(t, "Timeout reading from the process", 3*time.Second, func() {
l1, err = bufio.NewReader(stdout1).ReadString('\n')
if err != nil {
t.Fatal(err)
}
if strings.Trim(l1, " \r\n") != "hello" {
t.Fatalf("Unexpected output. Expected [%s], received [%s]", "hello", l1)
}
l2, err = bufio.NewReader(stdout2).ReadString('\n')
if err != nil {
t.Fatal(err)
}
if strings.Trim(l2, " \r\n") != "hello" {
t.Fatalf("Unexpected output. Expected [%s], received [%s]", "hello", l2)
}
l3, err = bufio.NewReader(stdout3).ReadString('\n')
if err != nil {
t.Fatal(err)
}
if strings.Trim(l3, " \r\n") != "hello" {
t.Fatalf("Unexpected output. Expected [%s], received [%s]", "hello", l3)
}
})
container.Wait()
}
func TestDiff(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
builder := NewBuilder(runtime)
// Create a container and remove a file
container1, err := builder.Create(
&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"/bin/rm", "/etc/passwd"},
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container1)
if err := container1.Run(); err != nil {
t.Fatal(err)
}
// Check the changelog
c, err := container1.Changes()
if err != nil {
t.Fatal(err)
}
success := false
for _, elem := range c {
if elem.Path == "/etc/passwd" && elem.Kind == 2 {
success = true
}
}
if !success {
t.Fatalf("/etc/passwd as been removed but is not present in the diff")
}
// Commit the container
rwTar, err := container1.ExportRw()
if err != nil {
t.Error(err)
}
img, err := runtime.graph.Create(rwTar, container1, "unit test commited image - diff", "", nil)
if err != nil {
t.Error(err)
}
// Create a new container from the commited image
container2, err := builder.Create(
&Config{
Image: img.Id,
Cmd: []string{"cat", "/etc/passwd"},
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container2)
if err := container2.Run(); err != nil {
t.Fatal(err)
}
// Check the changelog
c, err = container2.Changes()
if err != nil {
t.Fatal(err)
}
for _, elem := range c {
if elem.Path == "/etc/passwd" {
t.Fatalf("/etc/passwd should not be present in the diff after commit.")
}
}
}
func TestCommitAutoRun(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
builder := NewBuilder(runtime)
container1, err := builder.Create(
&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"/bin/sh", "-c", "echo hello > /world"},
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container1)
if container1.State.Running {
t.Errorf("Container shouldn't be running")
}
if err := container1.Run(); err != nil {
t.Fatal(err)
}
if container1.State.Running {
t.Errorf("Container shouldn't be running")
}
rwTar, err := container1.ExportRw()
if err != nil {
t.Error(err)
}
img, err := runtime.graph.Create(rwTar, container1, "unit test commited image", "", &Config{Cmd: []string{"cat", "/world"}})
if err != nil {
t.Error(err)
}
// FIXME: Make a TestCommit that stops here and check docker.root/layers/img.id/world
container2, err := builder.Create(
&Config{
Image: img.Id,
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container2)
stdout, err := container2.StdoutPipe()
if err != nil {
t.Fatal(err)
}
stderr, err := container2.StderrPipe()
if err != nil {
t.Fatal(err)
}
if err := container2.Start(); err != nil {
t.Fatal(err)
}
container2.Wait()
output, err := ioutil.ReadAll(stdout)
if err != nil {
t.Fatal(err)
}
output2, err := ioutil.ReadAll(stderr)
if err != nil {
t.Fatal(err)
}
if err := stdout.Close(); err != nil {
t.Fatal(err)
}
if err := stderr.Close(); err != nil {
t.Fatal(err)
}
if string(output) != "hello\n" {
t.Fatalf("Unexpected output. Expected %s, received: %s (err: %s)", "hello\n", output, output2)
}
}
func TestCommitRun(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
builder := NewBuilder(runtime)
container1, err := builder.Create(
&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"/bin/sh", "-c", "echo hello > /world"},
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container1)
if container1.State.Running {
t.Errorf("Container shouldn't be running")
}
if err := container1.Run(); err != nil {
t.Fatal(err)
}
if container1.State.Running {
t.Errorf("Container shouldn't be running")
}
rwTar, err := container1.ExportRw()
if err != nil {
t.Error(err)
}
img, err := runtime.graph.Create(rwTar, container1, "unit test commited image", "", nil)
if err != nil {
t.Error(err)
}
// FIXME: Make a TestCommit that stops here and check docker.root/layers/img.id/world
container2, err := builder.Create(
&Config{
Image: img.Id,
Cmd: []string{"cat", "/world"},
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container2)
stdout, err := container2.StdoutPipe()
if err != nil {
t.Fatal(err)
}
stderr, err := container2.StderrPipe()
if err != nil {
t.Fatal(err)
}
if err := container2.Start(); err != nil {
t.Fatal(err)
}
container2.Wait()
output, err := ioutil.ReadAll(stdout)
if err != nil {
t.Fatal(err)
}
output2, err := ioutil.ReadAll(stderr)
if err != nil {
t.Fatal(err)
}
if err := stdout.Close(); err != nil {
t.Fatal(err)
}
if err := stderr.Close(); err != nil {
t.Fatal(err)
}
if string(output) != "hello\n" {
t.Fatalf("Unexpected output. Expected %s, received: %s (err: %s)", "hello\n", output, output2)
}
}
func TestStart(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
container, err := NewBuilder(runtime).Create(
&Config{
Image: GetTestImage(runtime).Id,
Memory: 33554432,
Cmd: []string{"/bin/cat"},
OpenStdin: true,
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
if err := container.Start(); err != nil {
t.Fatal(err)
}
// Give some time to the process to start
container.WaitTimeout(500 * time.Millisecond)
if !container.State.Running {
t.Errorf("Container should be running")
}
if err := container.Start(); err == nil {
t.Fatalf("A running containter should be able to be started")
}
// Try to avoid the timeoout in destroy. Best effort, don't check error
cStdin, _ := container.StdinPipe()
cStdin.Close()
container.WaitTimeout(2 * time.Second)
}
func TestRun(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
container, err := NewBuilder(runtime).Create(
&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"ls", "-al"},
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
if container.State.Running {
t.Errorf("Container shouldn't be running")
}
if err := container.Run(); err != nil {
t.Fatal(err)
}
if container.State.Running {
t.Errorf("Container shouldn't be running")
}
}
func TestOutput(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
container, err := NewBuilder(runtime).Create(
&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"echo", "-n", "foobar"},
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
output, err := container.Output()
if err != nil {
t.Fatal(err)
}
if string(output) != "foobar" {
t.Error(string(output))
}
}
func TestKillDifferentUser(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
container, err := NewBuilder(runtime).Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"tail", "-f", "/etc/resolv.conf"},
User: "daemon",
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
if container.State.Running {
t.Errorf("Container shouldn't be running")
}
if err := container.Start(); err != nil {
t.Fatal(err)
}
// Give some time to lxc to spawn the process (setuid might take some time)
container.WaitTimeout(500 * time.Millisecond)
if !container.State.Running {
t.Errorf("Container should be running")
}
if err := container.Kill(); err != nil {
t.Fatal(err)
}
if container.State.Running {
t.Errorf("Container shouldn't be running")
}
container.Wait()
if container.State.Running {
t.Errorf("Container shouldn't be running")
}
// Try stopping twice
if err := container.Kill(); err != nil {
t.Fatal(err)
}
}
func TestKill(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
container, err := NewBuilder(runtime).Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"cat", "/dev/zero"},
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
if container.State.Running {
t.Errorf("Container shouldn't be running")
}
if err := container.Start(); err != nil {
t.Fatal(err)
}
// Give some time to lxc to spawn the process
container.WaitTimeout(500 * time.Millisecond)
if !container.State.Running {
t.Errorf("Container should be running")
}
if err := container.Kill(); err != nil {
t.Fatal(err)
}
if container.State.Running {
t.Errorf("Container shouldn't be running")
}
container.Wait()
if container.State.Running {
t.Errorf("Container shouldn't be running")
}
// Try stopping twice
if err := container.Kill(); err != nil {
t.Fatal(err)
}
}
func TestExitCode(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
builder := NewBuilder(runtime)
trueContainer, err := builder.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"/bin/true", ""},
})
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(trueContainer)
if err := trueContainer.Run(); err != nil {
t.Fatal(err)
}
if trueContainer.State.ExitCode != 0 {
t.Errorf("Unexpected exit code %d (expected 0)", trueContainer.State.ExitCode)
}
falseContainer, err := builder.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"/bin/false", ""},
})
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(falseContainer)
if err := falseContainer.Run(); err != nil {
t.Fatal(err)
}
if falseContainer.State.ExitCode != 1 {
t.Errorf("Unexpected exit code %d (expected 1)", falseContainer.State.ExitCode)
}
}
func TestRestart(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
container, err := NewBuilder(runtime).Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"echo", "-n", "foobar"},
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
output, err := container.Output()
if err != nil {
t.Fatal(err)
}
if string(output) != "foobar" {
t.Error(string(output))
}
// Run the container again and check the output
output, err = container.Output()
if err != nil {
t.Fatal(err)
}
if string(output) != "foobar" {
t.Error(string(output))
}
}
func TestRestartStdin(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
container, err := NewBuilder(runtime).Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"cat"},
OpenStdin: true,
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
stdin, err := container.StdinPipe()
if err != nil {
t.Fatal(err)
}
stdout, err := container.StdoutPipe()
if err != nil {
t.Fatal(err)
}
if err := container.Start(); err != nil {
t.Fatal(err)
}
if _, err := io.WriteString(stdin, "hello world"); err != nil {
t.Fatal(err)
}
if err := stdin.Close(); err != nil {
t.Fatal(err)
}
container.Wait()
output, err := ioutil.ReadAll(stdout)
if err != nil {
t.Fatal(err)
}
if err := stdout.Close(); err != nil {
t.Fatal(err)
}
if string(output) != "hello world" {
t.Fatalf("Unexpected output. Expected %s, received: %s", "hello world", string(output))
}
// Restart and try again
stdin, err = container.StdinPipe()
if err != nil {
t.Fatal(err)
}
stdout, err = container.StdoutPipe()
if err != nil {
t.Fatal(err)
}
if err := container.Start(); err != nil {
t.Fatal(err)
}
if _, err := io.WriteString(stdin, "hello world #2"); err != nil {
t.Fatal(err)
}
if err := stdin.Close(); err != nil {
t.Fatal(err)
}
container.Wait()
output, err = ioutil.ReadAll(stdout)
if err != nil {
t.Fatal(err)
}
if err := stdout.Close(); err != nil {
t.Fatal(err)
}
if string(output) != "hello world #2" {
t.Fatalf("Unexpected output. Expected %s, received: %s", "hello world #2", string(output))
}
}
func TestUser(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
builder := NewBuilder(runtime)
// Default user must be root
container, err := builder.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"id"},
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
output, err := container.Output()
if err != nil {
t.Fatal(err)
}
if !strings.Contains(string(output), "uid=0(root) gid=0(root)") {
t.Error(string(output))
}
// Set a username
container, err = builder.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"id"},
User: "root",
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
output, err = container.Output()
if err != nil || container.State.ExitCode != 0 {
t.Fatal(err)
}
if !strings.Contains(string(output), "uid=0(root) gid=0(root)") {
t.Error(string(output))
}
// Set a UID
container, err = builder.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"id"},
User: "0",
},
)
if err != nil || container.State.ExitCode != 0 {
t.Fatal(err)
}
defer runtime.Destroy(container)
output, err = container.Output()
if err != nil || container.State.ExitCode != 0 {
t.Fatal(err)
}
if !strings.Contains(string(output), "uid=0(root) gid=0(root)") {
t.Error(string(output))
}
// Set a different user by uid
container, err = builder.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"id"},
User: "1",
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
output, err = container.Output()
if err != nil {
t.Fatal(err)
} else if container.State.ExitCode != 0 {
t.Fatalf("Container exit code is invalid: %d\nOutput:\n%s\n", container.State.ExitCode, output)
}
if !strings.Contains(string(output), "uid=1(daemon) gid=1(daemon)") {
t.Error(string(output))
}
// Set a different user by username
container, err = builder.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"id"},
User: "daemon",
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
output, err = container.Output()
if err != nil || container.State.ExitCode != 0 {
t.Fatal(err)
}
if !strings.Contains(string(output), "uid=1(daemon) gid=1(daemon)") {
t.Error(string(output))
}
}
func TestMultipleContainers(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
builder := NewBuilder(runtime)
container1, err := builder.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"cat", "/dev/zero"},
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container1)
container2, err := builder.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"cat", "/dev/zero"},
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container2)
// Start both containers
if err := container1.Start(); err != nil {
t.Fatal(err)
}
if err := container2.Start(); err != nil {
t.Fatal(err)
}
// Make sure they are running before trying to kill them
container1.WaitTimeout(250 * time.Millisecond)
container2.WaitTimeout(250 * time.Millisecond)
// If we are here, both containers should be running
if !container1.State.Running {
t.Fatal("Container not running")
}
if !container2.State.Running {
t.Fatal("Container not running")
}
// Kill them
if err := container1.Kill(); err != nil {
t.Fatal(err)
}
if err := container2.Kill(); err != nil {
t.Fatal(err)
}
}
func TestStdin(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
container, err := NewBuilder(runtime).Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"cat"},
OpenStdin: true,
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
stdin, err := container.StdinPipe()
if err != nil {
t.Fatal(err)
}
stdout, err := container.StdoutPipe()
if err != nil {
t.Fatal(err)
}
if err := container.Start(); err != nil {
t.Fatal(err)
}
defer stdin.Close()
defer stdout.Close()
if _, err := io.WriteString(stdin, "hello world"); err != nil {
t.Fatal(err)
}
if err := stdin.Close(); err != nil {
t.Fatal(err)
}
container.Wait()
output, err := ioutil.ReadAll(stdout)
if err != nil {
t.Fatal(err)
}
if string(output) != "hello world" {
t.Fatalf("Unexpected output. Expected %s, received: %s", "hello world", string(output))
}
}
func TestTty(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
container, err := NewBuilder(runtime).Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"cat"},
OpenStdin: true,
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
stdin, err := container.StdinPipe()
if err != nil {
t.Fatal(err)
}
stdout, err := container.StdoutPipe()
if err != nil {
t.Fatal(err)
}
if err := container.Start(); err != nil {
t.Fatal(err)
}
defer stdin.Close()
defer stdout.Close()
if _, err := io.WriteString(stdin, "hello world"); err != nil {
t.Fatal(err)
}
if err := stdin.Close(); err != nil {
t.Fatal(err)
}
container.Wait()
output, err := ioutil.ReadAll(stdout)
if err != nil {
t.Fatal(err)
}
if string(output) != "hello world" {
t.Fatalf("Unexpected output. Expected %s, received: %s", "hello world", string(output))
}
}
func TestEnv(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
container, err := NewBuilder(runtime).Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"/usr/bin/env"},
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
stdout, err := container.StdoutPipe()
if err != nil {
t.Fatal(err)
}
defer stdout.Close()
if err := container.Start(); err != nil {
t.Fatal(err)
}
container.Wait()
output, err := ioutil.ReadAll(stdout)
if err != nil {
t.Fatal(err)
}
actualEnv := strings.Split(string(output), "\n")
if actualEnv[len(actualEnv)-1] == "" {
actualEnv = actualEnv[:len(actualEnv)-1]
}
sort.Strings(actualEnv)
goodEnv := []string{
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"HOME=/",
}
sort.Strings(goodEnv)
if len(goodEnv) != len(actualEnv) {
t.Fatalf("Wrong environment: should be %d variables, not: '%s'\n", len(goodEnv), strings.Join(actualEnv, ", "))
}
for i := range goodEnv {
if actualEnv[i] != goodEnv[i] {
t.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i])
}
}
}
func grepFile(t *testing.T, path string, pattern string) {
f, err := os.Open(path)
if err != nil {
t.Fatal(err)
}
defer f.Close()
r := bufio.NewReader(f)
var (
line string
)
err = nil
for err == nil {
line, err = r.ReadString('\n')
if strings.Contains(line, pattern) == true {
return
}
}
t.Fatalf("grepFile: pattern \"%s\" not found in \"%s\"", pattern, path)
}
func TestLXCConfig(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
// Memory is allocated randomly for testing
rand.Seed(time.Now().UTC().UnixNano())
memMin := 33554432
memMax := 536870912
mem := memMin + rand.Intn(memMax-memMin)
container, err := NewBuilder(runtime).Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"/bin/true"},
Hostname: "foobar",
Memory: int64(mem),
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
container.generateLXCConfig()
grepFile(t, container.lxcConfigPath(), "lxc.utsname = foobar")
grepFile(t, container.lxcConfigPath(),
fmt.Sprintf("lxc.cgroup.memory.limit_in_bytes = %d", mem))
grepFile(t, container.lxcConfigPath(),
fmt.Sprintf("lxc.cgroup.memory.memsw.limit_in_bytes = %d", mem*2))
}
func BenchmarkRunSequencial(b *testing.B) {
runtime, err := newTestRuntime()
if err != nil {
b.Fatal(err)
}
defer nuke(runtime)
for i := 0; i < b.N; i++ {
container, err := NewBuilder(runtime).Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"echo", "-n", "foo"},
},
)
if err != nil {
b.Fatal(err)
}
defer runtime.Destroy(container)
output, err := container.Output()
if err != nil {
b.Fatal(err)
}
if string(output) != "foo" {
b.Fatalf("Unexpected output: %s", output)
}
if err := runtime.Destroy(container); err != nil {
b.Fatal(err)
}
}
}
func BenchmarkRunParallel(b *testing.B) {
runtime, err := newTestRuntime()
if err != nil {
b.Fatal(err)
}
defer nuke(runtime)
var tasks []chan error
for i := 0; i < b.N; i++ {
complete := make(chan error)
tasks = append(tasks, complete)
go func(i int, complete chan error) {
container, err := NewBuilder(runtime).Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"echo", "-n", "foo"},
},
)
if err != nil {
complete <- err
return
}
defer runtime.Destroy(container)
if err := container.Start(); err != nil {
complete <- err
return
}
if err := container.WaitTimeout(15 * time.Second); err != nil {
complete <- err
return
}
// if string(output) != "foo" {
// complete <- fmt.Errorf("Unexecpted output: %v", string(output))
// }
if err := runtime.Destroy(container); err != nil {
complete <- err
return
}
complete <- nil
}(i, complete)
}
var errors []error
for _, task := range tasks {
err := <-task
if err != nil {
errors = append(errors, err)
}
}
if len(errors) > 0 {
b.Fatal(errors)
}
}
| container_test.go | 1 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.9975429773330688,
0.4576670527458191,
0.00016255075752269477,
0.2543964684009552,
0.45034316182136536
] |
{
"id": 6,
"code_window": [
"\tcontainer, err := NewBuilder(runtime).Create(&Config{\n",
"\t\tImage: GetTestImage(runtime).Id,\n",
"\t\tCmd: []string{\"/bin/true\"},\n",
"\n",
"\t\tHostname: \"foobar\",\n",
"\t\tMemory: int64(mem),\n",
"\t},\n",
"\t)\n",
"\tif err != nil {\n",
"\t\tt.Fatal(err)\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tHostname: \"foobar\",\n",
"\t\tMemory: int64(mem),\n",
"\t\tCpuShares: int64(cpu),\n"
],
"file_path": "container_test.go",
"type": "replace",
"edit_start_line_idx": 1065
} | # Generate AUTHORS: git log --all --format='%aN <%aE>' | sort -uf | grep -v vagrant-ubuntu-12
<[email protected]> <[email protected]>
<[email protected]> <[email protected]>
<[email protected]> <[email protected]>
Guillaume J. Charmes <[email protected]> creack <[email protected]>
<[email protected]> <[email protected]>
<[email protected]> <[email protected]>
<[email protected]> <[email protected]>
Thatcher Peskens <[email protected]> dhrp <[email protected]>
Thatcher Peskens <[email protected]> dhrp <[email protected]>
Jérôme Petazzoni <[email protected]> jpetazzo <[email protected]>
Jérôme Petazzoni <[email protected]> <[email protected]>
Joffrey F <[email protected]>
<[email protected]> <[email protected]>
Tim Terhorst <[email protected]>
Andy Smith <[email protected]>
<[email protected]> <[email protected]>
<[email protected]> <[email protected]>
<[email protected]> <[email protected]>
| .mailmap | 0 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.000177277805050835,
0.00017702014883980155,
0.00017676249262876809,
0.00017702014883980155,
2.576562110334635e-7
] |
{
"id": 6,
"code_window": [
"\tcontainer, err := NewBuilder(runtime).Create(&Config{\n",
"\t\tImage: GetTestImage(runtime).Id,\n",
"\t\tCmd: []string{\"/bin/true\"},\n",
"\n",
"\t\tHostname: \"foobar\",\n",
"\t\tMemory: int64(mem),\n",
"\t},\n",
"\t)\n",
"\tif err != nil {\n",
"\t\tt.Fatal(err)\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tHostname: \"foobar\",\n",
"\t\tMemory: int64(mem),\n",
"\t\tCpuShares: int64(cpu),\n"
],
"file_path": "container_test.go",
"type": "replace",
"edit_start_line_idx": 1065
} | package rcli
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"net"
)
// Note: the globals are here to avoid import cycle
// FIXME: Handle debug levels mode?
var DEBUG_FLAG bool = false
var CLIENT_SOCKET io.Writer = nil
type DockerTCPConn struct {
conn *net.TCPConn
options *DockerConnOptions
optionsBuf *[]byte
handshaked bool
client bool
}
func NewDockerTCPConn(conn *net.TCPConn, client bool) *DockerTCPConn {
return &DockerTCPConn{
conn: conn,
options: &DockerConnOptions{},
client: client,
}
}
func (c *DockerTCPConn) SetOptionRawTerminal() {
c.options.RawTerminal = true
}
func (c *DockerTCPConn) GetOptions() *DockerConnOptions {
if c.client && !c.handshaked {
// Attempt to parse options encoded as a JSON dict and store
// the reminder of what we read from the socket in a buffer.
//
// bufio (and its ReadBytes method) would have been nice here,
// but if json.Unmarshal() fails (which will happen if we speak
// to a version of docker that doesn't send any option), then
// we can't put the data back in it for the next Read().
c.handshaked = true
buf := make([]byte, 4096)
if n, _ := c.conn.Read(buf); n > 0 {
buf = buf[:n]
if nl := bytes.IndexByte(buf, '\n'); nl != -1 {
if err := json.Unmarshal(buf[:nl], c.options); err == nil {
buf = buf[nl+1:]
}
}
c.optionsBuf = &buf
}
}
return c.options
}
func (c *DockerTCPConn) Read(b []byte) (int, error) {
if c.optionsBuf != nil {
// Consume what we buffered in GetOptions() first:
optionsBuf := *c.optionsBuf
optionsBuflen := len(optionsBuf)
copied := copy(b, optionsBuf)
if copied < optionsBuflen {
optionsBuf = optionsBuf[copied:]
c.optionsBuf = &optionsBuf
return copied, nil
}
c.optionsBuf = nil
return copied, nil
}
return c.conn.Read(b)
}
func (c *DockerTCPConn) Write(b []byte) (int, error) {
optionsLen := 0
if !c.client && !c.handshaked {
c.handshaked = true
options, _ := json.Marshal(c.options)
options = append(options, '\n')
if optionsLen, err := c.conn.Write(options); err != nil {
return optionsLen, err
}
}
n, err := c.conn.Write(b)
return n + optionsLen, err
}
func (c *DockerTCPConn) Flush() error {
_, err := c.Write([]byte{})
return err
}
func (c *DockerTCPConn) Close() error { return c.conn.Close() }
func (c *DockerTCPConn) CloseWrite() error { return c.conn.CloseWrite() }
func (c *DockerTCPConn) CloseRead() error { return c.conn.CloseRead() }
// Connect to a remote endpoint using protocol `proto` and address `addr`,
// issue a single call, and return the result.
// `proto` may be "tcp", "unix", etc. See the `net` package for available protocols.
func Call(proto, addr string, args ...string) (DockerConn, error) {
cmd, err := json.Marshal(args)
if err != nil {
return nil, err
}
conn, err := dialDocker(proto, addr)
if err != nil {
return nil, err
}
if _, err := fmt.Fprintln(conn, string(cmd)); err != nil {
return nil, err
}
return conn, nil
}
// Listen on `addr`, using protocol `proto`, for incoming rcli calls,
// and pass them to `service`.
func ListenAndServe(proto, addr string, service Service) error {
listener, err := net.Listen(proto, addr)
if err != nil {
return err
}
log.Printf("Listening for RCLI/%s on %s\n", proto, addr)
defer listener.Close()
for {
if conn, err := listener.Accept(); err != nil {
return err
} else {
conn, err := newDockerServerConn(conn)
if err != nil {
return err
}
go func(conn DockerConn) {
defer conn.Close()
if DEBUG_FLAG {
CLIENT_SOCKET = conn
}
if err := Serve(conn, service); err != nil {
log.Println("Error:", err.Error())
fmt.Fprintln(conn, "Error:", err.Error())
}
}(conn)
}
}
return nil
}
// Parse an rcli call on a new connection, and pass it to `service` if it
// is valid.
func Serve(conn DockerConn, service Service) error {
r := bufio.NewReader(conn)
var args []string
if line, err := r.ReadString('\n'); err != nil {
return err
} else if err := json.Unmarshal([]byte(line), &args); err != nil {
return err
} else {
return call(service, ioutil.NopCloser(r), conn, args...)
}
return nil
}
| rcli/tcp.go | 0 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.0002252425329061225,
0.0001721745211398229,
0.00016317707195412368,
0.00016855950525496155,
0.00001385251380270347
] |
{
"id": 6,
"code_window": [
"\tcontainer, err := NewBuilder(runtime).Create(&Config{\n",
"\t\tImage: GetTestImage(runtime).Id,\n",
"\t\tCmd: []string{\"/bin/true\"},\n",
"\n",
"\t\tHostname: \"foobar\",\n",
"\t\tMemory: int64(mem),\n",
"\t},\n",
"\t)\n",
"\tif err != nil {\n",
"\t\tt.Fatal(err)\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tHostname: \"foobar\",\n",
"\t\tMemory: int64(mem),\n",
"\t\tCpuShares: int64(cpu),\n"
],
"file_path": "container_test.go",
"type": "replace",
"edit_start_line_idx": 1065
} | <!DOCTYPE html>
<!--[if lt IE 7]> <html class="no-js lt-ie9 lt-ie8 lt-ie7"> <![endif]-->
<!--[if IE 7]> <html class="no-js lt-ie9 lt-ie8"> <![endif]-->
<!--[if IE 8]> <html class="no-js lt-ie9"> <![endif]-->
<!--[if gt IE 8]><!-->
<html class="no-js" xmlns="http://www.w3.org/1999/html"> <!--<![endif]-->
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<meta name="google-site-verification" content="UxV66EKuPe87dgnH1sbrldrx6VsoWMrx5NjwkgUFxXI" />
<title>Docker - the Linux container engine</title>
<meta name="description" content="Docker encapsulates heterogeneous payloads in standard containers">
<meta name="viewport" content="width=device-width">
<!-- twitter bootstrap -->
<link rel="stylesheet" href="_static/css/bootstrap.min.css">
<link rel="stylesheet" href="_static/css/bootstrap-responsive.min.css">
<!-- main style file -->
<link rel="stylesheet" href="_static/css/main.css">
<!-- vendor scripts -->
<script src="_static/js/vendor/jquery-1.9.1.min.js" type="text/javascript" ></script>
<script src="_static/js/vendor/modernizr-2.6.2-respond-1.1.0.min.js" type="text/javascript" ></script>
<style>
.indexlabel {
float: left;
width: 150px;
display: block;
padding: 10px 20px 10px;
font-size: 20px;
font-weight: 200;
background-color: #a30000;
color: white;
height: 22px;
}
.searchbutton {
font-size: 20px;
height: 40px;
}
.debug {
border: 1px red dotted;
}
</style>
</head>
<body>
<div class="navbar navbar-fixed-top">
<div class="navbar-dotcloud">
<div class="container" style="text-align: center;">
<div class="pull-right" >
<ul class="nav">
<li class="active"><a href="./">Introduction</a></li>
<li ><a href="gettingstarted/">Getting started</a></li>
<li class=""><a href="http://docs.docker.io/en/latest/concepts/containers/">Documentation</a></li>
</ul>
<div class="social links" style="float: right; margin-top: 14px; margin-left: 12px">
<a class="twitter" href="http://twitter.com/getdocker">Twitter</a>
<a class="github" href="https://github.com/dotcloud/docker/">GitHub</a>
</div>
</div>
</div>
</div>
</div>
<div class="container" style="margin-top: 30px;">
<div class="row">
<div class="span12">
<section class="contentblock header">
<div class="span5" style="margin-bottom: 15px;">
<div style="text-align: center;" >
<img src="_static/img/docker_letters_500px.png">
<h2>The Linux container engine</h2>
</div>
<div style="display: block; text-align: center; margin-top: 20px;">
<h5>
Docker is an open-source engine which automates the deployment of applications as highly portable, self-sufficient containers which are independent of hardware, language, framework, packaging system and hosting provider.
</h5>
</div>
<div style="display: block; text-align: center; margin-top: 30px;">
<a class="btn btn-custom btn-large" href="gettingstarted/">Let's get started</a>
</div>
</div>
<div class="span6" >
<div class="js-video" >
<iframe width="600" height="360" src="http://www.youtube.com/embed/wW9CAH9nSLs?feature=player_detailpage&rel=0&modestbranding=1&start=11" frameborder="0" allowfullscreen></iframe>
</div>
</div>
<br style="clear: both"/>
</section>
</div>
</div>
</div>
<div class="container">
<div class="row">
<div class="span6">
<section class="contentblock">
<h4>Heterogeneous payloads</h4>
<p>Any combination of binaries, libraries, configuration files, scripts, virtualenvs, jars, gems, tarballs, you name it. No more juggling between domain-specific tools. Docker can deploy and run them all.</p>
<h4>Any server</h4>
<p>Docker can run on any x64 machine with a modern linux kernel - whether it's a laptop, a bare metal server or a VM. This makes it perfect for multi-cloud deployments.</p>
<h4>Isolation</h4>
<p>Docker isolates processes from each other and from the underlying host, using lightweight containers.</p>
<h4>Repeatability</h4>
<p>Because each container is isolated in its own filesystem, they behave the same regardless of where, when, and alongside what they run.</p>
</section>
</div>
<div class="span6">
<section class="contentblock">
<h1>New! Docker Index</h1>
On the Docker Index you can find and explore pre-made container images. It allows you to share your images and download them.
<br><br>
<a href="https://index.docker.io" target="_blank">
<div class="indexlabel">
DOCKER index
</div>
</a>
<input type="button" class="searchbutton" type="submit" value="Search images"
onClick="window.open('https://index.docker.io')" />
</section>
<section class="contentblock">
<div id="wufoo-z7x3p3">
Fill out my <a href="http://dotclouddocker.wufoo.com/forms/z7x3p3">online form</a>.
</div>
<script type="text/javascript">var z7x3p3;(function(d, t) {
var s = d.createElement(t), options = {
'userName':'dotclouddocker',
'formHash':'z7x3p3',
'autoResize':true,
'height':'577',
'async':true,
'header':'show'};
s.src = ('https:' == d.location.protocol ? 'https://' : 'http://') + 'wufoo.com/scripts/embed/form.js';
s.onload = s.onreadystatechange = function() {
var rs = this.readyState; if (rs) if (rs != 'complete') if (rs != 'loaded') return;
try { z7x3p3 = new WufooForm();z7x3p3.initialize(options);z7x3p3.display(); } catch (e) {}};
var scr = d.getElementsByTagName(t)[0], par = scr.parentNode; par.insertBefore(s, scr);
})(document, 'script');</script>
</section>
</div>
</div>
</div>
<style>
.twitterblock {
min-height: 75px;
}
.twitterblock img {
float: left;
margin-right: 10px;
}
</style>
<div class="container">
<div class="row">
<div class="span6">
<section class="contentblock twitterblock">
<img src="https://twimg0-a.akamaihd.net/profile_images/2491994496/rbevyyq6ykp6bnoby2je_bigger.jpeg">
<em>John Willis @botchagalupe:</em> IMHO docker is to paas what chef was to Iaas 4 years ago
</section>
</div>
<div class="span6">
<section class="contentblock twitterblock">
<img src="https://twimg0-a.akamaihd.net/profile_images/3348427561/9d7f08f1e103a16c8debd169301b9944_bigger.jpeg">
<em>John Feminella @superninjarobot:</em> So, @getdocker is pure excellence. If you've ever wished for arbitrary, PaaS-agnostic, lxc/aufs Linux containers, this is your jam!
</section>
</div>
</div>
<div class="row">
<div class="span6">
<section class="contentblock twitterblock">
<img src="https://si0.twimg.com/profile_images/3408403010/4496ccdd14e9b7285eca04c31a740207_bigger.jpeg">
<em>David Romulan @destructuring:</em> I haven't had this much fun since AWS
</section>
</div>
<div class="span6">
<section class="contentblock twitterblock">
<img src="https://si0.twimg.com/profile_images/780893320/My_Avatar_bigger.jpg">
<em>Ricardo Gladwell @rgladwell:</em> wow @getdocker is either amazing or totally stupid
</section>
</div>
</div>
</div>
<div class="container">
<div class="row">
<div class="span6">
<section class="contentblock">
<h2>Notable features</h2>
<ul>
<li>Filesystem isolation: each process container runs in a completely separate root filesystem.</li>
<li>Resource isolation: system resources like cpu and memory can be allocated differently to each process container, using cgroups.</li>
<li>Network isolation: each process container runs in its own network namespace, with a virtual interface and IP address of its own.</li>
<li>Copy-on-write: root filesystems are created using copy-on-write, which makes deployment extremeley fast, memory-cheap and disk-cheap.</li>
<li>Logging: the standard streams (stdout/stderr/stdin) of each process container is collected and logged for real-time or batch retrieval.</li>
<li>Change management: changes to a container's filesystem can be committed into a new image and re-used to create more containers. No templating or manual configuration required.</li>
<li>Interactive shell: docker can allocate a pseudo-tty and attach to the standard input of any container, for example to run a throwaway interactive shell.</li>
</ul>
<h2>Under the hood</h2>
<p>Under the hood, Docker is built on the following components:</p>
<ul>
<li>The <a href="http://blog.dotcloud.com/kernel-secrets-from-the-paas-garage-part-24-c">cgroup</a> and <a href="http://blog.dotcloud.com/under-the-hood-linux-kernels-on-dotcloud-part">namespacing</a> capabilities of the Linux kernel;</li>
<li><a href="http://aufs.sourceforge.net/aufs.html">AUFS</a>, a powerful union filesystem with copy-on-write capabilities;</li>
<li>The <a href="http://golang.org">Go</a> programming language;</li>
<li><a href="http://lxc.sourceforge.net/">lxc</a>, a set of convenience scripts to simplify the creation of linux containers.</li>
</ul>
<h2>Who started it</h2>
<p>
Docker is an open-source implementation of the deployment engine which powers <a href="http://dotcloud.com">dotCloud</a>, a popular Platform-as-a-Service.</p>
<p>It benefits directly from the experience accumulated over several years of large-scale operation and support of hundreds of thousands
of applications and databases.
</p>
</section>
</div>
<div class="span6">
<section class="contentblock">
<h3 id="twitter">Twitter</h3>
<a class="twitter-timeline" href="https://twitter.com/getdocker" data-widget-id="312730839718957056">Tweets by @getdocker</a>
<script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0];if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src="//platform.twitter.com/widgets.js";fjs.parentNode.insertBefore(js,fjs);}}(document,"script","twitter-wjs");</script>
</section>
</div>
</div>
</div> <!-- end container -->
<div class="container">
<footer id="footer" class="footer">
<div class="row">
<div class="span12">
Docker is a project by <a href="http://www.dotcloud.com">dotCloud</a>
</div>
</div>
<div class="row">
<div class="emptyspace" style="height: 40px">
</div>
</div>
</footer>
</div>
<!-- bootstrap javascipts -->
<script src="_static/js/vendor/bootstrap.min.js" type="text/javascript"></script>
<!-- Google analytics -->
<script type="text/javascript">
var _gaq = _gaq || [];
_gaq.push(['_setAccount', 'UA-6096819-11']);
_gaq.push(['_setDomainName', 'docker.io']);
_gaq.push(['_setAllowLinker', true]);
_gaq.push(['_trackPageview']);
(function() {
var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
})();
</script>
</body>
</html>
| docs/sources/index.html | 0 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.00017805350944399834,
0.00017122880672104657,
0.00016123817476909608,
0.00017115837545134127,
0.000004186050318821799
] |
{
"id": 7,
"code_window": [
"\n",
" {\"Hostname\": \"\",\n",
" \"User\": \"\",\n",
" \"Memory\": 0,\n",
" \"MemorySwap\": 0,\n",
" \"PortSpecs\": [\"22\", \"80\", \"443\"],\n",
" \"Tty\": true,\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" \"CpuShares\": 0,\n"
],
"file_path": "docs/sources/commandline/command/commit.rst",
"type": "add",
"edit_start_line_idx": 18
} | ===========================================================
``commit`` -- Create a new image from a container's changes
===========================================================
::
Usage: docker commit [OPTIONS] CONTAINER [REPOSITORY [TAG]]
Create a new image from a container's changes
-m="": Commit message
-author="": Author (eg. "John Hannibal Smith <[email protected]>"
-run="": Config automatically applied when the image is run. "+`(ex: {"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')
Full -run example::
{"Hostname": "",
"User": "",
"Memory": 0,
"MemorySwap": 0,
"PortSpecs": ["22", "80", "443"],
"Tty": true,
"OpenStdin": true,
"StdinOnce": true,
"Env": ["FOO=BAR", "FOO2=BAR2"],
"Cmd": ["cat", "-e", "/etc/resolv.conf"],
"Dns": ["8.8.8.8", "8.8.4.4"]}
| docs/sources/commandline/command/commit.rst | 1 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.0002561722067184746,
0.00020673936523962766,
0.00016804646293167025,
0.0001959994260687381,
0.00003676997221191414
] |
{
"id": 7,
"code_window": [
"\n",
" {\"Hostname\": \"\",\n",
" \"User\": \"\",\n",
" \"Memory\": 0,\n",
" \"MemorySwap\": 0,\n",
" \"PortSpecs\": [\"22\", \"80\", \"443\"],\n",
" \"Tty\": true,\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" \"CpuShares\": 0,\n"
],
"file_path": "docs/sources/commandline/command/commit.rst",
"type": "add",
"edit_start_line_idx": 18
} | ===========================================
``build`` -- Build a container from Dockerfile via stdin
===========================================
::
Usage: docker build -
Example: cat Dockerfile | docker build -
Build a new image from the Dockerfile passed via stdin
| docs/sources/commandline/command/build.rst | 0 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.00016695258091203868,
0.00016695258091203868,
0.00016695258091203868,
0.00016695258091203868,
0
] |
{
"id": 7,
"code_window": [
"\n",
" {\"Hostname\": \"\",\n",
" \"User\": \"\",\n",
" \"Memory\": 0,\n",
" \"MemorySwap\": 0,\n",
" \"PortSpecs\": [\"22\", \"80\", \"443\"],\n",
" \"Tty\": true,\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" \"CpuShares\": 0,\n"
],
"file_path": "docs/sources/commandline/command/commit.rst",
"type": "add",
"edit_start_line_idx": 18
} | # -*- mode: ruby -*-
# vi: set ft=ruby :
BOX_NAME = ENV['BOX_NAME'] || "ubuntu"
BOX_URI = ENV['BOX_URI'] || "http://files.vagrantup.com/precise64.box"
PPA_KEY = "E61D797F63561DC6"
Vagrant::Config.run do |config|
# Setup virtual machine box. This VM configuration code is always executed.
config.vm.box = BOX_NAME
config.vm.box_url = BOX_URI
# Add docker PPA key to the local repository and install docker
pkg_cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys #{PPA_KEY}; "
pkg_cmd << "echo 'deb http://ppa.launchpad.net/dotcloud/lxc-docker/ubuntu precise main' >/etc/apt/sources.list.d/lxc-docker.list; "
pkg_cmd << "apt-get update -qq; apt-get install -q -y lxc-docker"
if ARGV.include?("--provider=aws".downcase)
# Add AUFS dependency to amazon's VM
pkg_cmd << "; apt-get install linux-image-extra-3.2.0-40-virtual"
end
config.vm.provision :shell, :inline => pkg_cmd
end
# Providers were added on Vagrant >= 1.1.0
Vagrant::VERSION >= "1.1.0" and Vagrant.configure("2") do |config|
config.vm.provider :aws do |aws, override|
config.vm.box = "dummy"
config.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
aws.access_key_id = ENV["AWS_ACCESS_KEY_ID"]
aws.secret_access_key = ENV["AWS_SECRET_ACCESS_KEY"]
aws.keypair_name = ENV["AWS_KEYPAIR_NAME"]
override.ssh.private_key_path = ENV["AWS_SSH_PRIVKEY"]
override.ssh.username = "ubuntu"
aws.region = "us-east-1"
aws.ami = "ami-d0f89fb9"
aws.instance_type = "t1.micro"
end
config.vm.provider :rackspace do |rs|
config.vm.box = "dummy"
config.vm.box_url = "https://github.com/mitchellh/vagrant-rackspace/raw/master/dummy.box"
config.ssh.private_key_path = ENV["RS_PRIVATE_KEY"]
rs.username = ENV["RS_USERNAME"]
rs.api_key = ENV["RS_API_KEY"]
rs.public_key_path = ENV["RS_PUBLIC_KEY"]
rs.flavor = /512MB/
rs.image = /Ubuntu/
end
config.vm.provider :virtualbox do |vb|
config.vm.box = BOX_NAME
config.vm.box_url = BOX_URI
end
end
| Vagrantfile | 0 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.0001681673020357266,
0.0001664596638875082,
0.00016472581773996353,
0.00016630902246106416,
0.0000012477446489356225
] |
{
"id": 7,
"code_window": [
"\n",
" {\"Hostname\": \"\",\n",
" \"User\": \"\",\n",
" \"Memory\": 0,\n",
" \"MemorySwap\": 0,\n",
" \"PortSpecs\": [\"22\", \"80\", \"443\"],\n",
" \"Tty\": true,\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" \"CpuShares\": 0,\n"
],
"file_path": "docs/sources/commandline/command/commit.rst",
"type": "add",
"edit_start_line_idx": 18
} | ===================================================================
``wait`` -- Block until a container stops, then print its exit code
===================================================================
::
Usage: docker wait [OPTIONS] NAME
Block until a container stops, then print its exit code.
| docs/sources/commandline/command/wait.rst | 0 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.00016707585018593818,
0.00016707585018593818,
0.00016707585018593818,
0.00016707585018593818,
0
] |
{
"id": 8,
"code_window": [
"\n",
" Run a command in a new container\n",
"\n",
" -a=map[]: Attach to stdin, stdout or stderr.\n",
" -d=false: Detached mode: leave the container running in the background\n",
" -e=[]: Set environment variables\n",
" -h=\"\": Container host name\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
" -c=1024: CPU shares (relative weight)\n"
],
"file_path": "docs/sources/commandline/command/run.rst",
"type": "add",
"edit_start_line_idx": 11
} | package docker
import (
"text/template"
)
const LxcTemplate = `
# hostname
{{if .Config.Hostname}}
lxc.utsname = {{.Config.Hostname}}
{{else}}
lxc.utsname = {{.Id}}
{{end}}
#lxc.aa_profile = unconfined
# network configuration
lxc.network.type = veth
lxc.network.flags = up
lxc.network.link = {{.NetworkSettings.Bridge}}
lxc.network.name = eth0
lxc.network.mtu = 1500
lxc.network.ipv4 = {{.NetworkSettings.IpAddress}}/{{.NetworkSettings.IpPrefixLen}}
# root filesystem
{{$ROOTFS := .RootfsPath}}
lxc.rootfs = {{$ROOTFS}}
# use a dedicated pts for the container (and limit the number of pseudo terminal
# available)
lxc.pts = 1024
# disable the main console
lxc.console = none
# no controlling tty at all
lxc.tty = 1
# no implicit access to devices
lxc.cgroup.devices.deny = a
# /dev/null and zero
lxc.cgroup.devices.allow = c 1:3 rwm
lxc.cgroup.devices.allow = c 1:5 rwm
# consoles
lxc.cgroup.devices.allow = c 5:1 rwm
lxc.cgroup.devices.allow = c 5:0 rwm
lxc.cgroup.devices.allow = c 4:0 rwm
lxc.cgroup.devices.allow = c 4:1 rwm
# /dev/urandom,/dev/random
lxc.cgroup.devices.allow = c 1:9 rwm
lxc.cgroup.devices.allow = c 1:8 rwm
# /dev/pts/* - pts namespaces are "coming soon"
lxc.cgroup.devices.allow = c 136:* rwm
lxc.cgroup.devices.allow = c 5:2 rwm
# tuntap
lxc.cgroup.devices.allow = c 10:200 rwm
# fuse
#lxc.cgroup.devices.allow = c 10:229 rwm
# rtc
#lxc.cgroup.devices.allow = c 254:0 rwm
# standard mount point
lxc.mount.entry = proc {{$ROOTFS}}/proc proc nosuid,nodev,noexec 0 0
lxc.mount.entry = sysfs {{$ROOTFS}}/sys sysfs nosuid,nodev,noexec 0 0
lxc.mount.entry = devpts {{$ROOTFS}}/dev/pts devpts newinstance,ptmxmode=0666,nosuid,noexec 0 0
#lxc.mount.entry = varrun {{$ROOTFS}}/var/run tmpfs mode=755,size=4096k,nosuid,nodev,noexec 0 0
#lxc.mount.entry = varlock {{$ROOTFS}}/var/lock tmpfs size=1024k,nosuid,nodev,noexec 0 0
#lxc.mount.entry = shm {{$ROOTFS}}/dev/shm tmpfs size=65536k,nosuid,nodev,noexec 0 0
# Inject docker-init
lxc.mount.entry = {{.SysInitPath}} {{$ROOTFS}}/sbin/init none bind,ro 0 0
# In order to get a working DNS environment, mount bind (ro) the host's /etc/resolv.conf into the container
lxc.mount.entry = {{.ResolvConfPath}} {{$ROOTFS}}/etc/resolv.conf none bind,ro 0 0
{{if .Volumes}}
{{range $virtualPath, $realPath := .GetVolumes}}
lxc.mount.entry = {{$realPath}} {{$ROOTFS}}/{{$virtualPath}} none bind,rw 0 0
{{end}}
{{end}}
# drop linux capabilities (apply mainly to the user root in the container)
lxc.cap.drop = audit_control audit_write mac_admin mac_override mknod setfcap setpcap sys_admin sys_boot sys_module sys_nice sys_pacct sys_rawio sys_resource sys_time sys_tty_config
# limits
{{if .Config.Memory}}
lxc.cgroup.memory.limit_in_bytes = {{.Config.Memory}}
lxc.cgroup.memory.soft_limit_in_bytes = {{.Config.Memory}}
{{with $memSwap := getMemorySwap .Config}}
lxc.cgroup.memory.memsw.limit_in_bytes = {{$memSwap}}
{{end}}
{{end}}
`
var LxcTemplateCompiled *template.Template
func getMemorySwap(config *Config) int64 {
// By default, MemorySwap is set to twice the size of RAM.
// If you want to omit MemorySwap, set it to `-1'.
if config.MemorySwap < 0 {
return 0
}
return config.Memory * 2
}
func init() {
var err error
funcMap := template.FuncMap{
"getMemorySwap": getMemorySwap,
}
LxcTemplateCompiled, err = template.New("lxc").Funcs(funcMap).Parse(LxcTemplate)
if err != nil {
panic(err)
}
}
| lxc_template.go | 1 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.0004077730991411954,
0.00020379116176627576,
0.0001634459913475439,
0.00017138350813183933,
0.00006670396396657452
] |
{
"id": 8,
"code_window": [
"\n",
" Run a command in a new container\n",
"\n",
" -a=map[]: Attach to stdin, stdout or stderr.\n",
" -d=false: Detached mode: leave the container running in the background\n",
" -e=[]: Set environment variables\n",
" -h=\"\": Container host name\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
" -c=1024: CPU shares (relative weight)\n"
],
"file_path": "docs/sources/commandline/command/run.rst",
"type": "add",
"edit_start_line_idx": 11
} | =========================================================================
``port`` -- Lookup the public-facing port which is NAT-ed to PRIVATE_PORT
=========================================================================
::
Usage: docker port [OPTIONS] CONTAINER PRIVATE_PORT
Lookup the public-facing port which is NAT-ed to PRIVATE_PORT
| docs/sources/commandline/command/port.rst | 0 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.0015820731641724706,
0.0015820731641724706,
0.0015820731641724706,
0.0015820731641724706,
0
] |
{
"id": 8,
"code_window": [
"\n",
" Run a command in a new container\n",
"\n",
" -a=map[]: Attach to stdin, stdout or stderr.\n",
" -d=false: Detached mode: leave the container running in the background\n",
" -e=[]: Set environment variables\n",
" -h=\"\": Container host name\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
" -c=1024: CPU shares (relative weight)\n"
],
"file_path": "docs/sources/commandline/command/run.rst",
"type": "add",
"edit_start_line_idx": 11
} | ============================================================
``login`` -- Register or Login to the docker registry server
============================================================
::
Usage: docker login
Register or Login to the docker registry server
| docs/sources/commandline/command/login.rst | 0 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.00016453272837679833,
0.00016453272837679833,
0.00016453272837679833,
0.00016453272837679833,
0
] |
{
"id": 8,
"code_window": [
"\n",
" Run a command in a new container\n",
"\n",
" -a=map[]: Attach to stdin, stdout or stderr.\n",
" -d=false: Detached mode: leave the container running in the background\n",
" -e=[]: Set environment variables\n",
" -h=\"\": Container host name\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
" -c=1024: CPU shares (relative weight)\n"
],
"file_path": "docs/sources/commandline/command/run.rst",
"type": "add",
"edit_start_line_idx": 11
} | :title: docker Registry documentation
:description: Documentation for docker Registry and Registry API
:keywords: docker, registry, api, index
Registry
========
Contents:
.. toctree::
:maxdepth: 2
api
| docs/sources/registry/index.rst | 0 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.00017201824812218547,
0.00017180774011649191,
0.00017159721755888313,
0.00017180774011649191,
2.1051528165116906e-7
] |
{
"id": 9,
"code_window": [
"{{with $memSwap := getMemorySwap .Config}}\n",
"lxc.cgroup.memory.memsw.limit_in_bytes = {{$memSwap}}\n",
"{{end}}\n",
"{{end}}\n",
"`\n",
"\n",
"var LxcTemplateCompiled *template.Template\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"{{if .Config.CpuShares}}\n",
"lxc.cgroup.cpu.shares = {{.Config.CpuShares}}\n",
"{{end}}\n"
],
"file_path": "lxc_template.go",
"type": "add",
"edit_start_line_idx": 98
} | package docker
import (
"encoding/json"
"fmt"
"github.com/dotcloud/docker/rcli"
"github.com/kr/pty"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"path"
"sort"
"strconv"
"strings"
"syscall"
"time"
)
type Container struct {
root string
Id string
Created time.Time
Path string
Args []string
Config *Config
State State
Image string
network *NetworkInterface
NetworkSettings *NetworkSettings
SysInitPath string
ResolvConfPath string
cmd *exec.Cmd
stdout *writeBroadcaster
stderr *writeBroadcaster
stdin io.ReadCloser
stdinPipe io.WriteCloser
ptyMaster io.Closer
runtime *Runtime
waitLock chan struct{}
Volumes map[string]string
}
type Config struct {
Hostname string
User string
Memory int64 // Memory limit (in bytes)
MemorySwap int64 // Total memory usage (memory + swap); set `-1' to disable swap
AttachStdin bool
AttachStdout bool
AttachStderr bool
PortSpecs []string
Tty bool // Attach standard streams to a tty, including stdin if it is not closed.
OpenStdin bool // Open stdin
StdinOnce bool // If true, close stdin after the 1 attached client disconnects.
Env []string
Cmd []string
Dns []string
Image string // Name of the image as it was passed by the operator (eg. could be symbolic)
Volumes map[string]struct{}
VolumesFrom string
}
func ParseRun(args []string, stdout io.Writer, capabilities *Capabilities) (*Config, error) {
cmd := rcli.Subcmd(stdout, "run", "[OPTIONS] IMAGE COMMAND [ARG...]", "Run a command in a new container")
if len(args) > 0 && args[0] != "--help" {
cmd.SetOutput(ioutil.Discard)
}
flHostname := cmd.String("h", "", "Container host name")
flUser := cmd.String("u", "", "Username or UID")
flDetach := cmd.Bool("d", false, "Detached mode: leave the container running in the background")
flAttach := NewAttachOpts()
cmd.Var(flAttach, "a", "Attach to stdin, stdout or stderr.")
flStdin := cmd.Bool("i", false, "Keep stdin open even if not attached")
flTty := cmd.Bool("t", false, "Allocate a pseudo-tty")
flMemory := cmd.Int64("m", 0, "Memory limit (in bytes)")
if *flMemory > 0 && !capabilities.MemoryLimit {
fmt.Fprintf(stdout, "WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
*flMemory = 0
}
var flPorts ListOpts
cmd.Var(&flPorts, "p", "Expose a container's port to the host (use 'docker port' to see the actual mapping)")
var flEnv ListOpts
cmd.Var(&flEnv, "e", "Set environment variables")
var flDns ListOpts
cmd.Var(&flDns, "dns", "Set custom dns servers")
flVolumes := NewPathOpts()
cmd.Var(flVolumes, "v", "Attach a data volume")
flVolumesFrom := cmd.String("volumes-from", "", "Mount volumes from the specified container")
if err := cmd.Parse(args); err != nil {
return nil, err
}
if *flDetach && len(flAttach) > 0 {
return nil, fmt.Errorf("Conflicting options: -a and -d")
}
// If neither -d or -a are set, attach to everything by default
if len(flAttach) == 0 && !*flDetach {
if !*flDetach {
flAttach.Set("stdout")
flAttach.Set("stderr")
if *flStdin {
flAttach.Set("stdin")
}
}
}
parsedArgs := cmd.Args()
runCmd := []string{}
image := ""
if len(parsedArgs) >= 1 {
image = cmd.Arg(0)
}
if len(parsedArgs) > 1 {
runCmd = parsedArgs[1:]
}
config := &Config{
Hostname: *flHostname,
PortSpecs: flPorts,
User: *flUser,
Tty: *flTty,
OpenStdin: *flStdin,
Memory: *flMemory,
AttachStdin: flAttach.Get("stdin"),
AttachStdout: flAttach.Get("stdout"),
AttachStderr: flAttach.Get("stderr"),
Env: flEnv,
Cmd: runCmd,
Dns: flDns,
Image: image,
Volumes: flVolumes,
VolumesFrom: *flVolumesFrom,
}
if *flMemory > 0 && !capabilities.SwapLimit {
fmt.Fprintf(stdout, "WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
config.MemorySwap = -1
}
// When allocating stdin in attached mode, close stdin at client disconnect
if config.OpenStdin && config.AttachStdin {
config.StdinOnce = true
}
return config, nil
}
type NetworkSettings struct {
IpAddress string
IpPrefixLen int
Gateway string
Bridge string
PortMapping map[string]string
}
// String returns a human-readable description of the port mapping defined in the settings
func (settings *NetworkSettings) PortMappingHuman() string {
var mapping []string
for private, public := range settings.PortMapping {
mapping = append(mapping, fmt.Sprintf("%s->%s", public, private))
}
sort.Strings(mapping)
return strings.Join(mapping, ", ")
}
// Inject the io.Reader at the given path. Note: do not close the reader
func (container *Container) Inject(file io.Reader, pth string) error {
// Make sure the directory exists
if err := os.MkdirAll(path.Join(container.rwPath(), path.Dir(pth)), 0755); err != nil {
return err
}
// FIXME: Handle permissions/already existing dest
dest, err := os.Create(path.Join(container.rwPath(), pth))
if err != nil {
return err
}
if _, err := io.Copy(dest, file); err != nil {
return err
}
return nil
}
func (container *Container) Cmd() *exec.Cmd {
return container.cmd
}
func (container *Container) When() time.Time {
return container.Created
}
func (container *Container) FromDisk() error {
data, err := ioutil.ReadFile(container.jsonPath())
if err != nil {
return err
}
// Load container settings
if err := json.Unmarshal(data, container); err != nil {
return err
}
return nil
}
func (container *Container) ToDisk() (err error) {
data, err := json.Marshal(container)
if err != nil {
return
}
return ioutil.WriteFile(container.jsonPath(), data, 0666)
}
func (container *Container) generateLXCConfig() error {
fo, err := os.Create(container.lxcConfigPath())
if err != nil {
return err
}
defer fo.Close()
if err := LxcTemplateCompiled.Execute(fo, container); err != nil {
return err
}
return nil
}
func (container *Container) startPty() error {
ptyMaster, ptySlave, err := pty.Open()
if err != nil {
return err
}
container.ptyMaster = ptyMaster
container.cmd.Stdout = ptySlave
container.cmd.Stderr = ptySlave
// Copy the PTYs to our broadcasters
go func() {
defer container.stdout.CloseWriters()
Debugf("[startPty] Begin of stdout pipe")
io.Copy(container.stdout, ptyMaster)
Debugf("[startPty] End of stdout pipe")
}()
// stdin
if container.Config.OpenStdin {
container.cmd.Stdin = ptySlave
container.cmd.SysProcAttr = &syscall.SysProcAttr{Setctty: true, Setsid: true}
go func() {
defer container.stdin.Close()
Debugf("[startPty] Begin of stdin pipe")
io.Copy(ptyMaster, container.stdin)
Debugf("[startPty] End of stdin pipe")
}()
}
if err := container.cmd.Start(); err != nil {
return err
}
ptySlave.Close()
return nil
}
func (container *Container) start() error {
container.cmd.Stdout = container.stdout
container.cmd.Stderr = container.stderr
if container.Config.OpenStdin {
stdin, err := container.cmd.StdinPipe()
if err != nil {
return err
}
go func() {
defer stdin.Close()
Debugf("Begin of stdin pipe [start]")
io.Copy(stdin, container.stdin)
Debugf("End of stdin pipe [start]")
}()
}
return container.cmd.Start()
}
func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, stdout io.Writer, stderr io.Writer) chan error {
var cStdout, cStderr io.ReadCloser
var nJobs int
errors := make(chan error, 3)
if stdin != nil && container.Config.OpenStdin {
nJobs += 1
if cStdin, err := container.StdinPipe(); err != nil {
errors <- err
} else {
go func() {
Debugf("[start] attach stdin\n")
defer Debugf("[end] attach stdin\n")
// No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr
if cStdout != nil {
defer cStdout.Close()
}
if cStderr != nil {
defer cStderr.Close()
}
if container.Config.StdinOnce && !container.Config.Tty {
defer cStdin.Close()
}
if container.Config.Tty {
_, err = CopyEscapable(cStdin, stdin)
} else {
_, err = io.Copy(cStdin, stdin)
}
if err != nil {
Debugf("[error] attach stdin: %s\n", err)
}
// Discard error, expecting pipe error
errors <- nil
}()
}
}
if stdout != nil {
nJobs += 1
if p, err := container.StdoutPipe(); err != nil {
errors <- err
} else {
cStdout = p
go func() {
Debugf("[start] attach stdout\n")
defer Debugf("[end] attach stdout\n")
// If we are in StdinOnce mode, then close stdin
if container.Config.StdinOnce {
if stdin != nil {
defer stdin.Close()
}
if stdinCloser != nil {
defer stdinCloser.Close()
}
}
_, err := io.Copy(stdout, cStdout)
if err != nil {
Debugf("[error] attach stdout: %s\n", err)
}
errors <- err
}()
}
}
if stderr != nil {
nJobs += 1
if p, err := container.StderrPipe(); err != nil {
errors <- err
} else {
cStderr = p
go func() {
Debugf("[start] attach stderr\n")
defer Debugf("[end] attach stderr\n")
// If we are in StdinOnce mode, then close stdin
if container.Config.StdinOnce {
if stdin != nil {
defer stdin.Close()
}
if stdinCloser != nil {
defer stdinCloser.Close()
}
}
_, err := io.Copy(stderr, cStderr)
if err != nil {
Debugf("[error] attach stderr: %s\n", err)
}
errors <- err
}()
}
}
return Go(func() error {
if cStdout != nil {
defer cStdout.Close()
}
if cStderr != nil {
defer cStderr.Close()
}
// FIXME: how do clean up the stdin goroutine without the unwanted side effect
// of closing the passed stdin? Add an intermediary io.Pipe?
for i := 0; i < nJobs; i += 1 {
Debugf("Waiting for job %d/%d\n", i+1, nJobs)
if err := <-errors; err != nil {
Debugf("Job %d returned error %s. Aborting all jobs\n", i+1, err)
return err
}
Debugf("Job %d completed successfully\n", i+1)
}
Debugf("All jobs completed successfully\n")
return nil
})
}
func (container *Container) Start() error {
container.State.lock()
defer container.State.unlock()
if container.State.Running {
return fmt.Errorf("The container %s is already running.", container.Id)
}
if err := container.EnsureMounted(); err != nil {
return err
}
if err := container.allocateNetwork(); err != nil {
return err
}
// Make sure the config is compatible with the current kernel
if container.Config.Memory > 0 && !container.runtime.capabilities.MemoryLimit {
log.Printf("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
container.Config.Memory = 0
}
if container.Config.Memory > 0 && !container.runtime.capabilities.SwapLimit {
log.Printf("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
container.Config.MemorySwap = -1
}
container.Volumes = make(map[string]string)
// Create the requested volumes volumes
for volPath := range container.Config.Volumes {
if c, err := container.runtime.volumes.Create(nil, container, "", "", nil); err != nil {
return err
} else {
if err := os.MkdirAll(path.Join(container.RootfsPath(), volPath), 0755); err != nil {
return nil
}
container.Volumes[volPath] = c.Id
}
}
if container.Config.VolumesFrom != "" {
c := container.runtime.Get(container.Config.VolumesFrom)
if c == nil {
return fmt.Errorf("Container %s not found. Impossible to mount its volumes", container.Id)
}
for volPath, id := range c.Volumes {
if _, exists := container.Volumes[volPath]; exists {
return fmt.Errorf("The requested volume %s overlap one of the volume of the container %s", volPath, c.Id)
}
if err := os.MkdirAll(path.Join(container.RootfsPath(), volPath), 0755); err != nil {
return nil
}
container.Volumes[volPath] = id
}
}
if err := container.generateLXCConfig(); err != nil {
return err
}
params := []string{
"-n", container.Id,
"-f", container.lxcConfigPath(),
"--",
"/sbin/init",
}
// Networking
params = append(params, "-g", container.network.Gateway.String())
// User
if container.Config.User != "" {
params = append(params, "-u", container.Config.User)
}
if container.Config.Tty {
params = append(params, "-e", "TERM=xterm")
}
// Setup environment
params = append(params,
"-e", "HOME=/",
"-e", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
)
for _, elem := range container.Config.Env {
params = append(params, "-e", elem)
}
// Program
params = append(params, "--", container.Path)
params = append(params, container.Args...)
container.cmd = exec.Command("lxc-start", params...)
// Setup logging of stdout and stderr to disk
if err := container.runtime.LogToDisk(container.stdout, container.logPath("stdout")); err != nil {
return err
}
if err := container.runtime.LogToDisk(container.stderr, container.logPath("stderr")); err != nil {
return err
}
var err error
if container.Config.Tty {
err = container.startPty()
} else {
err = container.start()
}
if err != nil {
return err
}
// FIXME: save state on disk *first*, then converge
// this way disk state is used as a journal, eg. we can restore after crash etc.
container.State.setRunning(container.cmd.Process.Pid)
// Init the lock
container.waitLock = make(chan struct{})
container.ToDisk()
go container.monitor()
return nil
}
func (container *Container) Run() error {
if err := container.Start(); err != nil {
return err
}
container.Wait()
return nil
}
func (container *Container) Output() (output []byte, err error) {
pipe, err := container.StdoutPipe()
if err != nil {
return nil, err
}
defer pipe.Close()
if err := container.Start(); err != nil {
return nil, err
}
output, err = ioutil.ReadAll(pipe)
container.Wait()
return output, err
}
// StdinPipe() returns a pipe connected to the standard input of the container's
// active process.
//
func (container *Container) StdinPipe() (io.WriteCloser, error) {
return container.stdinPipe, nil
}
func (container *Container) StdoutPipe() (io.ReadCloser, error) {
reader, writer := io.Pipe()
container.stdout.AddWriter(writer)
return newBufReader(reader), nil
}
func (container *Container) StderrPipe() (io.ReadCloser, error) {
reader, writer := io.Pipe()
container.stderr.AddWriter(writer)
return newBufReader(reader), nil
}
func (container *Container) allocateNetwork() error {
iface, err := container.runtime.networkManager.Allocate()
if err != nil {
return err
}
container.NetworkSettings.PortMapping = make(map[string]string)
for _, spec := range container.Config.PortSpecs {
if nat, err := iface.AllocatePort(spec); err != nil {
iface.Release()
return err
} else {
container.NetworkSettings.PortMapping[strconv.Itoa(nat.Backend)] = strconv.Itoa(nat.Frontend)
}
}
container.network = iface
container.NetworkSettings.Bridge = container.runtime.networkManager.bridgeIface
container.NetworkSettings.IpAddress = iface.IPNet.IP.String()
container.NetworkSettings.IpPrefixLen, _ = iface.IPNet.Mask.Size()
container.NetworkSettings.Gateway = iface.Gateway.String()
return nil
}
func (container *Container) releaseNetwork() {
container.network.Release()
container.network = nil
container.NetworkSettings = &NetworkSettings{}
}
// FIXME: replace this with a control socket within docker-init
func (container *Container) waitLxc() error {
for {
if output, err := exec.Command("lxc-info", "-n", container.Id).CombinedOutput(); err != nil {
return err
} else {
if !strings.Contains(string(output), "RUNNING") {
return nil
}
}
time.Sleep(500 * time.Millisecond)
}
return nil
}
func (container *Container) monitor() {
// Wait for the program to exit
Debugf("Waiting for process")
// If the command does not exists, try to wait via lxc
if container.cmd == nil {
if err := container.waitLxc(); err != nil {
Debugf("%s: Process: %s", container.Id, err)
}
} else {
if err := container.cmd.Wait(); err != nil {
// Discard the error as any signals or non 0 returns will generate an error
Debugf("%s: Process: %s", container.Id, err)
}
}
Debugf("Process finished")
var exitCode int = -1
if container.cmd != nil {
exitCode = container.cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
}
// Cleanup
container.releaseNetwork()
if container.Config.OpenStdin {
if err := container.stdin.Close(); err != nil {
Debugf("%s: Error close stdin: %s", container.Id, err)
}
}
if err := container.stdout.CloseWriters(); err != nil {
Debugf("%s: Error close stdout: %s", container.Id, err)
}
if err := container.stderr.CloseWriters(); err != nil {
Debugf("%s: Error close stderr: %s", container.Id, err)
}
if container.ptyMaster != nil {
if err := container.ptyMaster.Close(); err != nil {
Debugf("%s: Error closing Pty master: %s", container.Id, err)
}
}
if err := container.Unmount(); err != nil {
log.Printf("%v: Failed to umount filesystem: %v", container.Id, err)
}
// Re-create a brand new stdin pipe once the container exited
if container.Config.OpenStdin {
container.stdin, container.stdinPipe = io.Pipe()
}
// Report status back
container.State.setStopped(exitCode)
// Release the lock
close(container.waitLock)
if err := container.ToDisk(); err != nil {
// FIXME: there is a race condition here which causes this to fail during the unit tests.
// If another goroutine was waiting for Wait() to return before removing the container's root
// from the filesystem... At this point it may already have done so.
// This is because State.setStopped() has already been called, and has caused Wait()
// to return.
// FIXME: why are we serializing running state to disk in the first place?
//log.Printf("%s: Failed to dump configuration to the disk: %s", container.Id, err)
}
}
func (container *Container) kill() error {
if !container.State.Running {
return nil
}
// Sending SIGKILL to the process via lxc
output, err := exec.Command("lxc-kill", "-n", container.Id, "9").CombinedOutput()
if err != nil {
log.Printf("error killing container %s (%s, %s)", container.Id, output, err)
}
// 2. Wait for the process to die, in last resort, try to kill the process directly
if err := container.WaitTimeout(10 * time.Second); err != nil {
if container.cmd == nil {
return fmt.Errorf("lxc-kill failed, impossible to kill the container %s", container.Id)
}
log.Printf("Container %s failed to exit within 10 seconds of lxc SIGKILL - trying direct SIGKILL", container.Id)
if err := container.cmd.Process.Kill(); err != nil {
return err
}
}
// Wait for the container to be actually stopped
container.Wait()
return nil
}
func (container *Container) Kill() error {
container.State.lock()
defer container.State.unlock()
if !container.State.Running {
return nil
}
return container.kill()
}
func (container *Container) Stop(seconds int) error {
container.State.lock()
defer container.State.unlock()
if !container.State.Running {
return nil
}
// 1. Send a SIGTERM
if output, err := exec.Command("lxc-kill", "-n", container.Id, "15").CombinedOutput(); err != nil {
log.Print(string(output))
log.Print("Failed to send SIGTERM to the process, force killing")
if err := container.kill(); err != nil {
return err
}
}
// 2. Wait for the process to exit on its own
if err := container.WaitTimeout(time.Duration(seconds) * time.Second); err != nil {
log.Printf("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.Id, seconds)
if err := container.kill(); err != nil {
return err
}
}
return nil
}
func (container *Container) Restart(seconds int) error {
if err := container.Stop(seconds); err != nil {
return err
}
if err := container.Start(); err != nil {
return err
}
return nil
}
// Wait blocks until the container stops running, then returns its exit code.
func (container *Container) Wait() int {
<-container.waitLock
return container.State.ExitCode
}
func (container *Container) ExportRw() (Archive, error) {
return Tar(container.rwPath(), Uncompressed)
}
func (container *Container) RwChecksum() (string, error) {
rwData, err := Tar(container.rwPath(), Xz)
if err != nil {
return "", err
}
return HashData(rwData)
}
func (container *Container) Export() (Archive, error) {
if err := container.EnsureMounted(); err != nil {
return nil, err
}
return Tar(container.RootfsPath(), Uncompressed)
}
func (container *Container) WaitTimeout(timeout time.Duration) error {
done := make(chan bool)
go func() {
container.Wait()
done <- true
}()
select {
case <-time.After(timeout):
return fmt.Errorf("Timed Out")
case <-done:
return nil
}
panic("unreachable")
}
func (container *Container) EnsureMounted() error {
if mounted, err := container.Mounted(); err != nil {
return err
} else if mounted {
return nil
}
return container.Mount()
}
func (container *Container) Mount() error {
image, err := container.GetImage()
if err != nil {
return err
}
return image.Mount(container.RootfsPath(), container.rwPath())
}
func (container *Container) Changes() ([]Change, error) {
image, err := container.GetImage()
if err != nil {
return nil, err
}
return image.Changes(container.rwPath())
}
func (container *Container) GetImage() (*Image, error) {
if container.runtime == nil {
return nil, fmt.Errorf("Can't get image of unregistered container")
}
return container.runtime.graph.Get(container.Image)
}
func (container *Container) Mounted() (bool, error) {
return Mounted(container.RootfsPath())
}
func (container *Container) Unmount() error {
return Unmount(container.RootfsPath())
}
// ShortId returns a shorthand version of the container's id for convenience.
// A collision with other container shorthands is very unlikely, but possible.
// In case of a collision a lookup with Runtime.Get() will fail, and the caller
// will need to use a langer prefix, or the full-length container Id.
func (container *Container) ShortId() string {
return TruncateId(container.Id)
}
func (container *Container) logPath(name string) string {
return path.Join(container.root, fmt.Sprintf("%s-%s.log", container.Id, name))
}
func (container *Container) ReadLog(name string) (io.Reader, error) {
return os.Open(container.logPath(name))
}
func (container *Container) jsonPath() string {
return path.Join(container.root, "config.json")
}
func (container *Container) lxcConfigPath() string {
return path.Join(container.root, "config.lxc")
}
// This method must be exported to be used from the lxc template
func (container *Container) RootfsPath() string {
return path.Join(container.root, "rootfs")
}
func (container *Container) GetVolumes() (map[string]string, error) {
ret := make(map[string]string)
for volPath, id := range container.Volumes {
volume, err := container.runtime.volumes.Get(id)
if err != nil {
return nil, err
}
root, err := volume.root()
if err != nil {
return nil, err
}
ret[volPath] = path.Join(root, "layer")
}
return ret, nil
}
func (container *Container) rwPath() string {
return path.Join(container.root, "rw")
}
func validateId(id string) error {
if id == "" {
return fmt.Errorf("Invalid empty id")
}
return nil
}
| container.go | 1 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.9989715814590454,
0.011429955251514912,
0.00016007943486329168,
0.00016941866488195956,
0.10527248680591583
] |
{
"id": 9,
"code_window": [
"{{with $memSwap := getMemorySwap .Config}}\n",
"lxc.cgroup.memory.memsw.limit_in_bytes = {{$memSwap}}\n",
"{{end}}\n",
"{{end}}\n",
"`\n",
"\n",
"var LxcTemplateCompiled *template.Template\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"{{if .Config.CpuShares}}\n",
"lxc.cgroup.cpu.shares = {{.Config.CpuShares}}\n",
"{{end}}\n"
],
"file_path": "lxc_template.go",
"type": "add",
"edit_start_line_idx": 98
} | Files used to setup the developer virtual machine
| hack/environment/README.rst | 0 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.0001646606542635709,
0.0001646606542635709,
0.0001646606542635709,
0.0001646606542635709,
0
] |
{
"id": 9,
"code_window": [
"{{with $memSwap := getMemorySwap .Config}}\n",
"lxc.cgroup.memory.memsw.limit_in_bytes = {{$memSwap}}\n",
"{{end}}\n",
"{{end}}\n",
"`\n",
"\n",
"var LxcTemplateCompiled *template.Template\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"{{if .Config.CpuShares}}\n",
"lxc.cgroup.cpu.shares = {{.Config.CpuShares}}\n",
"{{end}}\n"
],
"file_path": "lxc_template.go",
"type": "add",
"edit_start_line_idx": 98
} |
$(function(){
// init multi-vers stuff
$('.tabswitcher').each(function(i, multi_vers){
var tabs = $('<ul></ul>');
$(multi_vers).prepend(tabs);
$(multi_vers).children('.tab').each(function(j, vers_content){
vers = $(vers_content).children(':first').text();
var id = 'multi_vers_' + '_' + i + '_' + j;
$(vers_content).attr('id', id);
$(tabs).append('<li><a href="#' + id + '">' + vers + '</a></li>');
});
});
$( ".tabswitcher" ).tabs();
// sidebar acordian-ing
// don't apply on last object (it should be the FAQ)
$('nav > ul > li > a').not(':last').click(function(){
if ($(this).parent().hasClass('current')) {
$(this).parent().children('ul').slideUp(200, function() {
$(this).parent().removeClass('current'); // toggle after effect
});
} else {
$('nav > ul > li > ul').slideUp(100);
var current = $(this);
setTimeout(function() {
$('nav > ul > li').removeClass('current');
current.parent().addClass('current'); // toggle before effect
current.parent().children('ul').hide();
current.parent().children('ul').slideDown(200);
}, 100);
}
return false;
});
}); | docs/theme/docker/static/js/docs.js | 0 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.00017174061213154346,
0.00016898816102184355,
0.00016353859973605722,
0.00017033671610988677,
0.000003202989319106564
] |
{
"id": 9,
"code_window": [
"{{with $memSwap := getMemorySwap .Config}}\n",
"lxc.cgroup.memory.memsw.limit_in_bytes = {{$memSwap}}\n",
"{{end}}\n",
"{{end}}\n",
"`\n",
"\n",
"var LxcTemplateCompiled *template.Template\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"{{if .Config.CpuShares}}\n",
"lxc.cgroup.cpu.shares = {{.Config.CpuShares}}\n",
"{{end}}\n"
],
"file_path": "lxc_template.go",
"type": "add",
"edit_start_line_idx": 98
} | Maintainer duty
===============
The Debian project specifies the role of a 'maintainer' which is the person
making the Debian package of the program. This role requires an 'sponsor' to
upload the package. As a maintainer you should follow the guide
http://www.debian.org/doc/manuals/maint-guide . Your sponsor will be there
helping you succeed.
The most relevant information to update is the changelog file:
Each new release should create a new first paragraph with new release version,
changes, and the maintainer information.
After this is done, follow README.debian to generate the actual source
packages and talk with your sponsor to upload them into the official Debian
package archive.
| packaging/debian/maintainer.rst | 0 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.00017274572746828198,
0.00016958071500994265,
0.0001664156879996881,
0.00016958071500994265,
0.0000031650197342969477
] |
{
"id": 10,
"code_window": [
"\t\ta.AttachStderr != b.AttachStderr ||\n",
"\t\ta.User != b.User ||\n",
"\t\ta.Memory != b.Memory ||\n",
"\t\ta.MemorySwap != b.MemorySwap ||\n",
"\t\ta.OpenStdin != b.OpenStdin ||\n",
"\t\ta.Tty != b.Tty {\n",
"\t\treturn false\n",
"\t}\n",
"\tif len(a.Cmd) != len(b.Cmd) ||\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\ta.CpuShares != b.CpuShares ||\n"
],
"file_path": "utils.go",
"type": "add",
"edit_start_line_idx": 488
} | package docker
import (
"encoding/json"
"fmt"
"github.com/dotcloud/docker/rcli"
"github.com/kr/pty"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"path"
"sort"
"strconv"
"strings"
"syscall"
"time"
)
type Container struct {
root string
Id string
Created time.Time
Path string
Args []string
Config *Config
State State
Image string
network *NetworkInterface
NetworkSettings *NetworkSettings
SysInitPath string
ResolvConfPath string
cmd *exec.Cmd
stdout *writeBroadcaster
stderr *writeBroadcaster
stdin io.ReadCloser
stdinPipe io.WriteCloser
ptyMaster io.Closer
runtime *Runtime
waitLock chan struct{}
Volumes map[string]string
}
type Config struct {
Hostname string
User string
Memory int64 // Memory limit (in bytes)
MemorySwap int64 // Total memory usage (memory + swap); set `-1' to disable swap
AttachStdin bool
AttachStdout bool
AttachStderr bool
PortSpecs []string
Tty bool // Attach standard streams to a tty, including stdin if it is not closed.
OpenStdin bool // Open stdin
StdinOnce bool // If true, close stdin after the 1 attached client disconnects.
Env []string
Cmd []string
Dns []string
Image string // Name of the image as it was passed by the operator (eg. could be symbolic)
Volumes map[string]struct{}
VolumesFrom string
}
func ParseRun(args []string, stdout io.Writer, capabilities *Capabilities) (*Config, error) {
cmd := rcli.Subcmd(stdout, "run", "[OPTIONS] IMAGE COMMAND [ARG...]", "Run a command in a new container")
if len(args) > 0 && args[0] != "--help" {
cmd.SetOutput(ioutil.Discard)
}
flHostname := cmd.String("h", "", "Container host name")
flUser := cmd.String("u", "", "Username or UID")
flDetach := cmd.Bool("d", false, "Detached mode: leave the container running in the background")
flAttach := NewAttachOpts()
cmd.Var(flAttach, "a", "Attach to stdin, stdout or stderr.")
flStdin := cmd.Bool("i", false, "Keep stdin open even if not attached")
flTty := cmd.Bool("t", false, "Allocate a pseudo-tty")
flMemory := cmd.Int64("m", 0, "Memory limit (in bytes)")
if *flMemory > 0 && !capabilities.MemoryLimit {
fmt.Fprintf(stdout, "WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
*flMemory = 0
}
var flPorts ListOpts
cmd.Var(&flPorts, "p", "Expose a container's port to the host (use 'docker port' to see the actual mapping)")
var flEnv ListOpts
cmd.Var(&flEnv, "e", "Set environment variables")
var flDns ListOpts
cmd.Var(&flDns, "dns", "Set custom dns servers")
flVolumes := NewPathOpts()
cmd.Var(flVolumes, "v", "Attach a data volume")
flVolumesFrom := cmd.String("volumes-from", "", "Mount volumes from the specified container")
if err := cmd.Parse(args); err != nil {
return nil, err
}
if *flDetach && len(flAttach) > 0 {
return nil, fmt.Errorf("Conflicting options: -a and -d")
}
// If neither -d or -a are set, attach to everything by default
if len(flAttach) == 0 && !*flDetach {
if !*flDetach {
flAttach.Set("stdout")
flAttach.Set("stderr")
if *flStdin {
flAttach.Set("stdin")
}
}
}
parsedArgs := cmd.Args()
runCmd := []string{}
image := ""
if len(parsedArgs) >= 1 {
image = cmd.Arg(0)
}
if len(parsedArgs) > 1 {
runCmd = parsedArgs[1:]
}
config := &Config{
Hostname: *flHostname,
PortSpecs: flPorts,
User: *flUser,
Tty: *flTty,
OpenStdin: *flStdin,
Memory: *flMemory,
AttachStdin: flAttach.Get("stdin"),
AttachStdout: flAttach.Get("stdout"),
AttachStderr: flAttach.Get("stderr"),
Env: flEnv,
Cmd: runCmd,
Dns: flDns,
Image: image,
Volumes: flVolumes,
VolumesFrom: *flVolumesFrom,
}
if *flMemory > 0 && !capabilities.SwapLimit {
fmt.Fprintf(stdout, "WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
config.MemorySwap = -1
}
// When allocating stdin in attached mode, close stdin at client disconnect
if config.OpenStdin && config.AttachStdin {
config.StdinOnce = true
}
return config, nil
}
type NetworkSettings struct {
IpAddress string
IpPrefixLen int
Gateway string
Bridge string
PortMapping map[string]string
}
// String returns a human-readable description of the port mapping defined in the settings
func (settings *NetworkSettings) PortMappingHuman() string {
var mapping []string
for private, public := range settings.PortMapping {
mapping = append(mapping, fmt.Sprintf("%s->%s", public, private))
}
sort.Strings(mapping)
return strings.Join(mapping, ", ")
}
// Inject the io.Reader at the given path. Note: do not close the reader
func (container *Container) Inject(file io.Reader, pth string) error {
// Make sure the directory exists
if err := os.MkdirAll(path.Join(container.rwPath(), path.Dir(pth)), 0755); err != nil {
return err
}
// FIXME: Handle permissions/already existing dest
dest, err := os.Create(path.Join(container.rwPath(), pth))
if err != nil {
return err
}
if _, err := io.Copy(dest, file); err != nil {
return err
}
return nil
}
func (container *Container) Cmd() *exec.Cmd {
return container.cmd
}
func (container *Container) When() time.Time {
return container.Created
}
func (container *Container) FromDisk() error {
data, err := ioutil.ReadFile(container.jsonPath())
if err != nil {
return err
}
// Load container settings
if err := json.Unmarshal(data, container); err != nil {
return err
}
return nil
}
func (container *Container) ToDisk() (err error) {
data, err := json.Marshal(container)
if err != nil {
return
}
return ioutil.WriteFile(container.jsonPath(), data, 0666)
}
func (container *Container) generateLXCConfig() error {
fo, err := os.Create(container.lxcConfigPath())
if err != nil {
return err
}
defer fo.Close()
if err := LxcTemplateCompiled.Execute(fo, container); err != nil {
return err
}
return nil
}
func (container *Container) startPty() error {
ptyMaster, ptySlave, err := pty.Open()
if err != nil {
return err
}
container.ptyMaster = ptyMaster
container.cmd.Stdout = ptySlave
container.cmd.Stderr = ptySlave
// Copy the PTYs to our broadcasters
go func() {
defer container.stdout.CloseWriters()
Debugf("[startPty] Begin of stdout pipe")
io.Copy(container.stdout, ptyMaster)
Debugf("[startPty] End of stdout pipe")
}()
// stdin
if container.Config.OpenStdin {
container.cmd.Stdin = ptySlave
container.cmd.SysProcAttr = &syscall.SysProcAttr{Setctty: true, Setsid: true}
go func() {
defer container.stdin.Close()
Debugf("[startPty] Begin of stdin pipe")
io.Copy(ptyMaster, container.stdin)
Debugf("[startPty] End of stdin pipe")
}()
}
if err := container.cmd.Start(); err != nil {
return err
}
ptySlave.Close()
return nil
}
func (container *Container) start() error {
container.cmd.Stdout = container.stdout
container.cmd.Stderr = container.stderr
if container.Config.OpenStdin {
stdin, err := container.cmd.StdinPipe()
if err != nil {
return err
}
go func() {
defer stdin.Close()
Debugf("Begin of stdin pipe [start]")
io.Copy(stdin, container.stdin)
Debugf("End of stdin pipe [start]")
}()
}
return container.cmd.Start()
}
func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, stdout io.Writer, stderr io.Writer) chan error {
var cStdout, cStderr io.ReadCloser
var nJobs int
errors := make(chan error, 3)
if stdin != nil && container.Config.OpenStdin {
nJobs += 1
if cStdin, err := container.StdinPipe(); err != nil {
errors <- err
} else {
go func() {
Debugf("[start] attach stdin\n")
defer Debugf("[end] attach stdin\n")
// No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr
if cStdout != nil {
defer cStdout.Close()
}
if cStderr != nil {
defer cStderr.Close()
}
if container.Config.StdinOnce && !container.Config.Tty {
defer cStdin.Close()
}
if container.Config.Tty {
_, err = CopyEscapable(cStdin, stdin)
} else {
_, err = io.Copy(cStdin, stdin)
}
if err != nil {
Debugf("[error] attach stdin: %s\n", err)
}
// Discard error, expecting pipe error
errors <- nil
}()
}
}
if stdout != nil {
nJobs += 1
if p, err := container.StdoutPipe(); err != nil {
errors <- err
} else {
cStdout = p
go func() {
Debugf("[start] attach stdout\n")
defer Debugf("[end] attach stdout\n")
// If we are in StdinOnce mode, then close stdin
if container.Config.StdinOnce {
if stdin != nil {
defer stdin.Close()
}
if stdinCloser != nil {
defer stdinCloser.Close()
}
}
_, err := io.Copy(stdout, cStdout)
if err != nil {
Debugf("[error] attach stdout: %s\n", err)
}
errors <- err
}()
}
}
if stderr != nil {
nJobs += 1
if p, err := container.StderrPipe(); err != nil {
errors <- err
} else {
cStderr = p
go func() {
Debugf("[start] attach stderr\n")
defer Debugf("[end] attach stderr\n")
// If we are in StdinOnce mode, then close stdin
if container.Config.StdinOnce {
if stdin != nil {
defer stdin.Close()
}
if stdinCloser != nil {
defer stdinCloser.Close()
}
}
_, err := io.Copy(stderr, cStderr)
if err != nil {
Debugf("[error] attach stderr: %s\n", err)
}
errors <- err
}()
}
}
return Go(func() error {
if cStdout != nil {
defer cStdout.Close()
}
if cStderr != nil {
defer cStderr.Close()
}
// FIXME: how do clean up the stdin goroutine without the unwanted side effect
// of closing the passed stdin? Add an intermediary io.Pipe?
for i := 0; i < nJobs; i += 1 {
Debugf("Waiting for job %d/%d\n", i+1, nJobs)
if err := <-errors; err != nil {
Debugf("Job %d returned error %s. Aborting all jobs\n", i+1, err)
return err
}
Debugf("Job %d completed successfully\n", i+1)
}
Debugf("All jobs completed successfully\n")
return nil
})
}
func (container *Container) Start() error {
container.State.lock()
defer container.State.unlock()
if container.State.Running {
return fmt.Errorf("The container %s is already running.", container.Id)
}
if err := container.EnsureMounted(); err != nil {
return err
}
if err := container.allocateNetwork(); err != nil {
return err
}
// Make sure the config is compatible with the current kernel
if container.Config.Memory > 0 && !container.runtime.capabilities.MemoryLimit {
log.Printf("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
container.Config.Memory = 0
}
if container.Config.Memory > 0 && !container.runtime.capabilities.SwapLimit {
log.Printf("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
container.Config.MemorySwap = -1
}
container.Volumes = make(map[string]string)
// Create the requested volumes volumes
for volPath := range container.Config.Volumes {
if c, err := container.runtime.volumes.Create(nil, container, "", "", nil); err != nil {
return err
} else {
if err := os.MkdirAll(path.Join(container.RootfsPath(), volPath), 0755); err != nil {
return nil
}
container.Volumes[volPath] = c.Id
}
}
if container.Config.VolumesFrom != "" {
c := container.runtime.Get(container.Config.VolumesFrom)
if c == nil {
return fmt.Errorf("Container %s not found. Impossible to mount its volumes", container.Id)
}
for volPath, id := range c.Volumes {
if _, exists := container.Volumes[volPath]; exists {
return fmt.Errorf("The requested volume %s overlap one of the volume of the container %s", volPath, c.Id)
}
if err := os.MkdirAll(path.Join(container.RootfsPath(), volPath), 0755); err != nil {
return nil
}
container.Volumes[volPath] = id
}
}
if err := container.generateLXCConfig(); err != nil {
return err
}
params := []string{
"-n", container.Id,
"-f", container.lxcConfigPath(),
"--",
"/sbin/init",
}
// Networking
params = append(params, "-g", container.network.Gateway.String())
// User
if container.Config.User != "" {
params = append(params, "-u", container.Config.User)
}
if container.Config.Tty {
params = append(params, "-e", "TERM=xterm")
}
// Setup environment
params = append(params,
"-e", "HOME=/",
"-e", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
)
for _, elem := range container.Config.Env {
params = append(params, "-e", elem)
}
// Program
params = append(params, "--", container.Path)
params = append(params, container.Args...)
container.cmd = exec.Command("lxc-start", params...)
// Setup logging of stdout and stderr to disk
if err := container.runtime.LogToDisk(container.stdout, container.logPath("stdout")); err != nil {
return err
}
if err := container.runtime.LogToDisk(container.stderr, container.logPath("stderr")); err != nil {
return err
}
var err error
if container.Config.Tty {
err = container.startPty()
} else {
err = container.start()
}
if err != nil {
return err
}
// FIXME: save state on disk *first*, then converge
// this way disk state is used as a journal, eg. we can restore after crash etc.
container.State.setRunning(container.cmd.Process.Pid)
// Init the lock
container.waitLock = make(chan struct{})
container.ToDisk()
go container.monitor()
return nil
}
func (container *Container) Run() error {
if err := container.Start(); err != nil {
return err
}
container.Wait()
return nil
}
func (container *Container) Output() (output []byte, err error) {
pipe, err := container.StdoutPipe()
if err != nil {
return nil, err
}
defer pipe.Close()
if err := container.Start(); err != nil {
return nil, err
}
output, err = ioutil.ReadAll(pipe)
container.Wait()
return output, err
}
// StdinPipe() returns a pipe connected to the standard input of the container's
// active process.
//
func (container *Container) StdinPipe() (io.WriteCloser, error) {
return container.stdinPipe, nil
}
func (container *Container) StdoutPipe() (io.ReadCloser, error) {
reader, writer := io.Pipe()
container.stdout.AddWriter(writer)
return newBufReader(reader), nil
}
func (container *Container) StderrPipe() (io.ReadCloser, error) {
reader, writer := io.Pipe()
container.stderr.AddWriter(writer)
return newBufReader(reader), nil
}
func (container *Container) allocateNetwork() error {
iface, err := container.runtime.networkManager.Allocate()
if err != nil {
return err
}
container.NetworkSettings.PortMapping = make(map[string]string)
for _, spec := range container.Config.PortSpecs {
if nat, err := iface.AllocatePort(spec); err != nil {
iface.Release()
return err
} else {
container.NetworkSettings.PortMapping[strconv.Itoa(nat.Backend)] = strconv.Itoa(nat.Frontend)
}
}
container.network = iface
container.NetworkSettings.Bridge = container.runtime.networkManager.bridgeIface
container.NetworkSettings.IpAddress = iface.IPNet.IP.String()
container.NetworkSettings.IpPrefixLen, _ = iface.IPNet.Mask.Size()
container.NetworkSettings.Gateway = iface.Gateway.String()
return nil
}
func (container *Container) releaseNetwork() {
container.network.Release()
container.network = nil
container.NetworkSettings = &NetworkSettings{}
}
// FIXME: replace this with a control socket within docker-init
func (container *Container) waitLxc() error {
for {
if output, err := exec.Command("lxc-info", "-n", container.Id).CombinedOutput(); err != nil {
return err
} else {
if !strings.Contains(string(output), "RUNNING") {
return nil
}
}
time.Sleep(500 * time.Millisecond)
}
return nil
}
func (container *Container) monitor() {
// Wait for the program to exit
Debugf("Waiting for process")
// If the command does not exists, try to wait via lxc
if container.cmd == nil {
if err := container.waitLxc(); err != nil {
Debugf("%s: Process: %s", container.Id, err)
}
} else {
if err := container.cmd.Wait(); err != nil {
// Discard the error as any signals or non 0 returns will generate an error
Debugf("%s: Process: %s", container.Id, err)
}
}
Debugf("Process finished")
var exitCode int = -1
if container.cmd != nil {
exitCode = container.cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
}
// Cleanup
container.releaseNetwork()
if container.Config.OpenStdin {
if err := container.stdin.Close(); err != nil {
Debugf("%s: Error close stdin: %s", container.Id, err)
}
}
if err := container.stdout.CloseWriters(); err != nil {
Debugf("%s: Error close stdout: %s", container.Id, err)
}
if err := container.stderr.CloseWriters(); err != nil {
Debugf("%s: Error close stderr: %s", container.Id, err)
}
if container.ptyMaster != nil {
if err := container.ptyMaster.Close(); err != nil {
Debugf("%s: Error closing Pty master: %s", container.Id, err)
}
}
if err := container.Unmount(); err != nil {
log.Printf("%v: Failed to umount filesystem: %v", container.Id, err)
}
// Re-create a brand new stdin pipe once the container exited
if container.Config.OpenStdin {
container.stdin, container.stdinPipe = io.Pipe()
}
// Report status back
container.State.setStopped(exitCode)
// Release the lock
close(container.waitLock)
if err := container.ToDisk(); err != nil {
// FIXME: there is a race condition here which causes this to fail during the unit tests.
// If another goroutine was waiting for Wait() to return before removing the container's root
// from the filesystem... At this point it may already have done so.
// This is because State.setStopped() has already been called, and has caused Wait()
// to return.
// FIXME: why are we serializing running state to disk in the first place?
//log.Printf("%s: Failed to dump configuration to the disk: %s", container.Id, err)
}
}
func (container *Container) kill() error {
if !container.State.Running {
return nil
}
// Sending SIGKILL to the process via lxc
output, err := exec.Command("lxc-kill", "-n", container.Id, "9").CombinedOutput()
if err != nil {
log.Printf("error killing container %s (%s, %s)", container.Id, output, err)
}
// 2. Wait for the process to die, in last resort, try to kill the process directly
if err := container.WaitTimeout(10 * time.Second); err != nil {
if container.cmd == nil {
return fmt.Errorf("lxc-kill failed, impossible to kill the container %s", container.Id)
}
log.Printf("Container %s failed to exit within 10 seconds of lxc SIGKILL - trying direct SIGKILL", container.Id)
if err := container.cmd.Process.Kill(); err != nil {
return err
}
}
// Wait for the container to be actually stopped
container.Wait()
return nil
}
func (container *Container) Kill() error {
container.State.lock()
defer container.State.unlock()
if !container.State.Running {
return nil
}
return container.kill()
}
func (container *Container) Stop(seconds int) error {
container.State.lock()
defer container.State.unlock()
if !container.State.Running {
return nil
}
// 1. Send a SIGTERM
if output, err := exec.Command("lxc-kill", "-n", container.Id, "15").CombinedOutput(); err != nil {
log.Print(string(output))
log.Print("Failed to send SIGTERM to the process, force killing")
if err := container.kill(); err != nil {
return err
}
}
// 2. Wait for the process to exit on its own
if err := container.WaitTimeout(time.Duration(seconds) * time.Second); err != nil {
log.Printf("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.Id, seconds)
if err := container.kill(); err != nil {
return err
}
}
return nil
}
func (container *Container) Restart(seconds int) error {
if err := container.Stop(seconds); err != nil {
return err
}
if err := container.Start(); err != nil {
return err
}
return nil
}
// Wait blocks until the container stops running, then returns its exit code.
func (container *Container) Wait() int {
<-container.waitLock
return container.State.ExitCode
}
func (container *Container) ExportRw() (Archive, error) {
return Tar(container.rwPath(), Uncompressed)
}
func (container *Container) RwChecksum() (string, error) {
rwData, err := Tar(container.rwPath(), Xz)
if err != nil {
return "", err
}
return HashData(rwData)
}
func (container *Container) Export() (Archive, error) {
if err := container.EnsureMounted(); err != nil {
return nil, err
}
return Tar(container.RootfsPath(), Uncompressed)
}
func (container *Container) WaitTimeout(timeout time.Duration) error {
done := make(chan bool)
go func() {
container.Wait()
done <- true
}()
select {
case <-time.After(timeout):
return fmt.Errorf("Timed Out")
case <-done:
return nil
}
panic("unreachable")
}
func (container *Container) EnsureMounted() error {
if mounted, err := container.Mounted(); err != nil {
return err
} else if mounted {
return nil
}
return container.Mount()
}
func (container *Container) Mount() error {
image, err := container.GetImage()
if err != nil {
return err
}
return image.Mount(container.RootfsPath(), container.rwPath())
}
func (container *Container) Changes() ([]Change, error) {
image, err := container.GetImage()
if err != nil {
return nil, err
}
return image.Changes(container.rwPath())
}
func (container *Container) GetImage() (*Image, error) {
if container.runtime == nil {
return nil, fmt.Errorf("Can't get image of unregistered container")
}
return container.runtime.graph.Get(container.Image)
}
func (container *Container) Mounted() (bool, error) {
return Mounted(container.RootfsPath())
}
func (container *Container) Unmount() error {
return Unmount(container.RootfsPath())
}
// ShortId returns a shorthand version of the container's id for convenience.
// A collision with other container shorthands is very unlikely, but possible.
// In case of a collision a lookup with Runtime.Get() will fail, and the caller
// will need to use a langer prefix, or the full-length container Id.
func (container *Container) ShortId() string {
return TruncateId(container.Id)
}
func (container *Container) logPath(name string) string {
return path.Join(container.root, fmt.Sprintf("%s-%s.log", container.Id, name))
}
func (container *Container) ReadLog(name string) (io.Reader, error) {
return os.Open(container.logPath(name))
}
func (container *Container) jsonPath() string {
return path.Join(container.root, "config.json")
}
func (container *Container) lxcConfigPath() string {
return path.Join(container.root, "config.lxc")
}
// This method must be exported to be used from the lxc template
func (container *Container) RootfsPath() string {
return path.Join(container.root, "rootfs")
}
func (container *Container) GetVolumes() (map[string]string, error) {
ret := make(map[string]string)
for volPath, id := range container.Volumes {
volume, err := container.runtime.volumes.Get(id)
if err != nil {
return nil, err
}
root, err := volume.root()
if err != nil {
return nil, err
}
ret[volPath] = path.Join(root, "layer")
}
return ret, nil
}
func (container *Container) rwPath() string {
return path.Join(container.root, "rw")
}
func validateId(id string) error {
if id == "" {
return fmt.Errorf("Invalid empty id")
}
return nil
}
| container.go | 1 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.007793172728270292,
0.0005319290794432163,
0.00016126217087730765,
0.00017276649305131286,
0.0012882810551673174
] |
{
"id": 10,
"code_window": [
"\t\ta.AttachStderr != b.AttachStderr ||\n",
"\t\ta.User != b.User ||\n",
"\t\ta.Memory != b.Memory ||\n",
"\t\ta.MemorySwap != b.MemorySwap ||\n",
"\t\ta.OpenStdin != b.OpenStdin ||\n",
"\t\ta.Tty != b.Tty {\n",
"\t\treturn false\n",
"\t}\n",
"\tif len(a.Cmd) != len(b.Cmd) ||\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\ta.CpuShares != b.CpuShares ||\n"
],
"file_path": "utils.go",
"type": "add",
"edit_start_line_idx": 488
} | :title: Setting up a dev environment
:description: Guides on how to contribute to docker
:keywords: Docker, documentation, developers, contributing, dev environment
Setting up a dev environment
============================
Instructions that have been verified to work on Ubuntu 12.10,
.. code-block:: bash
sudo apt-get -y install lxc wget bsdtar curl golang git
export GOPATH=~/go/
export PATH=$GOPATH/bin:$PATH
mkdir -p $GOPATH/src/github.com/dotcloud
cd $GOPATH/src/github.com/dotcloud
git clone [email protected]:dotcloud/docker.git
cd docker
go get -v github.com/dotcloud/docker/...
go install -v github.com/dotcloud/docker/...
Then run the docker daemon,
.. code-block:: bash
sudo $GOPATH/bin/docker -d
Run the ``go install`` command (above) to recompile docker.
| docs/sources/contributing/devenvironment.rst | 0 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.00017234249389730394,
0.00016815209528431296,
0.000162201322382316,
0.0001690322533249855,
0.000003796410737777478
] |
{
"id": 10,
"code_window": [
"\t\ta.AttachStderr != b.AttachStderr ||\n",
"\t\ta.User != b.User ||\n",
"\t\ta.Memory != b.Memory ||\n",
"\t\ta.MemorySwap != b.MemorySwap ||\n",
"\t\ta.OpenStdin != b.OpenStdin ||\n",
"\t\ta.Tty != b.Tty {\n",
"\t\treturn false\n",
"\t}\n",
"\tif len(a.Cmd) != len(b.Cmd) ||\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\ta.CpuShares != b.CpuShares ||\n"
],
"file_path": "utils.go",
"type": "add",
"edit_start_line_idx": 488
} | :title: Docker Examples
:description: Examples on how to use Docker
:keywords: docker, hello world, examples
Examples
============
Contents:
.. toctree::
:maxdepth: 1
running_examples
hello_world
hello_world_daemon
python_web_app
running_redis_service
running_ssh_service
couchdb_data_volumes
| docs/sources/examples/index.rst | 0 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.00017596877296455204,
0.00017226791533175856,
0.00016524165403097868,
0.00017559331899974495,
0.000004970680492988322
] |
{
"id": 10,
"code_window": [
"\t\ta.AttachStderr != b.AttachStderr ||\n",
"\t\ta.User != b.User ||\n",
"\t\ta.Memory != b.Memory ||\n",
"\t\ta.MemorySwap != b.MemorySwap ||\n",
"\t\ta.OpenStdin != b.OpenStdin ||\n",
"\t\ta.Tty != b.Tty {\n",
"\t\treturn false\n",
"\t}\n",
"\tif len(a.Cmd) != len(b.Cmd) ||\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\ta.CpuShares != b.CpuShares ||\n"
],
"file_path": "utils.go",
"type": "add",
"edit_start_line_idx": 488
} | README.md
| packaging/debian/docs | 0 | https://github.com/moby/moby/commit/efd9becb78c82ddef07efb7e76e0100d7a712281 | [
0.0001678230764809996,
0.0001678230764809996,
0.0001678230764809996,
0.0001678230764809996,
0
] |
{
"id": 0,
"code_window": [
"}\n",
"\n",
"func (b *executorBuilder) buildDeallocate(v *plan.Deallocate) Executor {\n",
"\treturn &DeallocateExec{\n",
"\t\tbaseExecutor: newBaseExecutor(nil, b.ctx),\n",
"\t\tName: v.Name,\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\te := &DeallocateExec{\n"
],
"file_path": "executor/builder.go",
"type": "replace",
"edit_start_line_idx": 207
} | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"math"
"sort"
"sync"
"time"
"github.com/cznic/sortutil"
"github.com/juju/errors"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/distsql"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/plan"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/admin"
"github.com/pingcap/tipb/go-tipb"
goctx "golang.org/x/net/context"
)
// executorBuilder builds an Executor from a Plan.
// The InfoSchema must not change during execution.
type executorBuilder struct {
ctx context.Context
is infoschema.InfoSchema
priority int
startTS uint64 // cached when the first time getStartTS() is called
// err is set when there is error happened during Executor building process.
err error
}
func newExecutorBuilder(ctx context.Context, is infoschema.InfoSchema, priority int) *executorBuilder {
return &executorBuilder{
ctx: ctx,
is: is,
priority: priority,
}
}
func (b *executorBuilder) build(p plan.Plan) Executor {
switch v := p.(type) {
case nil:
return nil
case *plan.CheckTable:
return b.buildCheckTable(v)
case *plan.DDL:
return b.buildDDL(v)
case *plan.Deallocate:
return b.buildDeallocate(v)
case *plan.Delete:
return b.buildDelete(v)
case *plan.Execute:
return b.buildExecute(v)
case *plan.Explain:
return b.buildExplain(v)
case *plan.Insert:
return b.buildInsert(v)
case *plan.LoadData:
return b.buildLoadData(v)
case *plan.PhysicalLimit:
return b.buildLimit(v)
case *plan.Prepare:
return b.buildPrepare(v)
case *plan.PhysicalLock:
return b.buildSelectLock(v)
case *plan.CancelDDLJobs:
return b.buildCancelDDLJobs(v)
case *plan.ShowDDL:
return b.buildShowDDL(v)
case *plan.ShowDDLJobs:
return b.buildShowDDLJobs(v)
case *plan.Show:
return b.buildShow(v)
case *plan.Simple:
return b.buildSimple(v)
case *plan.Set:
return b.buildSet(v)
case *plan.PhysicalSort:
return b.buildSort(v)
case *plan.PhysicalTopN:
return b.buildTopN(v)
case *plan.PhysicalUnionAll:
return b.buildUnionAll(v)
case *plan.Update:
return b.buildUpdate(v)
case *plan.PhysicalUnionScan:
return b.buildUnionScanExec(v)
case *plan.PhysicalHashJoin:
return b.buildHashJoin(v)
case *plan.PhysicalMergeJoin:
return b.buildMergeJoin(v)
case *plan.PhysicalHashSemiJoin:
return b.buildSemiJoin(v)
case *plan.PhysicalIndexJoin:
return b.buildIndexLookUpJoin(v)
case *plan.PhysicalSelection:
return b.buildSelection(v)
case *plan.PhysicalHashAgg:
return b.buildHashAgg(v)
case *plan.PhysicalStreamAgg:
return b.buildStreamAgg(v)
case *plan.PhysicalProjection:
return b.buildProjection(v)
case *plan.PhysicalMemTable:
return b.buildMemTable(v)
case *plan.PhysicalTableDual:
return b.buildTableDual(v)
case *plan.PhysicalApply:
return b.buildApply(v)
case *plan.PhysicalExists:
return b.buildExists(v)
case *plan.PhysicalMaxOneRow:
return b.buildMaxOneRow(v)
case *plan.Analyze:
return b.buildAnalyze(v)
case *plan.PhysicalTableReader:
return b.buildTableReader(v)
case *plan.PhysicalIndexReader:
return b.buildIndexReader(v)
case *plan.PhysicalIndexLookUpReader:
return b.buildIndexLookUpReader(v)
default:
b.err = ErrUnknownPlan.Gen("Unknown Plan %T", p)
return nil
}
}
func (b *executorBuilder) buildCancelDDLJobs(v *plan.CancelDDLJobs) Executor {
e := &CancelDDLJobsExec{
baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
jobIDs: v.JobIDs,
}
e.errs, b.err = admin.CancelJobs(e.ctx.Txn(), e.jobIDs)
if b.err != nil {
b.err = errors.Trace(b.err)
return nil
}
e.supportChk = true
return e
}
func (b *executorBuilder) buildShowDDL(v *plan.ShowDDL) Executor {
// We get DDLInfo here because for Executors that returns result set,
// next will be called after transaction has been committed.
// We need the transaction to get DDLInfo.
e := &ShowDDLExec{
baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
}
var err error
ownerManager := domain.GetDomain(e.ctx).DDL().OwnerManager()
ctx, cancel := goctx.WithTimeout(goctx.Background(), 3*time.Second)
e.ddlOwnerID, err = ownerManager.GetOwnerID(ctx)
cancel()
if err != nil {
b.err = errors.Trace(err)
return nil
}
ddlInfo, err := admin.GetDDLInfo(e.ctx.Txn())
if err != nil {
b.err = errors.Trace(err)
return nil
}
e.ddlInfo = ddlInfo
e.selfID = ownerManager.ID()
e.supportChk = true
return e
}
func (b *executorBuilder) buildShowDDLJobs(v *plan.ShowDDLJobs) Executor {
e := &ShowDDLJobsExec{
baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
}
e.supportChk = true
return e
}
func (b *executorBuilder) buildCheckTable(v *plan.CheckTable) Executor {
e := &CheckTableExec{
baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
tables: v.Tables,
is: b.is,
}
e.supportChk = true
return e
}
func (b *executorBuilder) buildDeallocate(v *plan.Deallocate) Executor {
return &DeallocateExec{
baseExecutor: newBaseExecutor(nil, b.ctx),
Name: v.Name,
}
}
func (b *executorBuilder) buildSelectLock(v *plan.PhysicalLock) Executor {
src := b.build(v.Children()[0])
if b.err != nil {
b.err = errors.Trace(b.err)
return nil
}
if !b.ctx.GetSessionVars().InTxn() {
// Locking of rows for update using SELECT FOR UPDATE only applies when autocommit
// is disabled (either by beginning transaction with START TRANSACTION or by setting
// autocommit to 0. If autocommit is enabled, the rows matching the specification are not locked.
// See https://dev.mysql.com/doc/refman/5.7/en/innodb-locking-reads.html
return src
}
e := &SelectLockExec{
baseExecutor: newBaseExecutor(v.Schema(), b.ctx, src),
Lock: v.Lock,
}
e.supportChk = true
return e
}
func (b *executorBuilder) buildLimit(v *plan.PhysicalLimit) Executor {
childExec := b.build(v.Children()[0])
if b.err != nil {
b.err = errors.Trace(b.err)
return nil
}
e := &LimitExec{
baseExecutor: newBaseExecutor(v.Schema(), b.ctx, childExec),
begin: v.Offset,
end: v.Offset + v.Count,
}
e.supportChk = true
return e
}
func (b *executorBuilder) buildPrepare(v *plan.Prepare) Executor {
e := &PrepareExec{
baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
is: b.is,
name: v.Name,
sqlText: v.SQLText,
}
e.supportChk = true
return e
}
func (b *executorBuilder) buildExecute(v *plan.Execute) Executor {
return &ExecuteExec{
baseExecutor: newBaseExecutor(nil, b.ctx),
IS: b.is,
Name: v.Name,
UsingVars: v.UsingVars,
ID: v.ExecID,
Stmt: v.Stmt,
Plan: v.Plan,
}
}
func (b *executorBuilder) buildShow(v *plan.Show) Executor {
e := &ShowExec{
baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
Tp: v.Tp,
DBName: model.NewCIStr(v.DBName),
Table: v.Table,
Column: v.Column,
User: v.User,
Flag: v.Flag,
Full: v.Full,
GlobalScope: v.GlobalScope,
is: b.is,
}
if e.Tp == ast.ShowGrants && e.User == nil {
e.User = e.ctx.GetSessionVars().User
}
if len(v.Conditions) == 0 {
return e
}
sel := &SelectionExec{
baseExecutor: newBaseExecutor(v.Schema(), b.ctx, e),
filters: v.Conditions,
}
return sel
}
func (b *executorBuilder) buildSimple(v *plan.Simple) Executor {
switch s := v.Statement.(type) {
case *ast.GrantStmt:
return b.buildGrant(s)
case *ast.RevokeStmt:
return b.buildRevoke(s)
}
return &SimpleExec{
baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
Statement: v.Statement,
is: b.is,
}
}
func (b *executorBuilder) buildSet(v *plan.Set) Executor {
e := &SetExecutor{
baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
vars: v.VarAssigns,
}
e.supportChk = true
return e
}
func (b *executorBuilder) buildInsert(v *plan.Insert) Executor {
ivs := &InsertValues{
baseExecutor: newBaseExecutor(nil, b.ctx),
Columns: v.Columns,
Lists: v.Lists,
Setlist: v.Setlist,
GenColumns: v.GenCols.Columns,
GenExprs: v.GenCols.Exprs,
needFillDefaultValues: v.NeedFillDefaultValue,
}
ivs.SelectExec = b.build(v.SelectPlan)
if b.err != nil {
b.err = errors.Trace(b.err)
return nil
}
ivs.Table = v.Table
if v.IsReplace {
return b.buildReplace(ivs)
}
insert := &InsertExec{
InsertValues: ivs,
OnDuplicate: append(v.OnDuplicate, v.GenCols.OnDuplicates...),
Priority: v.Priority,
IgnoreErr: v.IgnoreErr,
}
insert.supportChk = true
return insert
}
func (b *executorBuilder) buildLoadData(v *plan.LoadData) Executor {
tbl, ok := b.is.TableByID(v.Table.TableInfo.ID)
if !ok {
b.err = errors.Errorf("Can not get table %d", v.Table.TableInfo.ID)
return nil
}
insertVal := &InsertValues{
baseExecutor: newBaseExecutor(nil, b.ctx),
Table: tbl,
Columns: v.Columns,
GenColumns: v.GenCols.Columns,
GenExprs: v.GenCols.Exprs,
}
tableCols := tbl.Cols()
columns, err := insertVal.getColumns(tableCols)
if err != nil {
b.err = errors.Trace(err)
return nil
}
return &LoadData{
baseExecutor: newBaseExecutor(nil, b.ctx),
IsLocal: v.IsLocal,
loadDataInfo: &LoadDataInfo{
row: make([]types.Datum, len(columns)),
insertVal: insertVal,
Path: v.Path,
Table: tbl,
FieldsInfo: v.FieldsInfo,
LinesInfo: v.LinesInfo,
Ctx: b.ctx,
columns: columns,
},
}
}
func (b *executorBuilder) buildReplace(vals *InsertValues) Executor {
return &ReplaceExec{
InsertValues: vals,
}
}
func (b *executorBuilder) buildGrant(grant *ast.GrantStmt) Executor {
return &GrantExec{
baseExecutor: newBaseExecutor(nil, b.ctx),
Privs: grant.Privs,
ObjectType: grant.ObjectType,
Level: grant.Level,
Users: grant.Users,
WithGrant: grant.WithGrant,
is: b.is,
}
}
func (b *executorBuilder) buildRevoke(revoke *ast.RevokeStmt) Executor {
e := &RevokeExec{
ctx: b.ctx,
Privs: revoke.Privs,
ObjectType: revoke.ObjectType,
Level: revoke.Level,
Users: revoke.Users,
is: b.is,
}
e.supportChk = true
return e
}
func (b *executorBuilder) buildDDL(v *plan.DDL) Executor {
e := &DDLExec{
baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
stmt: v.Statement,
is: b.is,
}
e.supportChk = true
return e
}
func (b *executorBuilder) buildExplain(v *plan.Explain) Executor {
e := &ExplainExec{
baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
}
e.rows = make([][]string, 0, len(v.Rows))
for _, row := range v.Rows {
e.rows = append(e.rows, row)
}
e.supportChk = true
return e
}
func (b *executorBuilder) buildUnionScanExec(v *plan.PhysicalUnionScan) Executor {
src := b.build(v.Children()[0])
if b.err != nil {
return nil
}
us := &UnionScanExec{baseExecutor: newBaseExecutor(v.Schema(), b.ctx, src)}
// Get the handle column index of the below plan.
// We can guarantee that there must be only one col in the map.
for _, cols := range v.Children()[0].Schema().TblID2Handle {
us.belowHandleIndex = cols[0].Index
}
switch x := src.(type) {
case *TableReaderExecutor:
us.desc = x.desc
us.dirty = getDirtyDB(b.ctx).getDirtyTable(x.table.Meta().ID)
us.conditions = v.Conditions
us.columns = x.columns
b.err = us.buildAndSortAddedRows(x.table)
case *IndexReaderExecutor:
us.desc = x.desc
for _, ic := range x.index.Columns {
for i, col := range x.schema.Columns {
if col.ColName.L == ic.Name.L {
us.usedIndex = append(us.usedIndex, i)
break
}
}
}
us.dirty = getDirtyDB(b.ctx).getDirtyTable(x.table.Meta().ID)
us.conditions = v.Conditions
us.columns = x.columns
b.err = us.buildAndSortAddedRows(x.table)
case *IndexLookUpExecutor:
us.desc = x.desc
for _, ic := range x.index.Columns {
for i, col := range x.schema.Columns {
if col.ColName.L == ic.Name.L {
us.usedIndex = append(us.usedIndex, i)
break
}
}
}
us.dirty = getDirtyDB(b.ctx).getDirtyTable(x.table.Meta().ID)
us.conditions = v.Conditions
us.columns = x.columns
b.err = us.buildAndSortAddedRows(x.table)
default:
// The mem table will not be written by sql directly, so we can omit the union scan to avoid err reporting.
return src
}
if b.err != nil {
return nil
}
return us
}
// buildMergeJoin builds MergeJoinExec executor.
func (b *executorBuilder) buildMergeJoin(v *plan.PhysicalMergeJoin) Executor {
leftExec := b.build(v.Children()[0])
if b.err != nil {
b.err = errors.Trace(b.err)
return nil
}
rightExec := b.build(v.Children()[1])
if b.err != nil {
b.err = errors.Trace(b.err)
return nil
}
leftKeys := make([]*expression.Column, 0, len(v.EqualConditions))
rightKeys := make([]*expression.Column, 0, len(v.EqualConditions))
for _, eqCond := range v.EqualConditions {
if len(eqCond.GetArgs()) != 2 {
b.err = errors.Annotate(ErrBuildExecutor, "invalid join key for equal condition")
return nil
}
leftKey, ok := eqCond.GetArgs()[0].(*expression.Column)
if !ok {
b.err = errors.Annotate(ErrBuildExecutor, "left side of join key must be column for merge join")
return nil
}
rightKey, ok := eqCond.GetArgs()[1].(*expression.Column)
if !ok {
b.err = errors.Annotate(ErrBuildExecutor, "right side of join key must be column for merge join")
return nil
}
leftKeys = append(leftKeys, leftKey)
rightKeys = append(rightKeys, rightKey)
}
leftRowBlock := &rowBlockIterator{
ctx: b.ctx,
reader: leftExec,
filter: v.LeftConditions,
joinKeys: leftKeys,
}
rightRowBlock := &rowBlockIterator{
ctx: b.ctx,
reader: rightExec,
filter: v.RightConditions,
joinKeys: rightKeys,
}
defaultValues := v.DefaultValues
if defaultValues == nil {
defaultValues = make([]types.Datum, rightExec.Schema().Len())
}
e := &MergeJoinExec{
baseExecutor: newBaseExecutor(v.Schema(), b.ctx, leftExec, rightExec),
resultGenerator: newJoinResultGenerator(b.ctx, v.JoinType, false, defaultValues, v.OtherConditions, nil, nil),
stmtCtx: b.ctx.GetSessionVars().StmtCtx,
// left is the outer side by default.
outerKeys: leftKeys,
innerKeys: rightKeys,
outerIter: leftRowBlock,
innerIter: rightRowBlock,
}
if v.JoinType == plan.RightOuterJoin {
e.outerKeys, e.innerKeys = e.innerKeys, e.outerKeys
e.outerIter, e.innerIter = e.innerIter, e.outerIter
}
if v.JoinType != plan.InnerJoin {
e.outerFilter = e.outerIter.filter
e.outerIter.filter = nil
}
return e
}
func (b *executorBuilder) buildHashJoin(v *plan.PhysicalHashJoin) Executor {
leftHashKey := make([]*expression.Column, 0, len(v.EqualConditions))
rightHashKey := make([]*expression.Column, 0, len(v.EqualConditions))
for _, eqCond := range v.EqualConditions {
ln, _ := eqCond.GetArgs()[0].(*expression.Column)
rn, _ := eqCond.GetArgs()[1].(*expression.Column)
leftHashKey = append(leftHashKey, ln)
rightHashKey = append(rightHashKey, rn)
}
leftExec := b.build(v.Children()[0])
rightExec := b.build(v.Children()[1])
// for hash join, inner table is always the smaller one.
e := &HashJoinExec{
baseExecutor: newBaseExecutor(v.Schema(), b.ctx, leftExec, rightExec),
concurrency: v.Concurrency,
joinType: v.JoinType,
}
defaultValues := v.DefaultValues
if v.SmallChildIdx == 0 {
e.innerExec = leftExec
e.outerExec = rightExec
e.innerFilter = v.LeftConditions
e.outerFilter = v.RightConditions
e.innerKeys = leftHashKey
e.outerKeys = rightHashKey
if defaultValues == nil {
defaultValues = make([]types.Datum, e.innerExec.Schema().Len())
}
e.resultGenerator = newJoinResultGenerator(b.ctx, v.JoinType, v.SmallChildIdx == 0, defaultValues,
v.OtherConditions, nil, nil)
} else {
e.innerExec = rightExec
e.outerExec = leftExec
e.innerFilter = v.RightConditions
e.outerFilter = v.LeftConditions
e.innerKeys = rightHashKey
e.outerKeys = leftHashKey
if defaultValues == nil {
defaultValues = make([]types.Datum, e.innerExec.Schema().Len())
}
e.resultGenerator = newJoinResultGenerator(b.ctx, v.JoinType, v.SmallChildIdx == 0,
defaultValues, v.OtherConditions, nil, nil)
}
return e
}
// buildSemiJoin builds a HashSemiJoinExec from the given physical hash semi
// join plan. The left child is the big (probe) side and the right child the
// small (build) side; equal-conditions supply the hash keys for each side.
func (b *executorBuilder) buildSemiJoin(v *plan.PhysicalHashSemiJoin) *HashSemiJoinExec {
	leftHashKey := make([]*expression.Column, 0, len(v.EqualConditions))
	rightHashKey := make([]*expression.Column, 0, len(v.EqualConditions))
	for _, eqCond := range v.EqualConditions {
		// Each equal-condition is expected to be "leftCol = rightCol".
		ln, _ := eqCond.GetArgs()[0].(*expression.Column)
		rn, _ := eqCond.GetArgs()[1].(*expression.Column)
		leftHashKey = append(leftHashKey, ln)
		rightHashKey = append(rightHashKey, rn)
	}
	e := &HashSemiJoinExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
		otherFilter:  v.OtherConditions,
		bigFilter:    v.LeftConditions,
		smallFilter:  v.RightConditions,
		bigExec:      b.build(v.Children()[0]),
		smallExec:    b.build(v.Children()[1]),
		prepared:     false,
		bigHashKey:   leftHashKey,
		smallHashKey: rightHashKey,
		auxMode:      v.WithAux, // aux mode emits a match flag column instead of filtering
		anti:         v.Anti,
	}
	return e
}
// buildHashAgg builds a hash-based aggregation executor over the child's
// output, grouping by v.GroupByItems and evaluating v.AggFuncs.
func (b *executorBuilder) buildHashAgg(v *plan.PhysicalHashAgg) Executor {
	src := b.build(v.Children()[0])
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	return &HashAggExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx, src),
		sc:           b.ctx.GetSessionVars().StmtCtx,
		AggFuncs:     v.AggFuncs,
		GroupByItems: v.GroupByItems,
	}
}
// buildStreamAgg builds a streaming aggregation executor; it assumes the
// child's output is already ordered on the group-by columns.
func (b *executorBuilder) buildStreamAgg(v *plan.PhysicalStreamAgg) Executor {
	src := b.build(v.Children()[0])
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	return &StreamAggExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx, src),
		StmtCtx:      b.ctx.GetSessionVars().StmtCtx,
		AggFuncs:     v.AggFuncs,
		GroupByItems: v.GroupByItems,
	}
}
// buildSelection builds a SelectionExec that filters the child's rows with
// v.Conditions.
func (b *executorBuilder) buildSelection(v *plan.PhysicalSelection) Executor {
	childExec := b.build(v.Children()[0])
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	e := &SelectionExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx, childExec),
		filters:      v.Conditions,
	}
	// This executor supports the chunk-based execution model.
	e.supportChk = true
	return e
}
// buildProjection builds a ProjectionExec that evaluates v.Exprs over each
// row produced by the child.
func (b *executorBuilder) buildProjection(v *plan.PhysicalProjection) Executor {
	childExec := b.build(v.Children()[0])
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	e := &ProjectionExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx, childExec),
		exprs:        v.Exprs,
	}
	// This executor supports the chunk-based execution model.
	e.baseExecutor.supportChk = true
	return e
}
// buildTableDual builds an executor that emits v.RowCount rows without
// reading any table (e.g. for queries with no FROM clause).
func (b *executorBuilder) buildTableDual(v *plan.PhysicalTableDual) Executor {
	return &TableDualExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
		rowCount:     v.RowCount,
	}
}
// getStartTS returns the start timestamp used by this builder, computing it
// once and caching it in b.startTS. A non-zero snapshot TS from the session
// variables takes precedence over the current transaction's start TS.
func (b *executorBuilder) getStartTS() uint64 {
	if b.startTS == 0 {
		if snapshotTS := b.ctx.GetSessionVars().SnapshotTS; snapshotTS != 0 {
			b.startTS = snapshotTS
		} else {
			b.startTS = b.ctx.Txn().StartTS()
		}
	}
	return b.startTS
}
// buildMemTable builds a scan over a memory/virtual table. The scan starts
// from the minimal handle and walks the given ranges.
func (b *executorBuilder) buildMemTable(v *plan.PhysicalMemTable) Executor {
	tb, _ := b.is.TableByID(v.Table.ID)
	ts := &TableScanExec{
		baseExecutor:   newBaseExecutor(v.Schema(), b.ctx),
		t:              tb,
		columns:        v.Columns,
		seekHandle:     math.MinInt64,
		ranges:         v.Ranges,
		isVirtualTable: tb.Type() == table.VirtualTable,
	}
	return ts
}
// buildSort builds a SortExec that orders the child's output by v.ByItems.
func (b *executorBuilder) buildSort(v *plan.PhysicalSort) Executor {
	child := b.build(v.Children()[0])
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	e := &SortExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx, child),
		ByItems:      v.ByItems,
		schema:       v.Schema(),
	}
	// This executor supports the chunk-based execution model.
	e.supportChk = true
	return e
}
// buildTopN builds a TopNExec: a sort over the child restricted by an
// offset/count limit, so only the top-N rows need to be kept.
func (b *executorBuilder) buildTopN(v *plan.PhysicalTopN) Executor {
	childExec := b.build(v.Children()[0])
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	sortExec := SortExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx, childExec),
		ByItems:      v.ByItems,
		schema:       v.Schema(),
	}
	sortExec.supportChk = true
	return &TopNExec{
		SortExec: sortExec,
		limit:    &plan.PhysicalLimit{Count: v.Count, Offset: v.Offset},
	}
}
// buildNestedLoopJoin builds a NestedLoopJoinExec from a physical hash join
// plan. Equal-conditions are folded into OtherFilter after resolving their
// column offsets against the join schema. v.SmallChildIdx picks which child
// is the small (inner) side; defaultValues pads unmatched big-side rows when
// the join is an outer join.
// NOTE(review): unlike sibling builders, the b.build calls below do not check
// b.err before the executors are used — confirm callers always inspect b.err
// after this returns.
func (b *executorBuilder) buildNestedLoopJoin(v *plan.PhysicalHashJoin) *NestedLoopJoinExec {
	for _, cond := range v.EqualConditions {
		cond.GetArgs()[0].(*expression.Column).ResolveIndices(v.Schema())
		cond.GetArgs()[1].(*expression.Column).ResolveIndices(v.Schema())
	}
	defaultValues := v.DefaultValues
	if v.SmallChildIdx == 1 {
		// Right child is the small side.
		if defaultValues == nil {
			defaultValues = make([]types.Datum, v.Children()[1].Schema().Len())
		}
		return &NestedLoopJoinExec{
			baseExecutor:  newBaseExecutor(v.Schema(), b.ctx),
			SmallExec:     b.build(v.Children()[1]),
			BigExec:       b.build(v.Children()[0]),
			BigFilter:     v.LeftConditions,
			SmallFilter:   v.RightConditions,
			OtherFilter:   append(expression.ScalarFuncs2Exprs(v.EqualConditions), v.OtherConditions...),
			outer:         v.JoinType != plan.InnerJoin,
			defaultValues: defaultValues,
		}
	}
	// Left child is the small side.
	if defaultValues == nil {
		defaultValues = make([]types.Datum, v.Children()[0].Schema().Len())
	}
	return &NestedLoopJoinExec{
		baseExecutor:  newBaseExecutor(v.Schema(), b.ctx),
		SmallExec:     b.build(v.Children()[0]),
		BigExec:       b.build(v.Children()[1]),
		leftSmall:     true,
		BigFilter:     v.RightConditions,
		SmallFilter:   v.LeftConditions,
		OtherFilter:   append(expression.ScalarFuncs2Exprs(v.EqualConditions), v.OtherConditions...),
		outer:         v.JoinType != plan.InnerJoin,
		defaultValues: defaultValues,
	}
}
// buildApply builds an ApplyJoinExec, which re-evaluates its inner join for
// every outer row (correlated execution). Only hash semi joins and
// inner/left-outer/right-outer hash joins (run as nested loop joins) are
// supported; anything else records an error on the builder and returns nil.
func (b *executorBuilder) buildApply(v *plan.PhysicalApply) Executor {
	var join joinExec
	switch x := v.PhysicalJoin.(type) {
	case *plan.PhysicalHashSemiJoin:
		join = b.buildSemiJoin(x)
	case *plan.PhysicalHashJoin:
		if x.JoinType == plan.InnerJoin || x.JoinType == plan.LeftOuterJoin || x.JoinType == plan.RightOuterJoin {
			join = b.buildNestedLoopJoin(x)
		} else {
			b.err = errors.Errorf("Unsupported join type %v in nested loop join", x.JoinType)
		}
	default:
		b.err = errors.Errorf("Unsupported plan type %T in apply", v)
	}
	// Do not hand back a half-built executor with a nil join; siblings return
	// nil once b.err is set, and callers check b.err.
	if b.err != nil {
		return nil
	}
	apply := &ApplyJoinExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
		join:         join,
		outerSchema:  v.OuterSchema,
	}
	return apply
}
// buildExists builds an ExistsExec that reports whether the child produces
// at least one row.
func (b *executorBuilder) buildExists(v *plan.PhysicalExists) Executor {
	childExec := b.build(v.Children()[0])
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	e := &ExistsExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx, childExec),
	}
	// This executor supports the chunk-based execution model.
	e.supportChk = true
	return e
}
// buildMaxOneRow builds a MaxOneRowExec that enforces the child returns at
// most one row (used for scalar subqueries).
func (b *executorBuilder) buildMaxOneRow(v *plan.PhysicalMaxOneRow) Executor {
	childExec := b.build(v.Children()[0])
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	return &MaxOneRowExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx, childExec),
	}
}
// buildUnionAll builds a UnionExec that concatenates the output of all
// children. Building stops at the first child that fails.
func (b *executorBuilder) buildUnionAll(v *plan.PhysicalUnionAll) Executor {
	childExecs := make([]Executor, len(v.Children()))
	for i, child := range v.Children() {
		childExecs[i] = b.build(child)
		if b.err != nil {
			b.err = errors.Trace(b.err)
			return nil
		}
	}
	e := &UnionExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx, childExecs...),
	}
	// This executor supports the chunk-based execution model.
	e.supportChk = true
	return e
}
// buildUpdate builds an UpdateExec. It resolves every table referenced by the
// select plan's schema so the executor can write back rows by table ID.
func (b *executorBuilder) buildUpdate(v *plan.Update) Executor {
	tblID2table := make(map[int64]table.Table)
	for id := range v.SelectPlan.Schema().TblID2Handle {
		tblID2table[id], _ = b.is.TableByID(id)
	}
	selExec := b.build(v.SelectPlan)
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	updateExec := &UpdateExec{
		baseExecutor: newBaseExecutor(nil, b.ctx, selExec), // UPDATE produces no result schema
		SelectExec:   selExec,
		OrderedList:  v.OrderedList,
		tblID2table:  tblID2table,
		IgnoreErr:    v.IgnoreErr,
	}
	updateExec.supportChk = true
	return updateExec
}
// buildDelete builds a DeleteExec. Like buildUpdate, it resolves the tables
// referenced by the select plan's schema for write-back by table ID.
func (b *executorBuilder) buildDelete(v *plan.Delete) Executor {
	tblID2table := make(map[int64]table.Table)
	for id := range v.SelectPlan.Schema().TblID2Handle {
		tblID2table[id], _ = b.is.TableByID(id)
	}
	selExec := b.build(v.SelectPlan)
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	return &DeleteExec{
		baseExecutor: newBaseExecutor(nil, b.ctx), // DELETE produces no result schema
		SelectExec:   selExec,
		Tables:       v.Tables,
		IsMultiTable: v.IsMultiTable,
		tblID2Table:  tblID2table,
	}
}
// buildAnalyzeIndexPushdown builds the executor that pushes an index-analyze
// request down to the storage layer. The request reads at StartTs MaxUint64,
// i.e. the most recent version of every key.
func (b *executorBuilder) buildAnalyzeIndexPushdown(task plan.AnalyzeIndexTask) *AnalyzeIndexExec {
	e := &AnalyzeIndexExec{
		ctx:         b.ctx,
		tblInfo:     task.TableInfo,
		idxInfo:     task.IndexInfo,
		concurrency: b.ctx.GetSessionVars().IndexSerialScanConcurrency,
		priority:    b.priority,
		analyzePB: &tipb.AnalyzeReq{
			Tp:             tipb.AnalyzeType_TypeIndex,
			StartTs:        math.MaxUint64,
			Flags:          statementContextToFlags(b.ctx.GetSessionVars().StmtCtx),
			TimeZoneOffset: timeZoneOffset(b.ctx),
		},
	}
	e.analyzePB.IdxReq = &tipb.AnalyzeIndexReq{
		BucketSize: maxBucketSize,
		NumColumns: int32(len(task.IndexInfo.Columns)),
	}
	// A CM sketch is only collected for non-unique indexes.
	if !task.IndexInfo.Unique {
		depth := int32(defaultCMSketchDepth)
		width := int32(defaultCMSketchWidth)
		e.analyzePB.IdxReq.CmsketchDepth = &depth
		e.analyzePB.IdxReq.CmsketchWidth = &width
	}
	return e
}
// buildAnalyzeColumnsPushdown builds the executor that pushes a
// column-analyze request down to the storage layer. When the table has an
// integer primary key, the PK column is prepended to the analyzed columns and
// the scan keeps handle order.
func (b *executorBuilder) buildAnalyzeColumnsPushdown(task plan.AnalyzeColumnsTask) *AnalyzeColumnsExec {
	cols := task.ColsInfo
	keepOrder := false
	if task.PKInfo != nil {
		keepOrder = true
		cols = append([]*model.ColumnInfo{task.PKInfo}, cols...)
	}
	e := &AnalyzeColumnsExec{
		ctx:         b.ctx,
		tblInfo:     task.TableInfo,
		colsInfo:    task.ColsInfo,
		pkInfo:      task.PKInfo,
		concurrency: b.ctx.GetSessionVars().DistSQLScanConcurrency,
		priority:    b.priority,
		keepOrder:   keepOrder,
		analyzePB: &tipb.AnalyzeReq{
			Tp:             tipb.AnalyzeType_TypeColumn,
			StartTs:        math.MaxUint64, // read the most recent version
			Flags:          statementContextToFlags(b.ctx.GetSessionVars().StmtCtx),
			TimeZoneOffset: timeZoneOffset(b.ctx),
		},
	}
	depth := int32(defaultCMSketchDepth)
	width := int32(defaultCMSketchWidth)
	e.analyzePB.ColReq = &tipb.AnalyzeColumnsReq{
		BucketSize:    maxBucketSize,
		SampleSize:    maxRegionSampleSize,
		SketchSize:    maxSketchSize,
		ColumnsInfo:   distsql.ColumnsToProto(cols, task.TableInfo.PKIsHandle),
		CmsketchDepth: &depth,
		CmsketchWidth: &width,
	}
	// Default values must be filled so the storage layer can analyze columns
	// that were added after rows were written.
	b.err = setPBColumnsDefaultValue(b.ctx, e.analyzePB.ColReq.ColumnsInfo, cols)
	return e
}
// buildAnalyze builds an AnalyzeExec that runs one pushdown task per column
// group and per index of the ANALYZE statement.
func (b *executorBuilder) buildAnalyze(v *plan.Analyze) Executor {
	e := &AnalyzeExec{
		baseExecutor: newBaseExecutor(nil, b.ctx),
		tasks:        make([]*analyzeTask, 0, len(v.Children())),
	}
	for _, task := range v.ColTasks {
		e.tasks = append(e.tasks, &analyzeTask{
			taskType: colTask,
			colExec:  b.buildAnalyzeColumnsPushdown(task),
		})
	}
	for _, task := range v.IdxTasks {
		e.tasks = append(e.tasks, &analyzeTask{
			taskType: idxTask,
			idxExec:  b.buildAnalyzeIndexPushdown(task),
		})
	}
	return e
}
// constructDAGReq converts a chain of physical plans into a DAG request that
// can be sent to the storage layer, stamping it with the start TS, time-zone
// offset, and statement-context flags.
func (b *executorBuilder) constructDAGReq(plans []plan.PhysicalPlan) (*tipb.DAGRequest, error) {
	dagReq := &tipb.DAGRequest{}
	dagReq.StartTs = b.getStartTS()
	dagReq.TimeZoneOffset = timeZoneOffset(b.ctx)
	sc := b.ctx.GetSessionVars().StmtCtx
	dagReq.Flags = statementContextToFlags(sc)
	for _, p := range plans {
		execPB, err := p.ToPB(b.ctx)
		if err != nil {
			return nil, errors.Trace(err)
		}
		dagReq.Executors = append(dagReq.Executors, execPB)
	}
	return dagReq, nil
}
// buildIndexLookUpJoin builds an index lookup join. If the outer executor
// supports the chunk model, the chunk-based NewIndexLookUpJoin is used;
// otherwise the row-based IndexLookUpJoin is built, batching inner lookups
// unless the plan requires output order to be kept.
func (b *executorBuilder) buildIndexLookUpJoin(v *plan.PhysicalIndexJoin) Executor {
	outerExec := b.build(v.Children()[v.OuterIndex])
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	if outerExec.supportChunk() {
		// All inner data readers support chunk (TableReader, IndexReader,
		// IndexLookUpReader), so we only need to check the outer side.
		return b.buildNewIndexLookUpJoin(v, outerExec)
	}
	batchSize := 1
	if !v.KeepOrder {
		batchSize = b.ctx.GetSessionVars().IndexJoinBatchSize
	}
	innerExecBuilder := &dataReaderBuilder{v.Children()[1-v.OuterIndex], b}
	defaultValues := v.DefaultValues
	if defaultValues == nil {
		defaultValues = make([]types.Datum, innerExecBuilder.Schema().Len())
	}
	return &IndexLookUpJoin{
		baseExecutor:     newBaseExecutor(v.Schema(), b.ctx, outerExec),
		outerExec:        outerExec,
		innerExecBuilder: innerExecBuilder,
		outerKeys:        v.OuterJoinKeys,
		innerKeys:        v.InnerJoinKeys,
		outerFilter:      v.LeftConditions,
		innerFilter:      v.RightConditions,
		resultGenerator:  newJoinResultGenerator(b.ctx, v.JoinType, v.OuterIndex == 1, defaultValues, v.OtherConditions, nil, nil),
		maxBatchSize:     batchSize,
	}
}
// buildNewIndexLookUpJoin builds the chunk-based index lookup join. The
// outer/inner roles are derived from v.OuterIndex (left is outer when it is
// 0); join-key column offsets are collected for both sides, and inner readers
// are built lazily through a dataReaderBuilder.
func (b *executorBuilder) buildNewIndexLookUpJoin(v *plan.PhysicalIndexJoin, outerExec Executor) Executor {
	outerFilter, innerFilter := v.LeftConditions, v.RightConditions
	leftTypes, rightTypes := v.Children()[0].Schema().GetTypes(), v.Children()[1].Schema().GetTypes()
	outerTypes, innerTypes := leftTypes, rightTypes
	if v.OuterIndex == 1 {
		// Right child is the outer side; swap roles.
		outerFilter, innerFilter = v.RightConditions, v.LeftConditions
		outerTypes, innerTypes = rightTypes, leftTypes
	}
	defaultValues := v.DefaultValues
	if defaultValues == nil {
		defaultValues = make([]types.Datum, len(innerTypes))
	}
	innerPlan := v.Children()[1-v.OuterIndex]
	e := &NewIndexLookUpJoin{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx, outerExec),
		outerCtx: outerCtx{
			rowTypes: outerTypes,
			filter:   outerFilter,
		},
		innerCtx: innerCtx{
			readerBuilder: &dataReaderBuilder{innerPlan, b},
			rowTypes:      innerTypes,
			filter:        innerFilter,
		},
		workerWg:        new(sync.WaitGroup),
		resultGenerator: newJoinResultGenerator(b.ctx, v.JoinType, v.OuterIndex == 1, defaultValues, v.OtherConditions, leftTypes, rightTypes),
	}
	e.supportChk = true
	// Record the column offsets of the join keys on each side.
	outerKeyCols := make([]int, len(v.OuterJoinKeys))
	for i := 0; i < len(v.OuterJoinKeys); i++ {
		outerKeyCols[i] = v.OuterJoinKeys[i].Index
	}
	e.outerCtx.keyCols = outerKeyCols
	innerKeyCols := make([]int, len(v.InnerJoinKeys))
	for i := 0; i < len(v.InnerJoinKeys); i++ {
		innerKeyCols[i] = v.InnerJoinKeys[i].Index
	}
	e.innerCtx.keyCols = innerKeyCols
	e.joinResult = e.newChunk()
	return e
}
// buildNoRangeTableReader builds a TableReaderExecutor without key ranges;
// callers fill in the ranges (from the plan or from handles) afterwards.
func buildNoRangeTableReader(b *executorBuilder, v *plan.PhysicalTableReader) (*TableReaderExecutor, error) {
	dagReq, err := b.constructDAGReq(v.TablePlans)
	if err != nil {
		return nil, errors.Trace(err)
	}
	ts := v.TablePlans[0].(*plan.PhysicalTableScan)
	table, _ := b.is.TableByID(ts.Table.ID)
	e := &TableReaderExecutor{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
		dagPB:        dagReq,
		tableID:      ts.Table.ID,
		table:        table,
		keepOrder:    ts.KeepOrder,
		desc:         ts.Desc,
		columns:      ts.Columns,
		priority:     b.priority,
	}
	e.baseExecutor.supportChk = true
	// Output every column of the schema, in order.
	for i := range v.Schema().Columns {
		dagReq.OutputOffsets = append(dagReq.OutputOffsets, uint32(i))
	}
	return e, nil
}
// buildTableReader builds a TableReaderExecutor using the key ranges taken
// from the physical table scan of the plan.
func (b *executorBuilder) buildTableReader(v *plan.PhysicalTableReader) *TableReaderExecutor {
	ret, err := buildNoRangeTableReader(b, v)
	if err != nil {
		b.err = errors.Trace(err)
		return nil
	}
	ts := v.TablePlans[0].(*plan.PhysicalTableScan)
	ret.ranges = ts.Ranges
	return ret
}
// buildNoRangeIndexReader builds an IndexReaderExecutor without key ranges;
// callers fill in the ranges afterwards.
func buildNoRangeIndexReader(b *executorBuilder, v *plan.PhysicalIndexReader) (*IndexReaderExecutor, error) {
	dagReq, err := b.constructDAGReq(v.IndexPlans)
	if err != nil {
		return nil, errors.Trace(err)
	}
	is := v.IndexPlans[0].(*plan.PhysicalIndexScan)
	table, _ := b.is.TableByID(is.Table.ID)
	e := &IndexReaderExecutor{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
		dagPB:        dagReq,
		tableID:      is.Table.ID,
		table:        table,
		index:        is.Index,
		keepOrder:    !is.OutOfOrder,
		desc:         is.Desc,
		columns:      is.Columns,
		priority:     b.priority,
	}
	e.supportChk = true
	for _, col := range v.OutputColumns {
		dagReq.OutputOffsets = append(dagReq.OutputOffsets, uint32(col.Index))
	}
	return e, nil
}
// buildIndexReader builds an IndexReaderExecutor using the key ranges taken
// from the physical index scan of the plan.
func (b *executorBuilder) buildIndexReader(v *plan.PhysicalIndexReader) *IndexReaderExecutor {
	ret, err := buildNoRangeIndexReader(b, v)
	if err != nil {
		b.err = errors.Trace(err)
		return nil
	}
	is := v.IndexPlans[0].(*plan.PhysicalIndexScan)
	ret.ranges = is.Ranges
	return ret
}
// buildNoRangeIndexLookUpReader builds an IndexLookUpExecutor without key
// ranges. It constructs two DAG requests: one for the index side (which only
// outputs the handle column) and one for the table side (which outputs every
// schema column).
func buildNoRangeIndexLookUpReader(b *executorBuilder, v *plan.PhysicalIndexLookUpReader) (*IndexLookUpExecutor, error) {
	indexReq, err := b.constructDAGReq(v.IndexPlans)
	if err != nil {
		return nil, errors.Trace(err)
	}
	tableReq, err := b.constructDAGReq(v.TablePlans)
	if err != nil {
		return nil, errors.Trace(err)
	}
	is := v.IndexPlans[0].(*plan.PhysicalIndexScan)
	// The index side only needs to output the handle, which follows the
	// indexed columns in the returned row.
	indexReq.OutputOffsets = []uint32{uint32(len(is.Index.Columns))}
	table, _ := b.is.TableByID(is.Table.ID)
	for i := 0; i < v.Schema().Len(); i++ {
		tableReq.OutputOffsets = append(tableReq.OutputOffsets, uint32(i))
	}
	e := &IndexLookUpExecutor{
		baseExecutor:      newBaseExecutor(v.Schema(), b.ctx),
		dagPB:             indexReq,
		tableID:           is.Table.ID,
		table:             table,
		index:             is.Index,
		keepOrder:         !is.OutOfOrder,
		desc:              is.Desc,
		tableRequest:      tableReq,
		columns:           is.Columns,
		priority:          b.priority,
		dataReaderBuilder: &dataReaderBuilder{executorBuilder: b},
	}
	e.supportChk = true
	// Remember where the handle column sits in the output schema, if present.
	if cols, ok := v.Schema().TblID2Handle[is.Table.ID]; ok {
		e.handleIdx = cols[0].Index
	}
	return e, nil
}
// buildIndexLookUpReader builds an IndexLookUpExecutor using the key ranges
// taken from the physical index scan of the plan.
func (b *executorBuilder) buildIndexLookUpReader(v *plan.PhysicalIndexLookUpReader) *IndexLookUpExecutor {
	ret, err := buildNoRangeIndexLookUpReader(b, v)
	if err != nil {
		b.err = errors.Trace(err)
		return nil
	}
	is := v.IndexPlans[0].(*plan.PhysicalIndexScan)
	ret.ranges = is.Ranges
	return ret
}
// dataReaderBuilder builds an executor that reads data in ranges constructed
// from datums rather than from the plan.
// Differences from executorBuilder:
//  1. dataReaderBuilder calculates the data range from its arguments, rather
//     than from the plan.
//  2. the resulting executor is already opened.
type dataReaderBuilder struct {
	plan.Plan
	*executorBuilder
}
// buildExecutorForDatums dispatches on the wrapped plan type and builds the
// matching reader over the ranges derived from datums.
func (builder *dataReaderBuilder) buildExecutorForDatums(goCtx goctx.Context, datums [][]types.Datum) (Executor, error) {
	switch v := builder.Plan.(type) {
	case *plan.PhysicalTableReader:
		return builder.buildTableReaderForDatums(goCtx, v, datums)
	case *plan.PhysicalIndexReader:
		return builder.buildIndexReaderForDatums(goCtx, v, datums)
	case *plan.PhysicalIndexLookUpReader:
		return builder.buildIndexLookUpReaderForDatums(goCtx, v, datums)
	default:
		return nil, errors.New("Wrong plan type for dataReaderBuilder")
	}
}
// buildTableReaderForDatums builds a table reader whose ranges come from the
// given datums; each datum row's first column is interpreted as a handle.
func (builder *dataReaderBuilder) buildTableReaderForDatums(goCtx goctx.Context, v *plan.PhysicalTableReader, datums [][]types.Datum) (Executor, error) {
	e, err := buildNoRangeTableReader(builder.executorBuilder, v)
	if err != nil {
		return nil, errors.Trace(err)
	}
	handles := make([]int64, 0, len(datums))
	for _, datum := range datums {
		handles = append(handles, datum[0].GetInt64())
	}
	return builder.buildTableReaderFromHandles(goCtx, e, handles)
}
// buildTableReaderFromHandles opens the given table reader over the supplied
// handles and starts fetching. Handles are sorted first so the request
// builder can construct compact key ranges.
func (builder *dataReaderBuilder) buildTableReaderFromHandles(goCtx goctx.Context, e *TableReaderExecutor, handles []int64) (Executor, error) {
	sort.Sort(sortutil.Int64Slice(handles))
	var b requestBuilder
	kvReq, err := b.SetTableHandles(e.tableID, handles).
		SetDAGRequest(e.dagPB).
		SetDesc(e.desc).
		SetKeepOrder(e.keepOrder).
		SetPriority(e.priority).
		SetFromSessionVars(e.ctx.GetSessionVars()).
		Build()
	if err != nil {
		return nil, errors.Trace(err)
	}
	e.result, err = distsql.SelectDAG(goCtx, builder.ctx, kvReq, e.schema.GetTypes())
	if err != nil {
		return nil, errors.Trace(err)
	}
	e.result.Fetch(goCtx)
	return e, nil
}
// buildIndexReaderForDatums opens an index reader over key ranges built from
// the given index values and starts fetching.
func (builder *dataReaderBuilder) buildIndexReaderForDatums(goCtx goctx.Context, v *plan.PhysicalIndexReader, values [][]types.Datum) (Executor, error) {
	e, err := buildNoRangeIndexReader(builder.executorBuilder, v)
	if err != nil {
		return nil, errors.Trace(err)
	}
	var b requestBuilder
	kvReq, err := b.SetIndexValues(e.tableID, e.index.ID, values).
		SetDAGRequest(e.dagPB).
		SetDesc(e.desc).
		SetKeepOrder(e.keepOrder).
		SetPriority(e.priority).
		SetFromSessionVars(e.ctx.GetSessionVars()).
		Build()
	if err != nil {
		return nil, errors.Trace(err)
	}
	e.result, err = distsql.SelectDAG(goCtx, builder.ctx, kvReq, e.schema.GetTypes())
	if err != nil {
		return nil, errors.Trace(err)
	}
	e.result.Fetch(goCtx)
	return e, nil
}
// buildIndexLookUpReaderForDatums opens an index lookup reader over key
// ranges built from the given index values.
func (builder *dataReaderBuilder) buildIndexLookUpReaderForDatums(goCtx goctx.Context, v *plan.PhysicalIndexLookUpReader, values [][]types.Datum) (Executor, error) {
	e, err := buildNoRangeIndexLookUpReader(builder.executorBuilder, v)
	if err != nil {
		return nil, errors.Trace(err)
	}
	kvRanges, err := indexValuesToKVRanges(e.tableID, e.index.ID, values)
	if err != nil {
		return nil, errors.Trace(err)
	}
	err = e.open(goCtx, kvRanges)
	return e, errors.Trace(err)
}
| executor/builder.go | 1 | https://github.com/pingcap/tidb/commit/b35e024ace8a525b61fd53eae22e9723d06454ca | [
0.9990490078926086,
0.2281470000743866,
0.00016323603631462902,
0.03328113630414009,
0.3467608094215393
] |
{
"id": 0,
"code_window": [
"}\n",
"\n",
"func (b *executorBuilder) buildDeallocate(v *plan.Deallocate) Executor {\n",
"\treturn &DeallocateExec{\n",
"\t\tbaseExecutor: newBaseExecutor(nil, b.ctx),\n",
"\t\tName: v.Name,\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\te := &DeallocateExec{\n"
],
"file_path": "executor/builder.go",
"type": "replace",
"edit_start_line_idx": 207
} | // Created by cgo -godefs - DO NOT EDIT
// cgo -godefs types_netbsd.go
// +build amd64,netbsd
package unix
const (
sizeofPtr = 0x8
sizeofShort = 0x2
sizeofInt = 0x4
sizeofLong = 0x8
sizeofLongLong = 0x8
)
type (
_C_short int16
_C_int int32
_C_long int64
_C_long_long int64
)
type Timespec struct {
Sec int64
Nsec int64
}
type Timeval struct {
Sec int64
Usec int32
Pad_cgo_0 [4]byte
}
type Rusage struct {
Utime Timeval
Stime Timeval
Maxrss int64
Ixrss int64
Idrss int64
Isrss int64
Minflt int64
Majflt int64
Nswap int64
Inblock int64
Oublock int64
Msgsnd int64
Msgrcv int64
Nsignals int64
Nvcsw int64
Nivcsw int64
}
type Rlimit struct {
Cur uint64
Max uint64
}
type _Gid_t uint32
type Stat_t struct {
Dev uint64
Mode uint32
Pad_cgo_0 [4]byte
Ino uint64
Nlink uint32
Uid uint32
Gid uint32
Pad_cgo_1 [4]byte
Rdev uint64
Atimespec Timespec
Mtimespec Timespec
Ctimespec Timespec
Birthtimespec Timespec
Size int64
Blocks int64
Blksize uint32
Flags uint32
Gen uint32
Spare [2]uint32
Pad_cgo_2 [4]byte
}
type Statfs_t [0]byte
type Flock_t struct {
Start int64
Len int64
Pid int32
Type int16
Whence int16
}
type Dirent struct {
Fileno uint64
Reclen uint16
Namlen uint16
Type uint8
Name [512]int8
Pad_cgo_0 [3]byte
}
type Fsid struct {
X__fsid_val [2]int32
}
type RawSockaddrInet4 struct {
Len uint8
Family uint8
Port uint16
Addr [4]byte /* in_addr */
Zero [8]int8
}
type RawSockaddrInet6 struct {
Len uint8
Family uint8
Port uint16
Flowinfo uint32
Addr [16]byte /* in6_addr */
Scope_id uint32
}
type RawSockaddrUnix struct {
Len uint8
Family uint8
Path [104]int8
}
type RawSockaddrDatalink struct {
Len uint8
Family uint8
Index uint16
Type uint8
Nlen uint8
Alen uint8
Slen uint8
Data [12]int8
}
type RawSockaddr struct {
Len uint8
Family uint8
Data [14]int8
}
type RawSockaddrAny struct {
Addr RawSockaddr
Pad [92]int8
}
type _Socklen uint32
type Linger struct {
Onoff int32
Linger int32
}
type Iovec struct {
Base *byte
Len uint64
}
type IPMreq struct {
Multiaddr [4]byte /* in_addr */
Interface [4]byte /* in_addr */
}
type IPv6Mreq struct {
Multiaddr [16]byte /* in6_addr */
Interface uint32
}
type Msghdr struct {
Name *byte
Namelen uint32
Pad_cgo_0 [4]byte
Iov *Iovec
Iovlen int32
Pad_cgo_1 [4]byte
Control *byte
Controllen uint32
Flags int32
}
type Cmsghdr struct {
Len uint32
Level int32
Type int32
}
type Inet6Pktinfo struct {
Addr [16]byte /* in6_addr */
Ifindex uint32
}
type IPv6MTUInfo struct {
Addr RawSockaddrInet6
Mtu uint32
}
type ICMPv6Filter struct {
Filt [8]uint32
}
const (
SizeofSockaddrInet4 = 0x10
SizeofSockaddrInet6 = 0x1c
SizeofSockaddrAny = 0x6c
SizeofSockaddrUnix = 0x6a
SizeofSockaddrDatalink = 0x14
SizeofLinger = 0x8
SizeofIPMreq = 0x8
SizeofIPv6Mreq = 0x14
SizeofMsghdr = 0x30
SizeofCmsghdr = 0xc
SizeofInet6Pktinfo = 0x14
SizeofIPv6MTUInfo = 0x20
SizeofICMPv6Filter = 0x20
)
const (
PTRACE_TRACEME = 0x0
PTRACE_CONT = 0x7
PTRACE_KILL = 0x8
)
type Kevent_t struct {
Ident uint64
Filter uint32
Flags uint32
Fflags uint32
Pad_cgo_0 [4]byte
Data int64
Udata int64
}
type FdSet struct {
Bits [8]uint32
}
const (
SizeofIfMsghdr = 0x98
SizeofIfData = 0x88
SizeofIfaMsghdr = 0x18
SizeofIfAnnounceMsghdr = 0x18
SizeofRtMsghdr = 0x78
SizeofRtMetrics = 0x50
)
type IfMsghdr struct {
Msglen uint16
Version uint8
Type uint8
Addrs int32
Flags int32
Index uint16
Pad_cgo_0 [2]byte
Data IfData
}
type IfData struct {
Type uint8
Addrlen uint8
Hdrlen uint8
Pad_cgo_0 [1]byte
Link_state int32
Mtu uint64
Metric uint64
Baudrate uint64
Ipackets uint64
Ierrors uint64
Opackets uint64
Oerrors uint64
Collisions uint64
Ibytes uint64
Obytes uint64
Imcasts uint64
Omcasts uint64
Iqdrops uint64
Noproto uint64
Lastchange Timespec
}
type IfaMsghdr struct {
Msglen uint16
Version uint8
Type uint8
Addrs int32
Flags int32
Metric int32
Index uint16
Pad_cgo_0 [6]byte
}
type IfAnnounceMsghdr struct {
Msglen uint16
Version uint8
Type uint8
Index uint16
Name [16]int8
What uint16
}
type RtMsghdr struct {
Msglen uint16
Version uint8
Type uint8
Index uint16
Pad_cgo_0 [2]byte
Flags int32
Addrs int32
Pid int32
Seq int32
Errno int32
Use int32
Inits int32
Pad_cgo_1 [4]byte
Rmx RtMetrics
}
type RtMetrics struct {
Locks uint64
Mtu uint64
Hopcount uint64
Recvpipe uint64
Sendpipe uint64
Ssthresh uint64
Rtt uint64
Rttvar uint64
Expire int64
Pksent int64
}
type Mclpool [0]byte
const (
SizeofBpfVersion = 0x4
SizeofBpfStat = 0x80
SizeofBpfProgram = 0x10
SizeofBpfInsn = 0x8
SizeofBpfHdr = 0x20
)
type BpfVersion struct {
Major uint16
Minor uint16
}
type BpfStat struct {
Recv uint64
Drop uint64
Capt uint64
Padding [13]uint64
}
type BpfProgram struct {
Len uint32
Pad_cgo_0 [4]byte
Insns *BpfInsn
}
type BpfInsn struct {
Code uint16
Jt uint8
Jf uint8
K uint32
}
type BpfHdr struct {
Tstamp BpfTimeval
Caplen uint32
Datalen uint32
Hdrlen uint16
Pad_cgo_0 [6]byte
}
type BpfTimeval struct {
Sec int64
Usec int64
}
type Termios struct {
Iflag uint32
Oflag uint32
Cflag uint32
Lflag uint32
Cc [20]uint8
Ispeed int32
Ospeed int32
}
type Sysctlnode struct {
Flags uint32
Num int32
Name [32]int8
Ver uint32
X__rsvd uint32
Un [16]byte
X_sysctl_size [8]byte
X_sysctl_func [8]byte
X_sysctl_parent [8]byte
X_sysctl_desc [8]byte
}
| _vendor/src/golang.org/x/sys/unix/ztypes_netbsd_amd64.go | 0 | https://github.com/pingcap/tidb/commit/b35e024ace8a525b61fd53eae22e9723d06454ca | [
0.0008096212986856699,
0.00019261852139607072,
0.00016360597510356456,
0.00016888110258150846,
0.0001035412133205682
] |
{
"id": 0,
"code_window": [
"}\n",
"\n",
"func (b *executorBuilder) buildDeallocate(v *plan.Deallocate) Executor {\n",
"\treturn &DeallocateExec{\n",
"\t\tbaseExecutor: newBaseExecutor(nil, b.ctx),\n",
"\t\tName: v.Name,\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\te := &DeallocateExec{\n"
],
"file_path": "executor/builder.go",
"type": "replace",
"edit_start_line_idx": 207
} | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package jaeger
import (
"time"
)
// ReporterOption is a function that sets some option on the reporter.
type ReporterOption func(c *reporterOptions)
// ReporterOptions is a factory for all available ReporterOption's
var ReporterOptions reporterOptions
// reporterOptions control behavior of the reporter.
type reporterOptions struct {
// queueSize is the size of internal queue where reported spans are stored before they are processed in the background
queueSize int
// bufferFlushInterval is how often the buffer is force-flushed, even if it's not full
bufferFlushInterval time.Duration
// logger is used to log errors of span submissions
logger Logger
// metrics is used to record runtime stats
metrics *Metrics
}
// QueueSize creates a ReporterOption that sets the size of the internal queue where
// spans are stored before they are processed.
func (reporterOptions) QueueSize(queueSize int) ReporterOption {
return func(r *reporterOptions) {
r.queueSize = queueSize
}
}
// Metrics creates a ReporterOption that initializes Metrics in the reporter,
// which is used to record runtime statistics.
func (reporterOptions) Metrics(metrics *Metrics) ReporterOption {
return func(r *reporterOptions) {
r.metrics = metrics
}
}
// BufferFlushInterval creates a ReporterOption that sets how often the queue
// is force-flushed.
func (reporterOptions) BufferFlushInterval(bufferFlushInterval time.Duration) ReporterOption {
return func(r *reporterOptions) {
r.bufferFlushInterval = bufferFlushInterval
}
}
// Logger creates a ReporterOption that initializes the logger used to log
// errors of span submissions.
func (reporterOptions) Logger(logger Logger) ReporterOption {
return func(r *reporterOptions) {
r.logger = logger
}
}
| _vendor/src/github.com/uber/jaeger-client-go/reporter_options.go | 0 | https://github.com/pingcap/tidb/commit/b35e024ace8a525b61fd53eae22e9723d06454ca | [
0.0003554144059307873,
0.00022340650320984423,
0.00016608914302196354,
0.00018346248543821275,
0.00007641220872756094
] |
{
"id": 0,
"code_window": [
"}\n",
"\n",
"func (b *executorBuilder) buildDeallocate(v *plan.Deallocate) Executor {\n",
"\treturn &DeallocateExec{\n",
"\t\tbaseExecutor: newBaseExecutor(nil, b.ctx),\n",
"\t\tName: v.Name,\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\te := &DeallocateExec{\n"
],
"file_path": "executor/builder.go",
"type": "replace",
"edit_start_line_idx": 207
} | // mksysnum_netbsd.pl
// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT
// +build amd64,netbsd
package unix
const (
SYS_EXIT = 1 // { void|sys||exit(int rval); }
SYS_FORK = 2 // { int|sys||fork(void); }
SYS_READ = 3 // { ssize_t|sys||read(int fd, void *buf, size_t nbyte); }
SYS_WRITE = 4 // { ssize_t|sys||write(int fd, const void *buf, size_t nbyte); }
SYS_OPEN = 5 // { int|sys||open(const char *path, int flags, ... mode_t mode); }
SYS_CLOSE = 6 // { int|sys||close(int fd); }
SYS_LINK = 9 // { int|sys||link(const char *path, const char *link); }
SYS_UNLINK = 10 // { int|sys||unlink(const char *path); }
SYS_CHDIR = 12 // { int|sys||chdir(const char *path); }
SYS_FCHDIR = 13 // { int|sys||fchdir(int fd); }
SYS_CHMOD = 15 // { int|sys||chmod(const char *path, mode_t mode); }
SYS_CHOWN = 16 // { int|sys||chown(const char *path, uid_t uid, gid_t gid); }
SYS_BREAK = 17 // { int|sys||obreak(char *nsize); }
SYS_GETPID = 20 // { pid_t|sys||getpid_with_ppid(void); }
SYS_UNMOUNT = 22 // { int|sys||unmount(const char *path, int flags); }
SYS_SETUID = 23 // { int|sys||setuid(uid_t uid); }
SYS_GETUID = 24 // { uid_t|sys||getuid_with_euid(void); }
SYS_GETEUID = 25 // { uid_t|sys||geteuid(void); }
SYS_PTRACE = 26 // { int|sys||ptrace(int req, pid_t pid, void *addr, int data); }
SYS_RECVMSG = 27 // { ssize_t|sys||recvmsg(int s, struct msghdr *msg, int flags); }
SYS_SENDMSG = 28 // { ssize_t|sys||sendmsg(int s, const struct msghdr *msg, int flags); }
SYS_RECVFROM = 29 // { ssize_t|sys||recvfrom(int s, void *buf, size_t len, int flags, struct sockaddr *from, socklen_t *fromlenaddr); }
SYS_ACCEPT = 30 // { int|sys||accept(int s, struct sockaddr *name, socklen_t *anamelen); }
SYS_GETPEERNAME = 31 // { int|sys||getpeername(int fdes, struct sockaddr *asa, socklen_t *alen); }
SYS_GETSOCKNAME = 32 // { int|sys||getsockname(int fdes, struct sockaddr *asa, socklen_t *alen); }
SYS_ACCESS = 33 // { int|sys||access(const char *path, int flags); }
SYS_CHFLAGS = 34 // { int|sys||chflags(const char *path, u_long flags); }
SYS_FCHFLAGS = 35 // { int|sys||fchflags(int fd, u_long flags); }
SYS_SYNC = 36 // { void|sys||sync(void); }
SYS_KILL = 37 // { int|sys||kill(pid_t pid, int signum); }
SYS_GETPPID = 39 // { pid_t|sys||getppid(void); }
SYS_DUP = 41 // { int|sys||dup(int fd); }
SYS_PIPE = 42 // { int|sys||pipe(void); }
SYS_GETEGID = 43 // { gid_t|sys||getegid(void); }
SYS_PROFIL = 44 // { int|sys||profil(char *samples, size_t size, u_long offset, u_int scale); }
SYS_KTRACE = 45 // { int|sys||ktrace(const char *fname, int ops, int facs, pid_t pid); }
SYS_GETGID = 47 // { gid_t|sys||getgid_with_egid(void); }
SYS___GETLOGIN = 49 // { int|sys||__getlogin(char *namebuf, size_t namelen); }
SYS___SETLOGIN = 50 // { int|sys||__setlogin(const char *namebuf); }
SYS_ACCT = 51 // { int|sys||acct(const char *path); }
SYS_IOCTL = 54 // { int|sys||ioctl(int fd, u_long com, ... void *data); }
SYS_REVOKE = 56 // { int|sys||revoke(const char *path); }
SYS_SYMLINK = 57 // { int|sys||symlink(const char *path, const char *link); }
SYS_READLINK = 58 // { ssize_t|sys||readlink(const char *path, char *buf, size_t count); }
SYS_EXECVE = 59 // { int|sys||execve(const char *path, char * const *argp, char * const *envp); }
SYS_UMASK = 60 // { mode_t|sys||umask(mode_t newmask); }
SYS_CHROOT = 61 // { int|sys||chroot(const char *path); }
SYS_VFORK = 66 // { int|sys||vfork(void); }
SYS_SBRK = 69 // { int|sys||sbrk(intptr_t incr); }
SYS_SSTK = 70 // { int|sys||sstk(int incr); }
SYS_VADVISE = 72 // { int|sys||ovadvise(int anom); }
SYS_MUNMAP = 73 // { int|sys||munmap(void *addr, size_t len); }
SYS_MPROTECT = 74 // { int|sys||mprotect(void *addr, size_t len, int prot); }
SYS_MADVISE = 75 // { int|sys||madvise(void *addr, size_t len, int behav); }
SYS_MINCORE = 78 // { int|sys||mincore(void *addr, size_t len, char *vec); }
SYS_GETGROUPS = 79 // { int|sys||getgroups(int gidsetsize, gid_t *gidset); }
SYS_SETGROUPS = 80 // { int|sys||setgroups(int gidsetsize, const gid_t *gidset); }
SYS_GETPGRP = 81 // { int|sys||getpgrp(void); }
SYS_SETPGID = 82 // { int|sys||setpgid(pid_t pid, pid_t pgid); }
SYS_DUP2 = 90 // { int|sys||dup2(int from, int to); }
SYS_FCNTL = 92 // { int|sys||fcntl(int fd, int cmd, ... void *arg); }
SYS_FSYNC = 95 // { int|sys||fsync(int fd); }
SYS_SETPRIORITY = 96 // { int|sys||setpriority(int which, id_t who, int prio); }
SYS_CONNECT = 98 // { int|sys||connect(int s, const struct sockaddr *name, socklen_t namelen); }
SYS_GETPRIORITY = 100 // { int|sys||getpriority(int which, id_t who); }
SYS_BIND = 104 // { int|sys||bind(int s, const struct sockaddr *name, socklen_t namelen); }
SYS_SETSOCKOPT = 105 // { int|sys||setsockopt(int s, int level, int name, const void *val, socklen_t valsize); }
SYS_LISTEN = 106 // { int|sys||listen(int s, int backlog); }
SYS_GETSOCKOPT = 118 // { int|sys||getsockopt(int s, int level, int name, void *val, socklen_t *avalsize); }
SYS_READV = 120 // { ssize_t|sys||readv(int fd, const struct iovec *iovp, int iovcnt); }
SYS_WRITEV = 121 // { ssize_t|sys||writev(int fd, const struct iovec *iovp, int iovcnt); }
SYS_FCHOWN = 123 // { int|sys||fchown(int fd, uid_t uid, gid_t gid); }
SYS_FCHMOD = 124 // { int|sys||fchmod(int fd, mode_t mode); }
SYS_SETREUID = 126 // { int|sys||setreuid(uid_t ruid, uid_t euid); }
SYS_SETREGID = 127 // { int|sys||setregid(gid_t rgid, gid_t egid); }
SYS_RENAME = 128 // { int|sys||rename(const char *from, const char *to); }
SYS_FLOCK = 131 // { int|sys||flock(int fd, int how); }
SYS_MKFIFO = 132 // { int|sys||mkfifo(const char *path, mode_t mode); }
SYS_SENDTO = 133 // { ssize_t|sys||sendto(int s, const void *buf, size_t len, int flags, const struct sockaddr *to, socklen_t tolen); }
SYS_SHUTDOWN = 134 // { int|sys||shutdown(int s, int how); }
SYS_SOCKETPAIR = 135 // { int|sys||socketpair(int domain, int type, int protocol, int *rsv); }
SYS_MKDIR = 136 // { int|sys||mkdir(const char *path, mode_t mode); }
SYS_RMDIR = 137 // { int|sys||rmdir(const char *path); }
SYS_SETSID = 147 // { int|sys||setsid(void); }
SYS_SYSARCH = 165 // { int|sys||sysarch(int op, void *parms); }
SYS_PREAD = 173 // { ssize_t|sys||pread(int fd, void *buf, size_t nbyte, int PAD, off_t offset); }
SYS_PWRITE = 174 // { ssize_t|sys||pwrite(int fd, const void *buf, size_t nbyte, int PAD, off_t offset); }
SYS_NTP_ADJTIME = 176 // { int|sys||ntp_adjtime(struct timex *tp); }
SYS_SETGID = 181 // { int|sys||setgid(gid_t gid); }
SYS_SETEGID = 182 // { int|sys||setegid(gid_t egid); }
SYS_SETEUID = 183 // { int|sys||seteuid(uid_t euid); }
SYS_PATHCONF = 191 // { long|sys||pathconf(const char *path, int name); }
SYS_FPATHCONF = 192 // { long|sys||fpathconf(int fd, int name); }
SYS_GETRLIMIT = 194 // { int|sys||getrlimit(int which, struct rlimit *rlp); }
SYS_SETRLIMIT = 195 // { int|sys||setrlimit(int which, const struct rlimit *rlp); }
SYS_MMAP = 197 // { void *|sys||mmap(void *addr, size_t len, int prot, int flags, int fd, long PAD, off_t pos); }
SYS_LSEEK = 199 // { off_t|sys||lseek(int fd, int PAD, off_t offset, int whence); }
SYS_TRUNCATE = 200 // { int|sys||truncate(const char *path, int PAD, off_t length); }
SYS_FTRUNCATE = 201 // { int|sys||ftruncate(int fd, int PAD, off_t length); }
SYS___SYSCTL = 202 // { int|sys||__sysctl(const int *name, u_int namelen, void *old, size_t *oldlenp, const void *new, size_t newlen); }
SYS_MLOCK = 203 // { int|sys||mlock(const void *addr, size_t len); }
SYS_MUNLOCK = 204 // { int|sys||munlock(const void *addr, size_t len); }
SYS_UNDELETE = 205 // { int|sys||undelete(const char *path); }
SYS_GETPGID = 207 // { pid_t|sys||getpgid(pid_t pid); }
SYS_REBOOT = 208 // { int|sys||reboot(int opt, char *bootstr); }
SYS_POLL = 209 // { int|sys||poll(struct pollfd *fds, u_int nfds, int timeout); }
SYS_SEMGET = 221 // { int|sys||semget(key_t key, int nsems, int semflg); }
SYS_SEMOP = 222 // { int|sys||semop(int semid, struct sembuf *sops, size_t nsops); }
SYS_SEMCONFIG = 223 // { int|sys||semconfig(int flag); }
SYS_MSGGET = 225 // { int|sys||msgget(key_t key, int msgflg); }
SYS_MSGSND = 226 // { int|sys||msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); }
SYS_MSGRCV = 227 // { ssize_t|sys||msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); }
SYS_SHMAT = 228 // { void *|sys||shmat(int shmid, const void *shmaddr, int shmflg); }
SYS_SHMDT = 230 // { int|sys||shmdt(const void *shmaddr); }
SYS_SHMGET = 231 // { int|sys||shmget(key_t key, size_t size, int shmflg); }
SYS_TIMER_CREATE = 235 // { int|sys||timer_create(clockid_t clock_id, struct sigevent *evp, timer_t *timerid); }
SYS_TIMER_DELETE = 236 // { int|sys||timer_delete(timer_t timerid); }
SYS_TIMER_GETOVERRUN = 239 // { int|sys||timer_getoverrun(timer_t timerid); }
SYS_FDATASYNC = 241 // { int|sys||fdatasync(int fd); }
SYS_MLOCKALL = 242 // { int|sys||mlockall(int flags); }
SYS_MUNLOCKALL = 243 // { int|sys||munlockall(void); }
SYS_SIGQUEUEINFO = 245 // { int|sys||sigqueueinfo(pid_t pid, const siginfo_t *info); }
SYS_MODCTL = 246 // { int|sys||modctl(int cmd, void *arg); }
SYS___POSIX_RENAME = 270 // { int|sys||__posix_rename(const char *from, const char *to); }
SYS_SWAPCTL = 271 // { int|sys||swapctl(int cmd, void *arg, int misc); }
SYS_MINHERIT = 273 // { int|sys||minherit(void *addr, size_t len, int inherit); }
SYS_LCHMOD = 274 // { int|sys||lchmod(const char *path, mode_t mode); }
SYS_LCHOWN = 275 // { int|sys||lchown(const char *path, uid_t uid, gid_t gid); }
SYS___POSIX_CHOWN = 283 // { int|sys||__posix_chown(const char *path, uid_t uid, gid_t gid); }
SYS___POSIX_FCHOWN = 284 // { int|sys||__posix_fchown(int fd, uid_t uid, gid_t gid); }
SYS___POSIX_LCHOWN = 285 // { int|sys||__posix_lchown(const char *path, uid_t uid, gid_t gid); }
SYS_GETSID = 286 // { pid_t|sys||getsid(pid_t pid); }
SYS___CLONE = 287 // { pid_t|sys||__clone(int flags, void *stack); }
SYS_FKTRACE = 288 // { int|sys||fktrace(int fd, int ops, int facs, pid_t pid); }
SYS_PREADV = 289 // { ssize_t|sys||preadv(int fd, const struct iovec *iovp, int iovcnt, int PAD, off_t offset); }
SYS_PWRITEV = 290 // { ssize_t|sys||pwritev(int fd, const struct iovec *iovp, int iovcnt, int PAD, off_t offset); }
SYS___GETCWD = 296 // { int|sys||__getcwd(char *bufp, size_t length); }
SYS_FCHROOT = 297 // { int|sys||fchroot(int fd); }
SYS_LCHFLAGS = 304 // { int|sys||lchflags(const char *path, u_long flags); }
SYS_ISSETUGID = 305 // { int|sys||issetugid(void); }
SYS_UTRACE = 306 // { int|sys||utrace(const char *label, void *addr, size_t len); }
SYS_GETCONTEXT = 307 // { int|sys||getcontext(struct __ucontext *ucp); }
SYS_SETCONTEXT = 308 // { int|sys||setcontext(const struct __ucontext *ucp); }
SYS__LWP_CREATE = 309 // { int|sys||_lwp_create(const struct __ucontext *ucp, u_long flags, lwpid_t *new_lwp); }
SYS__LWP_EXIT = 310 // { int|sys||_lwp_exit(void); }
SYS__LWP_SELF = 311 // { lwpid_t|sys||_lwp_self(void); }
SYS__LWP_WAIT = 312 // { int|sys||_lwp_wait(lwpid_t wait_for, lwpid_t *departed); }
SYS__LWP_SUSPEND = 313 // { int|sys||_lwp_suspend(lwpid_t target); }
SYS__LWP_CONTINUE = 314 // { int|sys||_lwp_continue(lwpid_t target); }
SYS__LWP_WAKEUP = 315 // { int|sys||_lwp_wakeup(lwpid_t target); }
SYS__LWP_GETPRIVATE = 316 // { void *|sys||_lwp_getprivate(void); }
SYS__LWP_SETPRIVATE = 317 // { void|sys||_lwp_setprivate(void *ptr); }
SYS__LWP_KILL = 318 // { int|sys||_lwp_kill(lwpid_t target, int signo); }
SYS__LWP_DETACH = 319 // { int|sys||_lwp_detach(lwpid_t target); }
SYS__LWP_UNPARK = 321 // { int|sys||_lwp_unpark(lwpid_t target, const void *hint); }
SYS__LWP_UNPARK_ALL = 322 // { ssize_t|sys||_lwp_unpark_all(const lwpid_t *targets, size_t ntargets, const void *hint); }
SYS__LWP_SETNAME = 323 // { int|sys||_lwp_setname(lwpid_t target, const char *name); }
SYS__LWP_GETNAME = 324 // { int|sys||_lwp_getname(lwpid_t target, char *name, size_t len); }
SYS__LWP_CTL = 325 // { int|sys||_lwp_ctl(int features, struct lwpctl **address); }
SYS___SIGACTION_SIGTRAMP = 340 // { int|sys||__sigaction_sigtramp(int signum, const struct sigaction *nsa, struct sigaction *osa, const void *tramp, int vers); }
SYS_PMC_GET_INFO = 341 // { int|sys||pmc_get_info(int ctr, int op, void *args); }
SYS_PMC_CONTROL = 342 // { int|sys||pmc_control(int ctr, int op, void *args); }
SYS_RASCTL = 343 // { int|sys||rasctl(void *addr, size_t len, int op); }
SYS_KQUEUE = 344 // { int|sys||kqueue(void); }
SYS__SCHED_SETPARAM = 346 // { int|sys||_sched_setparam(pid_t pid, lwpid_t lid, int policy, const struct sched_param *params); }
SYS__SCHED_GETPARAM = 347 // { int|sys||_sched_getparam(pid_t pid, lwpid_t lid, int *policy, struct sched_param *params); }
SYS__SCHED_SETAFFINITY = 348 // { int|sys||_sched_setaffinity(pid_t pid, lwpid_t lid, size_t size, const cpuset_t *cpuset); }
SYS__SCHED_GETAFFINITY = 349 // { int|sys||_sched_getaffinity(pid_t pid, lwpid_t lid, size_t size, cpuset_t *cpuset); }
SYS_SCHED_YIELD = 350 // { int|sys||sched_yield(void); }
SYS_FSYNC_RANGE = 354 // { int|sys||fsync_range(int fd, int flags, off_t start, off_t length); }
SYS_UUIDGEN = 355 // { int|sys||uuidgen(struct uuid *store, int count); }
SYS_GETVFSSTAT = 356 // { int|sys||getvfsstat(struct statvfs *buf, size_t bufsize, int flags); }
SYS_STATVFS1 = 357 // { int|sys||statvfs1(const char *path, struct statvfs *buf, int flags); }
SYS_FSTATVFS1 = 358 // { int|sys||fstatvfs1(int fd, struct statvfs *buf, int flags); }
SYS_EXTATTRCTL = 360 // { int|sys||extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); }
SYS_EXTATTR_SET_FILE = 361 // { int|sys||extattr_set_file(const char *path, int attrnamespace, const char *attrname, const void *data, size_t nbytes); }
SYS_EXTATTR_GET_FILE = 362 // { ssize_t|sys||extattr_get_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
SYS_EXTATTR_DELETE_FILE = 363 // { int|sys||extattr_delete_file(const char *path, int attrnamespace, const char *attrname); }
SYS_EXTATTR_SET_FD = 364 // { int|sys||extattr_set_fd(int fd, int attrnamespace, const char *attrname, const void *data, size_t nbytes); }
SYS_EXTATTR_GET_FD = 365 // { ssize_t|sys||extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
SYS_EXTATTR_DELETE_FD = 366 // { int|sys||extattr_delete_fd(int fd, int attrnamespace, const char *attrname); }
SYS_EXTATTR_SET_LINK = 367 // { int|sys||extattr_set_link(const char *path, int attrnamespace, const char *attrname, const void *data, size_t nbytes); }
SYS_EXTATTR_GET_LINK = 368 // { ssize_t|sys||extattr_get_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
SYS_EXTATTR_DELETE_LINK = 369 // { int|sys||extattr_delete_link(const char *path, int attrnamespace, const char *attrname); }
SYS_EXTATTR_LIST_FD = 370 // { ssize_t|sys||extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); }
SYS_EXTATTR_LIST_FILE = 371 // { ssize_t|sys||extattr_list_file(const char *path, int attrnamespace, void *data, size_t nbytes); }
SYS_EXTATTR_LIST_LINK = 372 // { ssize_t|sys||extattr_list_link(const char *path, int attrnamespace, void *data, size_t nbytes); }
SYS_SETXATTR = 375 // { int|sys||setxattr(const char *path, const char *name, const void *value, size_t size, int flags); }
SYS_LSETXATTR = 376 // { int|sys||lsetxattr(const char *path, const char *name, const void *value, size_t size, int flags); }
SYS_FSETXATTR = 377 // { int|sys||fsetxattr(int fd, const char *name, const void *value, size_t size, int flags); }
SYS_GETXATTR = 378 // { int|sys||getxattr(const char *path, const char *name, void *value, size_t size); }
SYS_LGETXATTR = 379 // { int|sys||lgetxattr(const char *path, const char *name, void *value, size_t size); }
SYS_FGETXATTR = 380 // { int|sys||fgetxattr(int fd, const char *name, void *value, size_t size); }
SYS_LISTXATTR = 381 // { int|sys||listxattr(const char *path, char *list, size_t size); }
SYS_LLISTXATTR = 382 // { int|sys||llistxattr(const char *path, char *list, size_t size); }
SYS_FLISTXATTR = 383 // { int|sys||flistxattr(int fd, char *list, size_t size); }
SYS_REMOVEXATTR = 384 // { int|sys||removexattr(const char *path, const char *name); }
SYS_LREMOVEXATTR = 385 // { int|sys||lremovexattr(const char *path, const char *name); }
SYS_FREMOVEXATTR = 386 // { int|sys||fremovexattr(int fd, const char *name); }
SYS_GETDENTS = 390 // { int|sys|30|getdents(int fd, char *buf, size_t count); }
SYS_SOCKET = 394 // { int|sys|30|socket(int domain, int type, int protocol); }
SYS_GETFH = 395 // { int|sys|30|getfh(const char *fname, void *fhp, size_t *fh_size); }
SYS_MOUNT = 410 // { int|sys|50|mount(const char *type, const char *path, int flags, void *data, size_t data_len); }
SYS_MREMAP = 411 // { void *|sys||mremap(void *old_address, size_t old_size, void *new_address, size_t new_size, int flags); }
SYS_PSET_CREATE = 412 // { int|sys||pset_create(psetid_t *psid); }
SYS_PSET_DESTROY = 413 // { int|sys||pset_destroy(psetid_t psid); }
SYS_PSET_ASSIGN = 414 // { int|sys||pset_assign(psetid_t psid, cpuid_t cpuid, psetid_t *opsid); }
SYS__PSET_BIND = 415 // { int|sys||_pset_bind(idtype_t idtype, id_t first_id, id_t second_id, psetid_t psid, psetid_t *opsid); }
SYS_POSIX_FADVISE = 416 // { int|sys|50|posix_fadvise(int fd, int PAD, off_t offset, off_t len, int advice); }
SYS_SELECT = 417 // { int|sys|50|select(int nd, fd_set *in, fd_set *ou, fd_set *ex, struct timeval *tv); }
SYS_GETTIMEOFDAY = 418 // { int|sys|50|gettimeofday(struct timeval *tp, void *tzp); }
SYS_SETTIMEOFDAY = 419 // { int|sys|50|settimeofday(const struct timeval *tv, const void *tzp); }
SYS_UTIMES = 420 // { int|sys|50|utimes(const char *path, const struct timeval *tptr); }
SYS_ADJTIME = 421 // { int|sys|50|adjtime(const struct timeval *delta, struct timeval *olddelta); }
SYS_FUTIMES = 423 // { int|sys|50|futimes(int fd, const struct timeval *tptr); }
SYS_LUTIMES = 424 // { int|sys|50|lutimes(const char *path, const struct timeval *tptr); }
SYS_SETITIMER = 425 // { int|sys|50|setitimer(int which, const struct itimerval *itv, struct itimerval *oitv); }
SYS_GETITIMER = 426 // { int|sys|50|getitimer(int which, struct itimerval *itv); }
SYS_CLOCK_GETTIME = 427 // { int|sys|50|clock_gettime(clockid_t clock_id, struct timespec *tp); }
SYS_CLOCK_SETTIME = 428 // { int|sys|50|clock_settime(clockid_t clock_id, const struct timespec *tp); }
SYS_CLOCK_GETRES = 429 // { int|sys|50|clock_getres(clockid_t clock_id, struct timespec *tp); }
SYS_NANOSLEEP = 430 // { int|sys|50|nanosleep(const struct timespec *rqtp, struct timespec *rmtp); }
SYS___SIGTIMEDWAIT = 431 // { int|sys|50|__sigtimedwait(const sigset_t *set, siginfo_t *info, struct timespec *timeout); }
SYS__LWP_PARK = 434 // { int|sys|50|_lwp_park(const struct timespec *ts, lwpid_t unpark, const void *hint, const void *unparkhint); }
SYS_KEVENT = 435 // { int|sys|50|kevent(int fd, const struct kevent *changelist, size_t nchanges, struct kevent *eventlist, size_t nevents, const struct timespec *timeout); }
SYS_PSELECT = 436 // { int|sys|50|pselect(int nd, fd_set *in, fd_set *ou, fd_set *ex, const struct timespec *ts, const sigset_t *mask); }
SYS_POLLTS = 437 // { int|sys|50|pollts(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *mask); }
SYS_STAT = 439 // { int|sys|50|stat(const char *path, struct stat *ub); }
SYS_FSTAT = 440 // { int|sys|50|fstat(int fd, struct stat *sb); }
SYS_LSTAT = 441 // { int|sys|50|lstat(const char *path, struct stat *ub); }
SYS___SEMCTL = 442 // { int|sys|50|__semctl(int semid, int semnum, int cmd, ... union __semun *arg); }
SYS_SHMCTL = 443 // { int|sys|50|shmctl(int shmid, int cmd, struct shmid_ds *buf); }
SYS_MSGCTL = 444 // { int|sys|50|msgctl(int msqid, int cmd, struct msqid_ds *buf); }
SYS_GETRUSAGE = 445 // { int|sys|50|getrusage(int who, struct rusage *rusage); }
SYS_TIMER_SETTIME = 446 // { int|sys|50|timer_settime(timer_t timerid, int flags, const struct itimerspec *value, struct itimerspec *ovalue); }
SYS_TIMER_GETTIME = 447 // { int|sys|50|timer_gettime(timer_t timerid, struct itimerspec *value); }
SYS_NTP_GETTIME = 448 // { int|sys|50|ntp_gettime(struct ntptimeval *ntvp); }
SYS_WAIT4 = 449 // { int|sys|50|wait4(pid_t pid, int *status, int options, struct rusage *rusage); }
SYS_MKNOD = 450 // { int|sys|50|mknod(const char *path, mode_t mode, dev_t dev); }
SYS_FHSTAT = 451 // { int|sys|50|fhstat(const void *fhp, size_t fh_size, struct stat *sb); }
SYS_PIPE2 = 453 // { int|sys||pipe2(int *fildes, int flags); }
SYS_DUP3 = 454 // { int|sys||dup3(int from, int to, int flags); }
SYS_KQUEUE1 = 455 // { int|sys||kqueue1(int flags); }
SYS_PACCEPT = 456 // { int|sys||paccept(int s, struct sockaddr *name, socklen_t *anamelen, const sigset_t *mask, int flags); }
SYS_LINKAT = 457 // { int|sys||linkat(int fd1, const char *name1, int fd2, const char *name2, int flags); }
SYS_RENAMEAT = 458 // { int|sys||renameat(int fromfd, const char *from, int tofd, const char *to); }
SYS_MKFIFOAT = 459 // { int|sys||mkfifoat(int fd, const char *path, mode_t mode); }
SYS_MKNODAT = 460 // { int|sys||mknodat(int fd, const char *path, mode_t mode, uint32_t dev); }
SYS_MKDIRAT = 461 // { int|sys||mkdirat(int fd, const char *path, mode_t mode); }
SYS_FACCESSAT = 462 // { int|sys||faccessat(int fd, const char *path, int amode, int flag); }
SYS_FCHMODAT = 463 // { int|sys||fchmodat(int fd, const char *path, mode_t mode, int flag); }
SYS_FCHOWNAT = 464 // { int|sys||fchownat(int fd, const char *path, uid_t owner, gid_t group, int flag); }
SYS_FEXECVE = 465 // { int|sys||fexecve(int fd, char * const *argp, char * const *envp); }
SYS_FSTATAT = 466 // { int|sys||fstatat(int fd, const char *path, struct stat *buf, int flag); }
SYS_UTIMENSAT = 467 // { int|sys||utimensat(int fd, const char *path, const struct timespec *tptr, int flag); }
SYS_OPENAT = 468 // { int|sys||openat(int fd, const char *path, int oflags, ... mode_t mode); }
SYS_READLINKAT = 469 // { int|sys||readlinkat(int fd, const char *path, char *buf, size_t bufsize); }
SYS_SYMLINKAT = 470 // { int|sys||symlinkat(const char *path1, int fd, const char *path2); }
SYS_UNLINKAT = 471 // { int|sys||unlinkat(int fd, const char *path, int flag); }
SYS_FUTIMENS = 472 // { int|sys||futimens(int fd, const struct timespec *tptr); }
SYS___QUOTACTL = 473 // { int|sys||__quotactl(const char *path, struct quotactl_args *args); }
SYS_POSIX_SPAWN = 474 // { int|sys||posix_spawn(pid_t *pid, const char *path, const struct posix_spawn_file_actions *file_actions, const struct posix_spawnattr *attrp, char *const *argv, char *const *envp); }
SYS_RECVMMSG = 475 // { int|sys||recvmmsg(int s, struct mmsghdr *mmsg, unsigned int vlen, unsigned int flags, struct timespec *timeout); }
SYS_SENDMMSG = 476 // { int|sys||sendmmsg(int s, struct mmsghdr *mmsg, unsigned int vlen, unsigned int flags); }
)
| _vendor/src/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go | 0 | https://github.com/pingcap/tidb/commit/b35e024ace8a525b61fd53eae22e9723d06454ca | [
0.0012295861961320043,
0.00028736350941471756,
0.00016616626817267388,
0.00017944305727723986,
0.00023713665723334998
] |
{
"id": 1,
"code_window": [
"\t\tbaseExecutor: newBaseExecutor(nil, b.ctx),\n",
"\t\tName: v.Name,\n",
"\t}\n",
"}\n",
"\n",
"func (b *executorBuilder) buildSelectLock(v *plan.PhysicalLock) Executor {\n",
"\tsrc := b.build(v.Children()[0])\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\te.supportChk = true\n",
"\treturn e\n"
],
"file_path": "executor/builder.go",
"type": "add",
"edit_start_line_idx": 211
} | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"math"
"sort"
"sync"
"time"
"github.com/cznic/sortutil"
"github.com/juju/errors"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/distsql"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/plan"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/admin"
"github.com/pingcap/tipb/go-tipb"
goctx "golang.org/x/net/context"
)
// executorBuilder builds an Executor from a Plan.
// The InfoSchema must not change during execution.
type executorBuilder struct {
ctx context.Context
is infoschema.InfoSchema
priority int
startTS uint64 // cached when the first time getStartTS() is called
// err is set when there is error happened during Executor building process.
err error
}
func newExecutorBuilder(ctx context.Context, is infoschema.InfoSchema, priority int) *executorBuilder {
return &executorBuilder{
ctx: ctx,
is: is,
priority: priority,
}
}
func (b *executorBuilder) build(p plan.Plan) Executor {
switch v := p.(type) {
case nil:
return nil
case *plan.CheckTable:
return b.buildCheckTable(v)
case *plan.DDL:
return b.buildDDL(v)
case *plan.Deallocate:
return b.buildDeallocate(v)
case *plan.Delete:
return b.buildDelete(v)
case *plan.Execute:
return b.buildExecute(v)
case *plan.Explain:
return b.buildExplain(v)
case *plan.Insert:
return b.buildInsert(v)
case *plan.LoadData:
return b.buildLoadData(v)
case *plan.PhysicalLimit:
return b.buildLimit(v)
case *plan.Prepare:
return b.buildPrepare(v)
case *plan.PhysicalLock:
return b.buildSelectLock(v)
case *plan.CancelDDLJobs:
return b.buildCancelDDLJobs(v)
case *plan.ShowDDL:
return b.buildShowDDL(v)
case *plan.ShowDDLJobs:
return b.buildShowDDLJobs(v)
case *plan.Show:
return b.buildShow(v)
case *plan.Simple:
return b.buildSimple(v)
case *plan.Set:
return b.buildSet(v)
case *plan.PhysicalSort:
return b.buildSort(v)
case *plan.PhysicalTopN:
return b.buildTopN(v)
case *plan.PhysicalUnionAll:
return b.buildUnionAll(v)
case *plan.Update:
return b.buildUpdate(v)
case *plan.PhysicalUnionScan:
return b.buildUnionScanExec(v)
case *plan.PhysicalHashJoin:
return b.buildHashJoin(v)
case *plan.PhysicalMergeJoin:
return b.buildMergeJoin(v)
case *plan.PhysicalHashSemiJoin:
return b.buildSemiJoin(v)
case *plan.PhysicalIndexJoin:
return b.buildIndexLookUpJoin(v)
case *plan.PhysicalSelection:
return b.buildSelection(v)
case *plan.PhysicalHashAgg:
return b.buildHashAgg(v)
case *plan.PhysicalStreamAgg:
return b.buildStreamAgg(v)
case *plan.PhysicalProjection:
return b.buildProjection(v)
case *plan.PhysicalMemTable:
return b.buildMemTable(v)
case *plan.PhysicalTableDual:
return b.buildTableDual(v)
case *plan.PhysicalApply:
return b.buildApply(v)
case *plan.PhysicalExists:
return b.buildExists(v)
case *plan.PhysicalMaxOneRow:
return b.buildMaxOneRow(v)
case *plan.Analyze:
return b.buildAnalyze(v)
case *plan.PhysicalTableReader:
return b.buildTableReader(v)
case *plan.PhysicalIndexReader:
return b.buildIndexReader(v)
case *plan.PhysicalIndexLookUpReader:
return b.buildIndexLookUpReader(v)
default:
b.err = ErrUnknownPlan.Gen("Unknown Plan %T", p)
return nil
}
}
func (b *executorBuilder) buildCancelDDLJobs(v *plan.CancelDDLJobs) Executor {
e := &CancelDDLJobsExec{
baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
jobIDs: v.JobIDs,
}
e.errs, b.err = admin.CancelJobs(e.ctx.Txn(), e.jobIDs)
if b.err != nil {
b.err = errors.Trace(b.err)
return nil
}
e.supportChk = true
return e
}
func (b *executorBuilder) buildShowDDL(v *plan.ShowDDL) Executor {
// We get DDLInfo here because for Executors that returns result set,
// next will be called after transaction has been committed.
// We need the transaction to get DDLInfo.
e := &ShowDDLExec{
baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
}
var err error
ownerManager := domain.GetDomain(e.ctx).DDL().OwnerManager()
ctx, cancel := goctx.WithTimeout(goctx.Background(), 3*time.Second)
e.ddlOwnerID, err = ownerManager.GetOwnerID(ctx)
cancel()
if err != nil {
b.err = errors.Trace(err)
return nil
}
ddlInfo, err := admin.GetDDLInfo(e.ctx.Txn())
if err != nil {
b.err = errors.Trace(err)
return nil
}
e.ddlInfo = ddlInfo
e.selfID = ownerManager.ID()
e.supportChk = true
return e
}
func (b *executorBuilder) buildShowDDLJobs(v *plan.ShowDDLJobs) Executor {
e := &ShowDDLJobsExec{
baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
}
e.supportChk = true
return e
}
func (b *executorBuilder) buildCheckTable(v *plan.CheckTable) Executor {
e := &CheckTableExec{
baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
tables: v.Tables,
is: b.is,
}
e.supportChk = true
return e
}
func (b *executorBuilder) buildDeallocate(v *plan.Deallocate) Executor {
return &DeallocateExec{
baseExecutor: newBaseExecutor(nil, b.ctx),
Name: v.Name,
}
}
func (b *executorBuilder) buildSelectLock(v *plan.PhysicalLock) Executor {
src := b.build(v.Children()[0])
if b.err != nil {
b.err = errors.Trace(b.err)
return nil
}
if !b.ctx.GetSessionVars().InTxn() {
// Locking of rows for update using SELECT FOR UPDATE only applies when autocommit
// is disabled (either by beginning transaction with START TRANSACTION or by setting
// autocommit to 0. If autocommit is enabled, the rows matching the specification are not locked.
// See https://dev.mysql.com/doc/refman/5.7/en/innodb-locking-reads.html
return src
}
e := &SelectLockExec{
baseExecutor: newBaseExecutor(v.Schema(), b.ctx, src),
Lock: v.Lock,
}
e.supportChk = true
return e
}
func (b *executorBuilder) buildLimit(v *plan.PhysicalLimit) Executor {
childExec := b.build(v.Children()[0])
if b.err != nil {
b.err = errors.Trace(b.err)
return nil
}
e := &LimitExec{
baseExecutor: newBaseExecutor(v.Schema(), b.ctx, childExec),
begin: v.Offset,
end: v.Offset + v.Count,
}
e.supportChk = true
return e
}
func (b *executorBuilder) buildPrepare(v *plan.Prepare) Executor {
e := &PrepareExec{
baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
is: b.is,
name: v.Name,
sqlText: v.SQLText,
}
e.supportChk = true
return e
}
func (b *executorBuilder) buildExecute(v *plan.Execute) Executor {
return &ExecuteExec{
baseExecutor: newBaseExecutor(nil, b.ctx),
IS: b.is,
Name: v.Name,
UsingVars: v.UsingVars,
ID: v.ExecID,
Stmt: v.Stmt,
Plan: v.Plan,
}
}
func (b *executorBuilder) buildShow(v *plan.Show) Executor {
e := &ShowExec{
baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
Tp: v.Tp,
DBName: model.NewCIStr(v.DBName),
Table: v.Table,
Column: v.Column,
User: v.User,
Flag: v.Flag,
Full: v.Full,
GlobalScope: v.GlobalScope,
is: b.is,
}
if e.Tp == ast.ShowGrants && e.User == nil {
e.User = e.ctx.GetSessionVars().User
}
if len(v.Conditions) == 0 {
return e
}
sel := &SelectionExec{
baseExecutor: newBaseExecutor(v.Schema(), b.ctx, e),
filters: v.Conditions,
}
return sel
}
func (b *executorBuilder) buildSimple(v *plan.Simple) Executor {
switch s := v.Statement.(type) {
case *ast.GrantStmt:
return b.buildGrant(s)
case *ast.RevokeStmt:
return b.buildRevoke(s)
}
return &SimpleExec{
baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
Statement: v.Statement,
is: b.is,
}
}
func (b *executorBuilder) buildSet(v *plan.Set) Executor {
e := &SetExecutor{
baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
vars: v.VarAssigns,
}
e.supportChk = true
return e
}
// buildInsert builds the executor for INSERT/REPLACE. It first assembles the
// shared InsertValues (columns, value lists and generated-column info), then
// wraps it in a ReplaceExec or an InsertExec.
func (b *executorBuilder) buildInsert(v *plan.Insert) Executor {
	ivs := &InsertValues{
		baseExecutor:          newBaseExecutor(nil, b.ctx),
		Columns:               v.Columns,
		Lists:                 v.Lists,
		Setlist:               v.Setlist,
		GenColumns:            v.GenCols.Columns,
		GenExprs:              v.GenCols.Exprs,
		needFillDefaultValues: v.NeedFillDefaultValue,
	}
	// Non-nil only for INSERT ... SELECT.
	ivs.SelectExec = b.build(v.SelectPlan)
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	ivs.Table = v.Table
	if v.IsReplace {
		return b.buildReplace(ivs)
	}
	insert := &InsertExec{
		InsertValues: ivs,
		OnDuplicate:  append(v.OnDuplicate, v.GenCols.OnDuplicates...),
		Priority:     v.Priority,
		IgnoreErr:    v.IgnoreErr,
	}
	insert.supportChk = true
	return insert
}

// buildLoadData builds a LoadData executor for LOAD DATA statements. It
// resolves the target table from the schema and prebuilds the InsertValues
// used to write the parsed rows.
func (b *executorBuilder) buildLoadData(v *plan.LoadData) Executor {
	tbl, ok := b.is.TableByID(v.Table.TableInfo.ID)
	if !ok {
		b.err = errors.Errorf("Can not get table %d", v.Table.TableInfo.ID)
		return nil
	}
	insertVal := &InsertValues{
		baseExecutor: newBaseExecutor(nil, b.ctx),
		Table:        tbl,
		Columns:      v.Columns,
		GenColumns:   v.GenCols.Columns,
		GenExprs:     v.GenCols.Exprs,
	}
	tableCols := tbl.Cols()
	columns, err := insertVal.getColumns(tableCols)
	if err != nil {
		b.err = errors.Trace(err)
		return nil
	}
	return &LoadData{
		baseExecutor: newBaseExecutor(nil, b.ctx),
		IsLocal:      v.IsLocal,
		loadDataInfo: &LoadDataInfo{
			// One datum slot per column to be filled for each parsed row.
			row:        make([]types.Datum, len(columns)),
			insertVal:  insertVal,
			Path:       v.Path,
			Table:      tbl,
			FieldsInfo: v.FieldsInfo,
			LinesInfo:  v.LinesInfo,
			Ctx:        b.ctx,
			columns:    columns,
		},
	}
}

// buildReplace wraps a prepared InsertValues into a ReplaceExec.
func (b *executorBuilder) buildReplace(vals *InsertValues) Executor {
	return &ReplaceExec{
		InsertValues: vals,
	}
}
// buildGrant builds a GrantExec from a GRANT statement.
func (b *executorBuilder) buildGrant(grant *ast.GrantStmt) Executor {
	return &GrantExec{
		baseExecutor: newBaseExecutor(nil, b.ctx),
		Privs:        grant.Privs,
		ObjectType:   grant.ObjectType,
		Level:        grant.Level,
		Users:        grant.Users,
		WithGrant:    grant.WithGrant,
		is:           b.is,
	}
}

// buildRevoke builds a RevokeExec from a REVOKE statement.
func (b *executorBuilder) buildRevoke(revoke *ast.RevokeStmt) Executor {
	e := &RevokeExec{
		ctx:        b.ctx,
		Privs:      revoke.Privs,
		ObjectType: revoke.ObjectType,
		Level:      revoke.Level,
		Users:      revoke.Users,
		is:         b.is,
	}
	e.supportChk = true
	return e
}

// buildDDL builds a DDLExec that runs the given DDL statement.
func (b *executorBuilder) buildDDL(v *plan.DDL) Executor {
	e := &DDLExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
		stmt:         v.Statement,
		is:           b.is,
	}
	e.supportChk = true
	return e
}
// buildExplain builds an ExplainExec that simply emits the explain rows
// already computed on the plan.
func (b *executorBuilder) buildExplain(v *plan.Explain) Executor {
	e := &ExplainExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
	}
	// Copy the rows in one step instead of appending element by element;
	// the executor still gets its own slice, not an alias of v.Rows.
	e.rows = make([][]string, len(v.Rows))
	copy(e.rows, v.Rows)
	e.supportChk = true
	return e
}
// buildUnionScanExec wraps a data-reader executor with a UnionScanExec that
// merges uncommitted (dirty) rows of the current transaction into the
// snapshot data returned by the reader.
// NOTE(review): the three reader cases below duplicate the dirty-table
// setup; consider factoring when touching this code.
func (b *executorBuilder) buildUnionScanExec(v *plan.PhysicalUnionScan) Executor {
	src := b.build(v.Children()[0])
	if b.err != nil {
		return nil
	}
	us := &UnionScanExec{baseExecutor: newBaseExecutor(v.Schema(), b.ctx, src)}
	// Get the handle column index of the below plan.
	// We can guarantee that there must be only one col in the map.
	for _, cols := range v.Children()[0].Schema().TblID2Handle {
		us.belowHandleIndex = cols[0].Index
	}
	switch x := src.(type) {
	case *TableReaderExecutor:
		us.desc = x.desc
		us.dirty = getDirtyDB(b.ctx).getDirtyTable(x.table.Meta().ID)
		us.conditions = v.Conditions
		us.columns = x.columns
		b.err = us.buildAndSortAddedRows(x.table)
	case *IndexReaderExecutor:
		us.desc = x.desc
		// Map each index column to its position in the reader schema so the
		// union scan can compare rows in index order.
		for _, ic := range x.index.Columns {
			for i, col := range x.schema.Columns {
				if col.ColName.L == ic.Name.L {
					us.usedIndex = append(us.usedIndex, i)
					break
				}
			}
		}
		us.dirty = getDirtyDB(b.ctx).getDirtyTable(x.table.Meta().ID)
		us.conditions = v.Conditions
		us.columns = x.columns
		b.err = us.buildAndSortAddedRows(x.table)
	case *IndexLookUpExecutor:
		us.desc = x.desc
		for _, ic := range x.index.Columns {
			for i, col := range x.schema.Columns {
				if col.ColName.L == ic.Name.L {
					us.usedIndex = append(us.usedIndex, i)
					break
				}
			}
		}
		us.dirty = getDirtyDB(b.ctx).getDirtyTable(x.table.Meta().ID)
		us.conditions = v.Conditions
		us.columns = x.columns
		b.err = us.buildAndSortAddedRows(x.table)
	default:
		// The mem table will not be written by sql directly, so we can omit the union scan to avoid err reporting.
		return src
	}
	if b.err != nil {
		return nil
	}
	return us
}
// buildMergeJoin builds MergeJoinExec executor. Join keys are extracted from
// the equal conditions (each must be column = column); the left child is the
// outer side by default and the sides are swapped for a right outer join.
func (b *executorBuilder) buildMergeJoin(v *plan.PhysicalMergeJoin) Executor {
	leftExec := b.build(v.Children()[0])
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	rightExec := b.build(v.Children()[1])
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	leftKeys := make([]*expression.Column, 0, len(v.EqualConditions))
	rightKeys := make([]*expression.Column, 0, len(v.EqualConditions))
	for _, eqCond := range v.EqualConditions {
		if len(eqCond.GetArgs()) != 2 {
			b.err = errors.Annotate(ErrBuildExecutor, "invalid join key for equal condition")
			return nil
		}
		leftKey, ok := eqCond.GetArgs()[0].(*expression.Column)
		if !ok {
			b.err = errors.Annotate(ErrBuildExecutor, "left side of join key must be column for merge join")
			return nil
		}
		rightKey, ok := eqCond.GetArgs()[1].(*expression.Column)
		if !ok {
			b.err = errors.Annotate(ErrBuildExecutor, "right side of join key must be column for merge join")
			return nil
		}
		leftKeys = append(leftKeys, leftKey)
		rightKeys = append(rightKeys, rightKey)
	}
	leftRowBlock := &rowBlockIterator{
		ctx:      b.ctx,
		reader:   leftExec,
		filter:   v.LeftConditions,
		joinKeys: leftKeys,
	}
	rightRowBlock := &rowBlockIterator{
		ctx:      b.ctx,
		reader:   rightExec,
		filter:   v.RightConditions,
		joinKeys: rightKeys,
	}
	defaultValues := v.DefaultValues
	if defaultValues == nil {
		defaultValues = make([]types.Datum, rightExec.Schema().Len())
	}
	e := &MergeJoinExec{
		baseExecutor:    newBaseExecutor(v.Schema(), b.ctx, leftExec, rightExec),
		resultGenerator: newJoinResultGenerator(b.ctx, v.JoinType, false, defaultValues, v.OtherConditions, nil, nil),
		stmtCtx:         b.ctx.GetSessionVars().StmtCtx,
		// left is the outer side by default.
		outerKeys: leftKeys,
		innerKeys: rightKeys,
		outerIter: leftRowBlock,
		innerIter: rightRowBlock,
	}
	if v.JoinType == plan.RightOuterJoin {
		e.outerKeys, e.innerKeys = e.innerKeys, e.outerKeys
		e.outerIter, e.innerIter = e.innerIter, e.outerIter
	}
	// For outer joins, the outer filter must be applied by the join itself
	// (an outer row failing the filter still produces a padded result row),
	// so it is moved off the iterator.
	if v.JoinType != plan.InnerJoin {
		e.outerFilter = e.outerIter.filter
		e.outerIter.filter = nil
	}
	return e
}
// buildHashJoin builds a HashJoinExec from a physical hash join plan.
// v.SmallChildIdx picks which child is the inner (hashed) side; the other
// child is streamed as the outer side.
func (b *executorBuilder) buildHashJoin(v *plan.PhysicalHashJoin) Executor {
	leftHashKey := make([]*expression.Column, 0, len(v.EqualConditions))
	rightHashKey := make([]*expression.Column, 0, len(v.EqualConditions))
	for _, eqCond := range v.EqualConditions {
		ln, _ := eqCond.GetArgs()[0].(*expression.Column)
		rn, _ := eqCond.GetArgs()[1].(*expression.Column)
		leftHashKey = append(leftHashKey, ln)
		rightHashKey = append(rightHashKey, rn)
	}
	// Check b.err after each child build, consistent with buildMergeJoin;
	// otherwise a failed child would be wrapped into the join executor.
	leftExec := b.build(v.Children()[0])
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	rightExec := b.build(v.Children()[1])
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	// for hash join, inner table is always the smaller one.
	e := &HashJoinExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx, leftExec, rightExec),
		concurrency:  v.Concurrency,
		joinType:     v.JoinType,
	}
	if v.SmallChildIdx == 0 {
		e.innerExec = leftExec
		e.outerExec = rightExec
		e.innerFilter = v.LeftConditions
		e.outerFilter = v.RightConditions
		e.innerKeys = leftHashKey
		e.outerKeys = rightHashKey
	} else {
		e.innerExec = rightExec
		e.outerExec = leftExec
		e.innerFilter = v.RightConditions
		e.outerFilter = v.LeftConditions
		e.innerKeys = rightHashKey
		e.outerKeys = leftHashKey
	}
	// Default values pad the inner row of outer joins when there is no match;
	// if not supplied by the planner, use zero-value datums sized to the
	// inner schema. This was duplicated in both branches above.
	defaultValues := v.DefaultValues
	if defaultValues == nil {
		defaultValues = make([]types.Datum, e.innerExec.Schema().Len())
	}
	e.resultGenerator = newJoinResultGenerator(b.ctx, v.JoinType, v.SmallChildIdx == 0,
		defaultValues, v.OtherConditions, nil, nil)
	return e
}
// buildSemiJoin builds a HashSemiJoinExec for (anti-)semi joins. The left
// child is the big (probe) side, the right child is the small (hashed) side.
// NOTE(review): both children are built inline without checking b.err
// afterwards, unlike buildMergeJoin — confirm callers inspect b.err before
// using the returned executor.
func (b *executorBuilder) buildSemiJoin(v *plan.PhysicalHashSemiJoin) *HashSemiJoinExec {
	leftHashKey := make([]*expression.Column, 0, len(v.EqualConditions))
	rightHashKey := make([]*expression.Column, 0, len(v.EqualConditions))
	for _, eqCond := range v.EqualConditions {
		ln, _ := eqCond.GetArgs()[0].(*expression.Column)
		rn, _ := eqCond.GetArgs()[1].(*expression.Column)
		leftHashKey = append(leftHashKey, ln)
		rightHashKey = append(rightHashKey, rn)
	}
	e := &HashSemiJoinExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
		otherFilter:  v.OtherConditions,
		bigFilter:    v.LeftConditions,
		smallFilter:  v.RightConditions,
		bigExec:      b.build(v.Children()[0]),
		smallExec:    b.build(v.Children()[1]),
		prepared:     false,
		bigHashKey:   leftHashKey,
		smallHashKey: rightHashKey,
		auxMode:      v.WithAux,
		anti:         v.Anti,
	}
	return e
}
// buildHashAgg builds a HashAggExec (hash-based aggregation) over its child.
func (b *executorBuilder) buildHashAgg(v *plan.PhysicalHashAgg) Executor {
	src := b.build(v.Children()[0])
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	return &HashAggExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx, src),
		sc:           b.ctx.GetSessionVars().StmtCtx,
		AggFuncs:     v.AggFuncs,
		GroupByItems: v.GroupByItems,
	}
}

// buildStreamAgg builds a StreamAggExec, which aggregates input that is
// already grouped/ordered by the group-by items.
func (b *executorBuilder) buildStreamAgg(v *plan.PhysicalStreamAgg) Executor {
	src := b.build(v.Children()[0])
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	return &StreamAggExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx, src),
		StmtCtx:      b.ctx.GetSessionVars().StmtCtx,
		AggFuncs:     v.AggFuncs,
		GroupByItems: v.GroupByItems,
	}
}

// buildSelection builds a SelectionExec that filters child rows by the
// plan's conditions.
func (b *executorBuilder) buildSelection(v *plan.PhysicalSelection) Executor {
	childExec := b.build(v.Children()[0])
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	e := &SelectionExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx, childExec),
		filters:      v.Conditions,
	}
	e.supportChk = true
	return e
}

// buildProjection builds a ProjectionExec that evaluates v.Exprs over each
// child row.
func (b *executorBuilder) buildProjection(v *plan.PhysicalProjection) Executor {
	childExec := b.build(v.Children()[0])
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	e := &ProjectionExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx, childExec),
		exprs:        v.Exprs,
	}
	e.baseExecutor.supportChk = true
	return e
}

// buildTableDual builds a TableDualExec that emits v.RowCount empty rows.
func (b *executorBuilder) buildTableDual(v *plan.PhysicalTableDual) Executor {
	return &TableDualExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
		rowCount:     v.RowCount,
	}
}

// getStartTS returns the timestamp to read at: the session snapshot TS if
// set, otherwise the current transaction's start TS. The result is cached
// on the builder so all readers of one statement use the same TS.
func (b *executorBuilder) getStartTS() uint64 {
	if b.startTS != 0 {
		// Return the cached value.
		return b.startTS
	}
	startTS := b.ctx.GetSessionVars().SnapshotTS
	if startTS == 0 {
		startTS = b.ctx.Txn().StartTS()
	}
	b.startTS = startTS
	return startTS
}

// buildMemTable builds a TableScanExec over an in-memory (or virtual) table.
func (b *executorBuilder) buildMemTable(v *plan.PhysicalMemTable) Executor {
	tb, _ := b.is.TableByID(v.Table.ID)
	ts := &TableScanExec{
		baseExecutor:   newBaseExecutor(v.Schema(), b.ctx),
		t:              tb,
		columns:        v.Columns,
		seekHandle:     math.MinInt64,
		ranges:         v.Ranges,
		isVirtualTable: tb.Type() == table.VirtualTable,
	}
	return ts
}
// buildSort builds a SortExec ordering child rows by v.ByItems.
func (b *executorBuilder) buildSort(v *plan.PhysicalSort) Executor {
	childExec := b.build(v.Children()[0])
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	sortExec := SortExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx, childExec),
		ByItems:      v.ByItems,
		schema:       v.Schema(),
	}
	sortExec.supportChk = true
	return &sortExec
}

// buildTopN builds a TopNExec: a sort combined with a limit (offset/count).
func (b *executorBuilder) buildTopN(v *plan.PhysicalTopN) Executor {
	childExec := b.build(v.Children()[0])
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	sortExec := SortExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx, childExec),
		ByItems:      v.ByItems,
		schema:       v.Schema(),
	}
	sortExec.supportChk = true
	return &TopNExec{
		SortExec: sortExec,
		limit:    &plan.PhysicalLimit{Count: v.Count, Offset: v.Offset},
	}
}

// buildNestedLoopJoin builds a NestedLoopJoinExec from a physical hash join
// plan, used under Apply. v.SmallChildIdx picks the small (re-scanned) side.
// NOTE(review): the children are built inline without checking b.err — the
// caller (buildApply) must inspect b.err before running the executor.
func (b *executorBuilder) buildNestedLoopJoin(v *plan.PhysicalHashJoin) *NestedLoopJoinExec {
	for _, cond := range v.EqualConditions {
		cond.GetArgs()[0].(*expression.Column).ResolveIndices(v.Schema())
		cond.GetArgs()[1].(*expression.Column).ResolveIndices(v.Schema())
	}
	defaultValues := v.DefaultValues
	if v.SmallChildIdx == 1 {
		if defaultValues == nil {
			defaultValues = make([]types.Datum, v.Children()[1].Schema().Len())
		}
		return &NestedLoopJoinExec{
			baseExecutor:  newBaseExecutor(v.Schema(), b.ctx),
			SmallExec:     b.build(v.Children()[1]),
			BigExec:       b.build(v.Children()[0]),
			BigFilter:     v.LeftConditions,
			SmallFilter:   v.RightConditions,
			OtherFilter:   append(expression.ScalarFuncs2Exprs(v.EqualConditions), v.OtherConditions...),
			outer:         v.JoinType != plan.InnerJoin,
			defaultValues: defaultValues,
		}
	}
	if defaultValues == nil {
		defaultValues = make([]types.Datum, v.Children()[0].Schema().Len())
	}
	return &NestedLoopJoinExec{
		baseExecutor:  newBaseExecutor(v.Schema(), b.ctx),
		SmallExec:     b.build(v.Children()[0]),
		BigExec:       b.build(v.Children()[1]),
		leftSmall:     true,
		BigFilter:     v.RightConditions,
		SmallFilter:   v.LeftConditions,
		OtherFilter:   append(expression.ScalarFuncs2Exprs(v.EqualConditions), v.OtherConditions...),
		outer:         v.JoinType != plan.InnerJoin,
		defaultValues: defaultValues,
	}
}
// buildApply builds an ApplyJoinExec, which re-evaluates its inner join
// executor for every outer row, correlating through v.OuterSchema.
func (b *executorBuilder) buildApply(v *plan.PhysicalApply) Executor {
	var join joinExec
	switch x := v.PhysicalJoin.(type) {
	case *plan.PhysicalHashSemiJoin:
		join = b.buildSemiJoin(x)
	case *plan.PhysicalHashJoin:
		// Only inner and left/right outer joins can run as a nested loop join.
		if x.JoinType == plan.InnerJoin || x.JoinType == plan.LeftOuterJoin || x.JoinType == plan.RightOuterJoin {
			join = b.buildNestedLoopJoin(x)
		} else {
			b.err = errors.Errorf("Unsupported join type %v in nested loop join", x.JoinType)
		}
	default:
		// Report the type of the unsupported join plan, not the apply plan
		// itself (v is always *plan.PhysicalApply, which was uninformative).
		b.err = errors.Errorf("Unsupported plan type %T in apply", x)
	}
	// Don't wrap a nil or partially-built join executor if anything failed
	// above (including child-build errors recorded in b.err).
	if b.err != nil {
		return nil
	}
	return &ApplyJoinExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
		join:         join,
		outerSchema:  v.OuterSchema,
	}
}
// buildExists builds an ExistsExec that reports whether the child produced
// at least one row.
func (b *executorBuilder) buildExists(v *plan.PhysicalExists) Executor {
	childExec := b.build(v.Children()[0])
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	e := &ExistsExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx, childExec),
	}
	e.supportChk = true
	return e
}

// buildMaxOneRow builds a MaxOneRowExec enforcing that the child returns at
// most one row (used for scalar subqueries).
func (b *executorBuilder) buildMaxOneRow(v *plan.PhysicalMaxOneRow) Executor {
	childExec := b.build(v.Children()[0])
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	return &MaxOneRowExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx, childExec),
	}
}

// buildUnionAll builds a UnionExec concatenating the results of all children.
func (b *executorBuilder) buildUnionAll(v *plan.PhysicalUnionAll) Executor {
	childExecs := make([]Executor, len(v.Children()))
	for i, child := range v.Children() {
		childExecs[i] = b.build(child)
		if b.err != nil {
			b.err = errors.Trace(b.err)
			return nil
		}
	}
	e := &UnionExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx, childExecs...),
	}
	e.supportChk = true
	return e
}

// buildUpdate builds an UpdateExec. It resolves every table referenced by
// the select plan's handle columns so rows can be written back by table ID.
func (b *executorBuilder) buildUpdate(v *plan.Update) Executor {
	tblID2table := make(map[int64]table.Table)
	for id := range v.SelectPlan.Schema().TblID2Handle {
		tblID2table[id], _ = b.is.TableByID(id)
	}
	selExec := b.build(v.SelectPlan)
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	updateExec := &UpdateExec{
		baseExecutor: newBaseExecutor(nil, b.ctx, selExec),
		SelectExec:   selExec,
		OrderedList:  v.OrderedList,
		tblID2table:  tblID2table,
		IgnoreErr:    v.IgnoreErr,
	}
	updateExec.supportChk = true
	return updateExec
}

// buildDelete builds a DeleteExec, resolving the tables to delete from in
// the same way as buildUpdate.
func (b *executorBuilder) buildDelete(v *plan.Delete) Executor {
	tblID2table := make(map[int64]table.Table)
	for id := range v.SelectPlan.Schema().TblID2Handle {
		tblID2table[id], _ = b.is.TableByID(id)
	}
	selExec := b.build(v.SelectPlan)
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	return &DeleteExec{
		baseExecutor: newBaseExecutor(nil, b.ctx),
		SelectExec:   selExec,
		Tables:       v.Tables,
		IsMultiTable: v.IsMultiTable,
		tblID2Table:  tblID2table,
	}
}
// buildAnalyzeIndexPushdown builds the executor for one ANALYZE index task,
// pushing the histogram collection down to the storage layer.
func (b *executorBuilder) buildAnalyzeIndexPushdown(task plan.AnalyzeIndexTask) *AnalyzeIndexExec {
	e := &AnalyzeIndexExec{
		ctx:         b.ctx,
		tblInfo:     task.TableInfo,
		idxInfo:     task.IndexInfo,
		concurrency: b.ctx.GetSessionVars().IndexSerialScanConcurrency,
		priority:    b.priority,
		analyzePB: &tipb.AnalyzeReq{
			Tp:             tipb.AnalyzeType_TypeIndex,
			StartTs:        math.MaxUint64,
			Flags:          statementContextToFlags(b.ctx.GetSessionVars().StmtCtx),
			TimeZoneOffset: timeZoneOffset(b.ctx),
		},
	}
	e.analyzePB.IdxReq = &tipb.AnalyzeIndexReq{
		BucketSize: maxBucketSize,
		NumColumns: int32(len(task.IndexInfo.Columns)),
	}
	// CM sketch is only collected for non-unique indexes.
	if !task.IndexInfo.Unique {
		depth := int32(defaultCMSketchDepth)
		width := int32(defaultCMSketchWidth)
		e.analyzePB.IdxReq.CmsketchDepth = &depth
		e.analyzePB.IdxReq.CmsketchWidth = &width
	}
	return e
}

// buildAnalyzeColumnsPushdown builds the executor for one ANALYZE columns
// task. If the table has an integer primary key, it is prepended to the
// column list and the scan keeps handle order.
func (b *executorBuilder) buildAnalyzeColumnsPushdown(task plan.AnalyzeColumnsTask) *AnalyzeColumnsExec {
	cols := task.ColsInfo
	keepOrder := false
	if task.PKInfo != nil {
		keepOrder = true
		cols = append([]*model.ColumnInfo{task.PKInfo}, cols...)
	}
	e := &AnalyzeColumnsExec{
		ctx:         b.ctx,
		tblInfo:     task.TableInfo,
		colsInfo:    task.ColsInfo,
		pkInfo:      task.PKInfo,
		concurrency: b.ctx.GetSessionVars().DistSQLScanConcurrency,
		priority:    b.priority,
		keepOrder:   keepOrder,
		analyzePB: &tipb.AnalyzeReq{
			Tp:             tipb.AnalyzeType_TypeColumn,
			StartTs:        math.MaxUint64,
			Flags:          statementContextToFlags(b.ctx.GetSessionVars().StmtCtx),
			TimeZoneOffset: timeZoneOffset(b.ctx),
		},
	}
	depth := int32(defaultCMSketchDepth)
	width := int32(defaultCMSketchWidth)
	e.analyzePB.ColReq = &tipb.AnalyzeColumnsReq{
		BucketSize:    maxBucketSize,
		SampleSize:    maxRegionSampleSize,
		SketchSize:    maxSketchSize,
		ColumnsInfo:   distsql.ColumnsToProto(cols, task.TableInfo.PKIsHandle),
		CmsketchDepth: &depth,
		CmsketchWidth: &width,
	}
	b.err = setPBColumnsDefaultValue(b.ctx, e.analyzePB.ColReq.ColumnsInfo, cols)
	return e
}

// buildAnalyze builds an AnalyzeExec collecting one analyzeTask per column
// task and per index task of the plan.
func (b *executorBuilder) buildAnalyze(v *plan.Analyze) Executor {
	e := &AnalyzeExec{
		baseExecutor: newBaseExecutor(nil, b.ctx),
		tasks:        make([]*analyzeTask, 0, len(v.Children())),
	}
	for _, task := range v.ColTasks {
		e.tasks = append(e.tasks, &analyzeTask{
			taskType: colTask,
			colExec:  b.buildAnalyzeColumnsPushdown(task),
		})
	}
	for _, task := range v.IdxTasks {
		e.tasks = append(e.tasks, &analyzeTask{
			taskType: idxTask,
			idxExec:  b.buildAnalyzeIndexPushdown(task),
		})
	}
	return e
}
// constructDAGReq serializes a chain of physical plans into a tipb
// DAGRequest to be sent to the storage layer, stamping it with the
// statement's start TS, time zone and context flags.
func (b *executorBuilder) constructDAGReq(plans []plan.PhysicalPlan) (*tipb.DAGRequest, error) {
	dagReq := &tipb.DAGRequest{}
	dagReq.StartTs = b.getStartTS()
	dagReq.TimeZoneOffset = timeZoneOffset(b.ctx)
	sc := b.ctx.GetSessionVars().StmtCtx
	dagReq.Flags = statementContextToFlags(sc)
	for _, p := range plans {
		execPB, err := p.ToPB(b.ctx)
		if err != nil {
			return nil, errors.Trace(err)
		}
		dagReq.Executors = append(dagReq.Executors, execPB)
	}
	return dagReq, nil
}
// buildIndexLookUpJoin builds an index lookup join: the outer child is
// executed and its join-key values are used to build point reads on the
// inner side via a dataReaderBuilder.
func (b *executorBuilder) buildIndexLookUpJoin(v *plan.PhysicalIndexJoin) Executor {
	outerExec := b.build(v.Children()[v.OuterIndex])
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	if outerExec.supportChunk() {
		// All inner data readers support chunk (TableReader, IndexReader,
		// IndexLookUpReader), so we only need to check the outer executor.
		return b.buildNewIndexLookUpJoin(v, outerExec)
	}
	batchSize := 1
	if !v.KeepOrder {
		batchSize = b.ctx.GetSessionVars().IndexJoinBatchSize
	}
	innerExecBuilder := &dataReaderBuilder{v.Children()[1-v.OuterIndex], b}
	defaultValues := v.DefaultValues
	if defaultValues == nil {
		defaultValues = make([]types.Datum, innerExecBuilder.Schema().Len())
	}
	return &IndexLookUpJoin{
		baseExecutor:     newBaseExecutor(v.Schema(), b.ctx, outerExec),
		outerExec:        outerExec,
		innerExecBuilder: innerExecBuilder,
		outerKeys:        v.OuterJoinKeys,
		innerKeys:        v.InnerJoinKeys,
		outerFilter:      v.LeftConditions,
		innerFilter:      v.RightConditions,
		resultGenerator:  newJoinResultGenerator(b.ctx, v.JoinType, v.OuterIndex == 1, defaultValues, v.OtherConditions, nil, nil),
		maxBatchSize:     batchSize,
	}
}

// buildNewIndexLookUpJoin builds the chunk-based NewIndexLookUpJoin once the
// outer executor is known to support chunks. Filters and row types are
// assigned to outer/inner according to v.OuterIndex.
func (b *executorBuilder) buildNewIndexLookUpJoin(v *plan.PhysicalIndexJoin, outerExec Executor) Executor {
	outerFilter, innerFilter := v.LeftConditions, v.RightConditions
	leftTypes, rightTypes := v.Children()[0].Schema().GetTypes(), v.Children()[1].Schema().GetTypes()
	outerTypes, innerTypes := leftTypes, rightTypes
	if v.OuterIndex == 1 {
		outerFilter, innerFilter = v.RightConditions, v.LeftConditions
		outerTypes, innerTypes = rightTypes, leftTypes
	}
	defaultValues := v.DefaultValues
	if defaultValues == nil {
		defaultValues = make([]types.Datum, len(innerTypes))
	}
	innerPlan := v.Children()[1-v.OuterIndex]
	e := &NewIndexLookUpJoin{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx, outerExec),
		outerCtx: outerCtx{
			rowTypes: outerTypes,
			filter:   outerFilter,
		},
		innerCtx: innerCtx{
			readerBuilder: &dataReaderBuilder{innerPlan, b},
			rowTypes:      innerTypes,
			filter:        innerFilter,
		},
		workerWg:        new(sync.WaitGroup),
		resultGenerator: newJoinResultGenerator(b.ctx, v.JoinType, v.OuterIndex == 1, defaultValues, v.OtherConditions, leftTypes, rightTypes),
	}
	e.supportChk = true
	// Record the column offsets of the join keys on each side.
	outerKeyCols := make([]int, len(v.OuterJoinKeys))
	for i := 0; i < len(v.OuterJoinKeys); i++ {
		outerKeyCols[i] = v.OuterJoinKeys[i].Index
	}
	e.outerCtx.keyCols = outerKeyCols
	innerKeyCols := make([]int, len(v.InnerJoinKeys))
	for i := 0; i < len(v.InnerJoinKeys); i++ {
		innerKeyCols[i] = v.InnerJoinKeys[i].Index
	}
	e.innerCtx.keyCols = innerKeyCols
	e.joinResult = e.newChunk()
	return e
}
// buildNoRangeTableReader builds a TableReaderExecutor with its DAG request
// but without key ranges; callers fill in the ranges afterwards.
func buildNoRangeTableReader(b *executorBuilder, v *plan.PhysicalTableReader) (*TableReaderExecutor, error) {
	dagReq, err := b.constructDAGReq(v.TablePlans)
	if err != nil {
		return nil, errors.Trace(err)
	}
	ts := v.TablePlans[0].(*plan.PhysicalTableScan)
	table, _ := b.is.TableByID(ts.Table.ID)
	e := &TableReaderExecutor{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
		dagPB:        dagReq,
		tableID:      ts.Table.ID,
		table:        table,
		keepOrder:    ts.KeepOrder,
		desc:         ts.Desc,
		columns:      ts.Columns,
		priority:     b.priority,
	}
	e.baseExecutor.supportChk = true
	// Output every schema column in order.
	for i := range v.Schema().Columns {
		dagReq.OutputOffsets = append(dagReq.OutputOffsets, uint32(i))
	}
	return e, nil
}

// buildTableReader builds a TableReaderExecutor with the scan ranges taken
// from the plan.
func (b *executorBuilder) buildTableReader(v *plan.PhysicalTableReader) *TableReaderExecutor {
	ret, err := buildNoRangeTableReader(b, v)
	if err != nil {
		b.err = errors.Trace(err)
		return nil
	}
	ts := v.TablePlans[0].(*plan.PhysicalTableScan)
	ret.ranges = ts.Ranges
	return ret
}

// buildNoRangeIndexReader builds an IndexReaderExecutor without key ranges;
// callers fill in the ranges afterwards.
func buildNoRangeIndexReader(b *executorBuilder, v *plan.PhysicalIndexReader) (*IndexReaderExecutor, error) {
	dagReq, err := b.constructDAGReq(v.IndexPlans)
	if err != nil {
		return nil, errors.Trace(err)
	}
	is := v.IndexPlans[0].(*plan.PhysicalIndexScan)
	table, _ := b.is.TableByID(is.Table.ID)
	e := &IndexReaderExecutor{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
		dagPB:        dagReq,
		tableID:      is.Table.ID,
		table:        table,
		index:        is.Index,
		keepOrder:    !is.OutOfOrder,
		desc:         is.Desc,
		columns:      is.Columns,
		priority:     b.priority,
	}
	e.supportChk = true
	for _, col := range v.OutputColumns {
		dagReq.OutputOffsets = append(dagReq.OutputOffsets, uint32(col.Index))
	}
	return e, nil
}

// buildIndexReader builds an IndexReaderExecutor with the scan ranges taken
// from the plan.
func (b *executorBuilder) buildIndexReader(v *plan.PhysicalIndexReader) *IndexReaderExecutor {
	ret, err := buildNoRangeIndexReader(b, v)
	if err != nil {
		b.err = errors.Trace(err)
		return nil
	}
	is := v.IndexPlans[0].(*plan.PhysicalIndexScan)
	ret.ranges = is.Ranges
	return ret
}
// buildNoRangeIndexLookUpReader builds an IndexLookUpExecutor (index scan
// followed by table lookup on the fetched handles) without key ranges;
// callers fill in the ranges afterwards.
func buildNoRangeIndexLookUpReader(b *executorBuilder, v *plan.PhysicalIndexLookUpReader) (*IndexLookUpExecutor, error) {
	indexReq, err := b.constructDAGReq(v.IndexPlans)
	if err != nil {
		return nil, errors.Trace(err)
	}
	tableReq, err := b.constructDAGReq(v.TablePlans)
	if err != nil {
		return nil, errors.Trace(err)
	}
	is := v.IndexPlans[0].(*plan.PhysicalIndexScan)
	// The index request only outputs the handle column (placed after the
	// index columns).
	indexReq.OutputOffsets = []uint32{uint32(len(is.Index.Columns))}
	table, _ := b.is.TableByID(is.Table.ID)
	for i := 0; i < v.Schema().Len(); i++ {
		tableReq.OutputOffsets = append(tableReq.OutputOffsets, uint32(i))
	}
	e := &IndexLookUpExecutor{
		baseExecutor:      newBaseExecutor(v.Schema(), b.ctx),
		dagPB:             indexReq,
		tableID:           is.Table.ID,
		table:             table,
		index:             is.Index,
		keepOrder:         !is.OutOfOrder,
		desc:              is.Desc,
		tableRequest:      tableReq,
		columns:           is.Columns,
		priority:          b.priority,
		dataReaderBuilder: &dataReaderBuilder{executorBuilder: b},
	}
	e.supportChk = true
	if cols, ok := v.Schema().TblID2Handle[is.Table.ID]; ok {
		e.handleIdx = cols[0].Index
	}
	return e, nil
}

// buildIndexLookUpReader builds an IndexLookUpExecutor with the scan ranges
// taken from the plan.
func (b *executorBuilder) buildIndexLookUpReader(v *plan.PhysicalIndexLookUpReader) *IndexLookUpExecutor {
	ret, err := buildNoRangeIndexLookUpReader(b, v)
	if err != nil {
		b.err = errors.Trace(err)
		return nil
	}
	is := v.IndexPlans[0].(*plan.PhysicalIndexScan)
	ret.ranges = is.Ranges
	return ret
}
// dataReaderBuilder build an executor.
// The executor can be used to read data in the ranges which are constructed by datums.
// Differences from executorBuilder:
// 1. dataReaderBuilder calculate data range from argument, rather than plan.
// 2. the result executor is already opened.
type dataReaderBuilder struct {
	plan.Plan
	*executorBuilder
}

// buildExecutorForDatums dispatches on the wrapped plan type to build an
// already-opened reader over the key ranges derived from datums.
func (builder *dataReaderBuilder) buildExecutorForDatums(goCtx goctx.Context, datums [][]types.Datum) (Executor, error) {
	switch v := builder.Plan.(type) {
	case *plan.PhysicalIndexReader:
		return builder.buildIndexReaderForDatums(goCtx, v, datums)
	case *plan.PhysicalTableReader:
		return builder.buildTableReaderForDatums(goCtx, v, datums)
	case *plan.PhysicalIndexLookUpReader:
		return builder.buildIndexLookUpReaderForDatums(goCtx, v, datums)
	}
	return nil, errors.New("Wrong plan type for dataReaderBuilder")
}

// buildTableReaderForDatums treats each datum row's first column as a row
// handle and builds a table reader over those handles.
func (builder *dataReaderBuilder) buildTableReaderForDatums(goCtx goctx.Context, v *plan.PhysicalTableReader, datums [][]types.Datum) (Executor, error) {
	e, err := buildNoRangeTableReader(builder.executorBuilder, v)
	if err != nil {
		return nil, errors.Trace(err)
	}
	handles := make([]int64, 0, len(datums))
	for _, datum := range datums {
		handles = append(handles, datum[0].GetInt64())
	}
	return builder.buildTableReaderFromHandles(goCtx, e, handles)
}

// buildTableReaderFromHandles issues the distsql request for the given
// (sorted) handles and returns the reader with its result already fetching.
func (builder *dataReaderBuilder) buildTableReaderFromHandles(goCtx goctx.Context, e *TableReaderExecutor, handles []int64) (Executor, error) {
	// Sort handles so the request builder can produce ordered key ranges.
	sort.Sort(sortutil.Int64Slice(handles))
	var b requestBuilder
	kvReq, err := b.SetTableHandles(e.tableID, handles).
		SetDAGRequest(e.dagPB).
		SetDesc(e.desc).
		SetKeepOrder(e.keepOrder).
		SetPriority(e.priority).
		SetFromSessionVars(e.ctx.GetSessionVars()).
		Build()
	if err != nil {
		return nil, errors.Trace(err)
	}
	e.result, err = distsql.SelectDAG(goCtx, builder.ctx, kvReq, e.schema.GetTypes())
	if err != nil {
		return nil, errors.Trace(err)
	}
	e.result.Fetch(goCtx)
	return e, nil
}

// buildIndexReaderForDatums builds an index reader over the index values
// given as datum rows and starts fetching its result.
func (builder *dataReaderBuilder) buildIndexReaderForDatums(goCtx goctx.Context, v *plan.PhysicalIndexReader, values [][]types.Datum) (Executor, error) {
	e, err := buildNoRangeIndexReader(builder.executorBuilder, v)
	if err != nil {
		return nil, errors.Trace(err)
	}
	var b requestBuilder
	kvReq, err := b.SetIndexValues(e.tableID, e.index.ID, values).
		SetDAGRequest(e.dagPB).
		SetDesc(e.desc).
		SetKeepOrder(e.keepOrder).
		SetPriority(e.priority).
		SetFromSessionVars(e.ctx.GetSessionVars()).
		Build()
	if err != nil {
		return nil, errors.Trace(err)
	}
	e.result, err = distsql.SelectDAG(goCtx, builder.ctx, kvReq, e.schema.GetTypes())
	if err != nil {
		return nil, errors.Trace(err)
	}
	e.result.Fetch(goCtx)
	return e, nil
}

// buildIndexLookUpReaderForDatums builds an index lookup reader whose index
// side scans the key ranges derived from the given index values.
func (builder *dataReaderBuilder) buildIndexLookUpReaderForDatums(goCtx goctx.Context, v *plan.PhysicalIndexLookUpReader, values [][]types.Datum) (Executor, error) {
	e, err := buildNoRangeIndexLookUpReader(builder.executorBuilder, v)
	if err != nil {
		return nil, errors.Trace(err)
	}
	kvRanges, err := indexValuesToKVRanges(e.tableID, e.index.ID, values)
	if err != nil {
		return nil, errors.Trace(err)
	}
	err = e.open(goCtx, kvRanges)
	return e, errors.Trace(err)
}
| executor/builder.go | 1 | https://github.com/pingcap/tidb/commit/b35e024ace8a525b61fd53eae22e9723d06454ca | [
0.9990227222442627,
0.5302607417106628,
0.00016938053886406124,
0.5688372254371643,
0.41876140236854553
] |
{
"id": 1,
"code_window": [
"\t\tbaseExecutor: newBaseExecutor(nil, b.ctx),\n",
"\t\tName: v.Name,\n",
"\t}\n",
"}\n",
"\n",
"func (b *executorBuilder) buildSelectLock(v *plan.PhysicalLock) Executor {\n",
"\tsrc := b.build(v.Children()[0])\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\te.supportChk = true\n",
"\treturn e\n"
],
"file_path": "executor/builder.go",
"type": "add",
"edit_start_line_idx": 211
} | Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| _vendor/src/golang.org/x/sys/LICENSE | 0 | https://github.com/pingcap/tidb/commit/b35e024ace8a525b61fd53eae22e9723d06454ca | [
0.00017239821318071336,
0.0001670654019108042,
0.00016100511129479855,
0.00016779283760115504,
0.000004679571247834247
] |
{
"id": 1,
"code_window": [
"\t\tbaseExecutor: newBaseExecutor(nil, b.ctx),\n",
"\t\tName: v.Name,\n",
"\t}\n",
"}\n",
"\n",
"func (b *executorBuilder) buildSelectLock(v *plan.PhysicalLock) Executor {\n",
"\tsrc := b.build(v.Children()[0])\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\te.supportChk = true\n",
"\treturn e\n"
],
"file_path": "executor/builder.go",
"type": "add",
"edit_start_line_idx": 211
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv
import (
"bytes"
"sync"
"sync/atomic"
"time"
"github.com/google/btree"
"github.com/juju/errors"
"github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/pd/pd-client"
log "github.com/sirupsen/logrus"
goctx "golang.org/x/net/context"
)
const (
	// btreeDegree is the degree of the B-tree indexing cached regions by key.
	btreeDegree = 32
	// rcDefaultRegionCacheTTL bounds how long a cached region stays valid
	// after its last access.
	rcDefaultRegionCacheTTL = time.Minute * 10
)

// CachedRegion encapsulates {Region, TTL}
type CachedRegion struct {
	region *Region
	// lastAccess is a unix timestamp (seconds), read/written atomically.
	lastAccess int64
}

// isValid reports whether the cached region's last access is within the TTL.
func (c *CachedRegion) isValid() bool {
	lastAccess := atomic.LoadInt64(&c.lastAccess)
	lastAccessTime := time.Unix(lastAccess, 0)
	return time.Since(lastAccessTime) < rcDefaultRegionCacheTTL
}

// RegionCache caches Regions loaded from PD.
type RegionCache struct {
	pdClient pd.Client
	// mu guards the region map and the key-ordered B-tree over regions.
	mu struct {
		sync.RWMutex
		regions map[RegionVerID]*CachedRegion
		sorted  *btree.BTree
	}
	// storeMu guards the store-ID -> Store map.
	storeMu struct {
		sync.RWMutex
		stores map[uint64]*Store
	}
}

// NewRegionCache creates a RegionCache.
func NewRegionCache(pdClient pd.Client) *RegionCache {
	c := &RegionCache{
		pdClient: pdClient,
	}
	c.mu.regions = make(map[RegionVerID]*CachedRegion)
	c.mu.sorted = btree.New(btreeDegree)
	c.storeMu.stores = make(map[uint64]*Store)
	return c
}

// RPCContext contains data that is needed to send RPC to a region.
type RPCContext struct {
	Region RegionVerID
	Meta   *metapb.Region
	Peer   *metapb.Peer
	Addr   string
}

// GetStoreID returns StoreID, or 0 when no peer is set.
func (c *RPCContext) GetStoreID() uint64 {
	if c.Peer != nil {
		return c.Peer.StoreId
	}
	return 0
}
// GetRPCContext returns RPCContext for a region. If it returns nil, the region
// must be out of date and already dropped from cache.
func (c *RegionCache) GetRPCContext(bo *Backoffer, id RegionVerID) (*RPCContext, error) {
	c.mu.RLock()
	region := c.getCachedRegion(id)
	if region == nil {
		c.mu.RUnlock()
		return nil, nil
	}
	// Note: it is safe to use region.meta and region.peer without clone after
	// unlock, because region cache will never update the content of region's meta
	// or peer. On the contrary, if we want to use `region` after unlock, then we
	// need to clone it to avoid data race.
	meta, peer := region.meta, region.peer
	c.mu.RUnlock()
	addr, err := c.GetStoreAddr(bo, peer.GetStoreId())
	if err != nil {
		return nil, errors.Trace(err)
	}
	if addr == "" {
		// Store not found, region must be out of date.
		c.DropRegion(id)
		return nil, nil
	}
	return &RPCContext{
		Region: id,
		Meta:   meta,
		Peer:   peer,
		Addr:   addr,
	}, nil
}
// KeyLocation is the region and range that a key is located.
type KeyLocation struct {
	Region   RegionVerID
	StartKey []byte
	EndKey   []byte
}

// Contains reports whether key falls within [StartKey, EndKey).
// An empty EndKey means the range is unbounded on the right.
func (l *KeyLocation) Contains(key []byte) bool {
	if bytes.Compare(key, l.StartKey) < 0 {
		return false
	}
	return len(l.EndKey) == 0 || bytes.Compare(key, l.EndKey) < 0
}
// LocateKey searches for the region and range that the key is located.
func (c *RegionCache) LocateKey(bo *Backoffer, key []byte) (*KeyLocation, error) {
c.mu.RLock()
r := c.searchCachedRegion(key)
if r != nil {
loc := &KeyLocation{
Region: r.VerID(),
StartKey: r.StartKey(),
EndKey: r.EndKey(),
}
c.mu.RUnlock()
return loc, nil
}
c.mu.RUnlock()
r, err := c.loadRegion(bo, key)
if err != nil {
return nil, errors.Trace(err)
}
c.mu.Lock()
defer c.mu.Unlock()
c.insertRegionToCache(r)
return &KeyLocation{
Region: r.VerID(),
StartKey: r.StartKey(),
EndKey: r.EndKey(),
}, nil
}
// LocateRegionByID searches for the region with ID.
func (c *RegionCache) LocateRegionByID(bo *Backoffer, regionID uint64) (*KeyLocation, error) {
c.mu.RLock()
r := c.getRegionByIDFromCache(regionID)
if r != nil {
loc := &KeyLocation{
Region: r.VerID(),
StartKey: r.StartKey(),
EndKey: r.EndKey(),
}
c.mu.RUnlock()
return loc, nil
}
c.mu.RUnlock()
r, err := c.loadRegionByID(bo, regionID)
if err != nil {
return nil, errors.Trace(err)
}
c.mu.Lock()
defer c.mu.Unlock()
c.insertRegionToCache(r)
return &KeyLocation{
Region: r.VerID(),
StartKey: r.StartKey(),
EndKey: r.EndKey(),
}, nil
}
// GroupKeysByRegion separates keys into groups by their belonging Regions.
// Specially it also returns the first key's region which may be used as the
// 'PrimaryLockKey' and should be committed ahead of others.
func (c *RegionCache) GroupKeysByRegion(bo *Backoffer, keys [][]byte) (map[RegionVerID][][]byte, RegionVerID, error) {
groups := make(map[RegionVerID][][]byte)
var first RegionVerID
var lastLoc *KeyLocation
for i, k := range keys {
if lastLoc == nil || !lastLoc.Contains(k) {
var err error
lastLoc, err = c.LocateKey(bo, k)
if err != nil {
return nil, first, errors.Trace(err)
}
}
id := lastLoc.Region
if i == 0 {
first = id
}
groups[id] = append(groups[id], k)
}
return groups, first, nil
}
// ListRegionIDsInKeyRange lists ids of regions in [start_key,end_key].
func (c *RegionCache) ListRegionIDsInKeyRange(bo *Backoffer, startKey, endKey []byte) (regionIDs []uint64, err error) {
for {
curRegion, err := c.LocateKey(bo, startKey)
if err != nil {
return nil, errors.Trace(err)
}
regionIDs = append(regionIDs, curRegion.Region.id)
if curRegion.Contains(endKey) {
break
}
startKey = curRegion.EndKey
}
return regionIDs, nil
}
// DropRegion removes a cached Region.
func (c *RegionCache) DropRegion(id RegionVerID) {
c.mu.Lock()
defer c.mu.Unlock()
c.dropRegionFromCache(id)
}
// UpdateLeader update some region cache with newer leader info.
func (c *RegionCache) UpdateLeader(regionID RegionVerID, leaderStoreID uint64) {
c.mu.Lock()
defer c.mu.Unlock()
r := c.getCachedRegion(regionID)
if r == nil {
log.Debugf("regionCache: cannot find region when updating leader %d,%d", regionID, leaderStoreID)
return
}
if !r.SwitchPeer(leaderStoreID) {
log.Debugf("regionCache: cannot find peer when updating leader %d,%d", regionID, leaderStoreID)
c.dropRegionFromCache(r.VerID())
}
}
// insertRegionToCache tries to insert the Region to cache.
func (c *RegionCache) insertRegionToCache(r *Region) {
old := c.mu.sorted.ReplaceOrInsert(newBtreeItem(r))
if old != nil {
delete(c.mu.regions, old.(*btreeItem).region.VerID())
}
c.mu.regions[r.VerID()] = &CachedRegion{
region: r,
lastAccess: time.Now().Unix(),
}
}
// getCachedRegion loads a region from cache. It also checks if the region has
// not been accessed for a long time (maybe out of date). In this case, it
// returns nil so the region will be loaded from PD again.
// Note that it should be called with c.mu.RLock(), and the returned Region
// should not be used after c.mu is RUnlock().
func (c *RegionCache) getCachedRegion(id RegionVerID) *Region {
cachedRegion, ok := c.mu.regions[id]
if !ok {
return nil
}
if cachedRegion.isValid() {
atomic.StoreInt64(&cachedRegion.lastAccess, time.Now().Unix())
return cachedRegion.region
}
return nil
}
// searchCachedRegion finds a region from cache by key. Like `getCachedRegion`,
// it should be called with c.mu.RLock(), and the returned Region should not be
// used after c.mu is RUnlock().
func (c *RegionCache) searchCachedRegion(key []byte) *Region {
var r *Region
c.mu.sorted.DescendLessOrEqual(newBtreeSearchItem(key), func(item btree.Item) bool {
r = item.(*btreeItem).region
return false
})
if r != nil && r.Contains(key) {
return c.getCachedRegion(r.VerID())
}
return nil
}
// getRegionByIDFromCache tries to get region by regionID from cache. Like
// `getCachedRegion`, it should be called with c.mu.RLock(), and the returned
// Region should not be used after c.mu is RUnlock().
func (c *RegionCache) getRegionByIDFromCache(regionID uint64) *Region {
for v, r := range c.mu.regions {
if v.id == regionID {
return r.region
}
}
return nil
}
func (c *RegionCache) dropRegionFromCache(verID RegionVerID) {
r, ok := c.mu.regions[verID]
if !ok {
return
}
c.mu.sorted.Delete(newBtreeItem(r.region))
delete(c.mu.regions, verID)
}
// loadRegion loads region from pd client, and picks the first peer as leader.
func (c *RegionCache) loadRegion(bo *Backoffer, key []byte) (*Region, error) {
var backoffErr error
for {
if backoffErr != nil {
err := bo.Backoff(boPDRPC, backoffErr)
if err != nil {
return nil, errors.Trace(err)
}
}
meta, leader, err := c.pdClient.GetRegion(bo, key)
if err != nil {
backoffErr = errors.Errorf("loadRegion from PD failed, key: %q, err: %v", key, err)
continue
}
if meta == nil {
backoffErr = errors.Errorf("region not found for key %q", key)
continue
}
if len(meta.Peers) == 0 {
return nil, errors.New("receive Region with no peer")
}
region := &Region{
meta: meta,
peer: meta.Peers[0],
}
if leader != nil {
region.SwitchPeer(leader.GetStoreId())
}
return region, nil
}
}
// loadRegionByID loads region from pd client, and picks the first peer as leader.
func (c *RegionCache) loadRegionByID(bo *Backoffer, regionID uint64) (*Region, error) {
var backoffErr error
for {
if backoffErr != nil {
err := bo.Backoff(boPDRPC, backoffErr)
if err != nil {
return nil, errors.Trace(err)
}
}
meta, leader, err := c.pdClient.GetRegionByID(bo, regionID)
if err != nil {
backoffErr = errors.Errorf("loadRegion from PD failed, regionID: %v, err: %v", regionID, err)
continue
}
if meta == nil {
backoffErr = errors.Errorf("region not found for regionID %q", regionID)
continue
}
if len(meta.Peers) == 0 {
return nil, errors.New("receive Region with no peer")
}
region := &Region{
meta: meta,
peer: meta.Peers[0],
}
if leader != nil {
region.SwitchPeer(leader.GetStoreId())
}
return region, nil
}
}
// GetStoreAddr returns a tikv server's address by its storeID. It checks cache
// first, sends request to pd server when necessary.
func (c *RegionCache) GetStoreAddr(bo *Backoffer, id uint64) (string, error) {
c.storeMu.RLock()
if store, ok := c.storeMu.stores[id]; ok {
c.storeMu.RUnlock()
return store.Addr, nil
}
c.storeMu.RUnlock()
return c.ReloadStoreAddr(bo, id)
}
// ReloadStoreAddr reloads store's address.
func (c *RegionCache) ReloadStoreAddr(bo *Backoffer, id uint64) (string, error) {
addr, err := c.loadStoreAddr(bo, id)
if err != nil || addr == "" {
return "", errors.Trace(err)
}
c.storeMu.Lock()
defer c.storeMu.Unlock()
c.storeMu.stores[id] = &Store{
ID: id,
Addr: addr,
}
return addr, nil
}
// ClearStoreByID clears store from cache with storeID.
func (c *RegionCache) ClearStoreByID(id uint64) {
c.storeMu.Lock()
defer c.storeMu.Unlock()
delete(c.storeMu.stores, id)
}
func (c *RegionCache) loadStoreAddr(bo *Backoffer, id uint64) (string, error) {
for {
store, err := c.pdClient.GetStore(bo, id)
if err != nil {
if errors.Cause(err) == goctx.Canceled {
return "", errors.Trace(err)
}
err = errors.Errorf("loadStore from PD failed, id: %d, err: %v", id, err)
if err = bo.Backoff(boPDRPC, err); err != nil {
return "", errors.Trace(err)
}
continue
}
if store == nil {
return "", nil
}
return store.GetAddress(), nil
}
}
// OnRequestFail is used for clearing cache when a tikv server does not respond.
func (c *RegionCache) OnRequestFail(ctx *RPCContext, err error) {
// Switch region's leader peer to next one.
regionID := ctx.Region
c.mu.Lock()
if cachedregion, ok := c.mu.regions[regionID]; ok {
region := cachedregion.region
if !region.OnRequestFail(ctx.Peer.GetStoreId()) {
c.dropRegionFromCache(regionID)
}
}
c.mu.Unlock()
// Store's meta may be out of date.
storeID := ctx.Peer.GetStoreId()
c.storeMu.Lock()
delete(c.storeMu.stores, storeID)
c.storeMu.Unlock()
log.Infof("drop regions of store %d from cache due to request fail, err: %v", storeID, err)
c.mu.Lock()
for id, r := range c.mu.regions {
if r.region.peer.GetStoreId() == storeID {
c.dropRegionFromCache(id)
}
}
c.mu.Unlock()
}
// OnRegionStale removes the old region and inserts new regions into the cache.
func (c *RegionCache) OnRegionStale(ctx *RPCContext, newRegions []*metapb.Region) error {
c.mu.Lock()
defer c.mu.Unlock()
c.dropRegionFromCache(ctx.Region)
for _, meta := range newRegions {
if _, ok := c.pdClient.(*codecPDClient); ok {
if err := decodeRegionMetaKey(meta); err != nil {
return errors.Errorf("newRegion's range key is not encoded: %v, %v", meta, err)
}
}
region := &Region{
meta: meta,
peer: meta.Peers[0],
}
region.SwitchPeer(ctx.Peer.GetStoreId())
c.insertRegionToCache(region)
}
return nil
}
// PDClient returns the pd.Client in RegionCache.
func (c *RegionCache) PDClient() pd.Client {
return c.pdClient
}
// btreeItem is BTree's Item that uses []byte to compare.
type btreeItem struct {
key []byte
region *Region
}
func newBtreeItem(r *Region) *btreeItem {
return &btreeItem{
key: r.StartKey(),
region: r,
}
}
func newBtreeSearchItem(key []byte) *btreeItem {
return &btreeItem{
key: key,
}
}
func (item *btreeItem) Less(other btree.Item) bool {
return bytes.Compare(item.key, other.(*btreeItem).key) < 0
}
// Region stores region's meta and its leader peer.
type Region struct {
meta *metapb.Region
peer *metapb.Peer
unreachableStores []uint64
}
// GetID returns id.
func (r *Region) GetID() uint64 {
return r.meta.GetId()
}
// RegionVerID is a unique ID that can identify a Region at a specific version.
type RegionVerID struct {
id uint64
confVer uint64
ver uint64
}
// VerID returns the Region's RegionVerID.
func (r *Region) VerID() RegionVerID {
return RegionVerID{
id: r.meta.GetId(),
confVer: r.meta.GetRegionEpoch().GetConfVer(),
ver: r.meta.GetRegionEpoch().GetVersion(),
}
}
// StartKey returns StartKey.
func (r *Region) StartKey() []byte {
return r.meta.StartKey
}
// EndKey returns EndKey.
func (r *Region) EndKey() []byte {
return r.meta.EndKey
}
// GetContext constructs kvprotopb.Context from region info.
func (r *Region) GetContext() *kvrpcpb.Context {
return &kvrpcpb.Context{
RegionId: r.meta.Id,
RegionEpoch: r.meta.RegionEpoch,
Peer: r.peer,
}
}
// OnRequestFail records unreachable peer and tries to select another valid peer.
// It returns false if all peers are unreachable.
func (r *Region) OnRequestFail(storeID uint64) bool {
if r.peer.GetStoreId() != storeID {
return true
}
r.unreachableStores = append(r.unreachableStores, storeID)
L:
for _, p := range r.meta.Peers {
for _, id := range r.unreachableStores {
if p.GetStoreId() == id {
continue L
}
}
r.peer = p
return true
}
return false
}
// SwitchPeer switches current peer to the one on specific store. It returns
// false if no peer matches the storeID.
func (r *Region) SwitchPeer(storeID uint64) bool {
for _, p := range r.meta.Peers {
if p.GetStoreId() == storeID {
r.peer = p
return true
}
}
return false
}
// Contains checks whether the key is in the region, for the maximum region endKey is empty.
// startKey <= key < endKey.
func (r *Region) Contains(key []byte) bool {
return bytes.Compare(r.meta.GetStartKey(), key) <= 0 &&
(bytes.Compare(key, r.meta.GetEndKey()) < 0 || len(r.meta.GetEndKey()) == 0)
}
// Store contains a tikv server's address.
type Store struct {
ID uint64
Addr string
}
| store/tikv/region_cache.go | 0 | https://github.com/pingcap/tidb/commit/b35e024ace8a525b61fd53eae22e9723d06454ca | [
0.015367231331765652,
0.0004489794955588877,
0.00016492936993017793,
0.00017311976989731193,
0.0019141511293128133
] |
{
"id": 1,
"code_window": [
"\t\tbaseExecutor: newBaseExecutor(nil, b.ctx),\n",
"\t\tName: v.Name,\n",
"\t}\n",
"}\n",
"\n",
"func (b *executorBuilder) buildSelectLock(v *plan.PhysicalLock) Executor {\n",
"\tsrc := b.build(v.Children()[0])\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\te.supportChk = true\n",
"\treturn e\n"
],
"file_path": "executor/builder.go",
"type": "add",
"edit_start_line_idx": 211
} | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"log"
"runtime/debug"
"sync"
)
// Simple, non-concurrent server for testing.
type TSimpleServer struct {
quit chan struct{}
processorFactory TProcessorFactory
serverTransport TServerTransport
inputTransportFactory TTransportFactory
outputTransportFactory TTransportFactory
inputProtocolFactory TProtocolFactory
outputProtocolFactory TProtocolFactory
}
func NewTSimpleServer2(processor TProcessor, serverTransport TServerTransport) *TSimpleServer {
return NewTSimpleServerFactory2(NewTProcessorFactory(processor), serverTransport)
}
func NewTSimpleServer4(processor TProcessor, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer {
return NewTSimpleServerFactory4(NewTProcessorFactory(processor),
serverTransport,
transportFactory,
protocolFactory,
)
}
func NewTSimpleServer6(processor TProcessor, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer {
return NewTSimpleServerFactory6(NewTProcessorFactory(processor),
serverTransport,
inputTransportFactory,
outputTransportFactory,
inputProtocolFactory,
outputProtocolFactory,
)
}
func NewTSimpleServerFactory2(processorFactory TProcessorFactory, serverTransport TServerTransport) *TSimpleServer {
return NewTSimpleServerFactory6(processorFactory,
serverTransport,
NewTTransportFactory(),
NewTTransportFactory(),
NewTBinaryProtocolFactoryDefault(),
NewTBinaryProtocolFactoryDefault(),
)
}
func NewTSimpleServerFactory4(processorFactory TProcessorFactory, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer {
return NewTSimpleServerFactory6(processorFactory,
serverTransport,
transportFactory,
transportFactory,
protocolFactory,
protocolFactory,
)
}
func NewTSimpleServerFactory6(processorFactory TProcessorFactory, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer {
return &TSimpleServer{
processorFactory: processorFactory,
serverTransport: serverTransport,
inputTransportFactory: inputTransportFactory,
outputTransportFactory: outputTransportFactory,
inputProtocolFactory: inputProtocolFactory,
outputProtocolFactory: outputProtocolFactory,
quit: make(chan struct{}, 1),
}
}
func (p *TSimpleServer) ProcessorFactory() TProcessorFactory {
return p.processorFactory
}
func (p *TSimpleServer) ServerTransport() TServerTransport {
return p.serverTransport
}
func (p *TSimpleServer) InputTransportFactory() TTransportFactory {
return p.inputTransportFactory
}
func (p *TSimpleServer) OutputTransportFactory() TTransportFactory {
return p.outputTransportFactory
}
func (p *TSimpleServer) InputProtocolFactory() TProtocolFactory {
return p.inputProtocolFactory
}
func (p *TSimpleServer) OutputProtocolFactory() TProtocolFactory {
return p.outputProtocolFactory
}
func (p *TSimpleServer) Listen() error {
return p.serverTransport.Listen()
}
func (p *TSimpleServer) AcceptLoop() error {
for {
client, err := p.serverTransport.Accept()
if err != nil {
select {
case <-p.quit:
return nil
default:
}
return err
}
if client != nil {
go func() {
if err := p.processRequests(client); err != nil {
log.Println("error processing request:", err)
}
}()
}
}
}
func (p *TSimpleServer) Serve() error {
err := p.Listen()
if err != nil {
return err
}
p.AcceptLoop()
return nil
}
var once sync.Once
func (p *TSimpleServer) Stop() error {
q := func() {
p.quit <- struct{}{}
p.serverTransport.Interrupt()
}
once.Do(q)
return nil
}
func (p *TSimpleServer) processRequests(client TTransport) error {
processor := p.processorFactory.GetProcessor(client)
inputTransport := p.inputTransportFactory.GetTransport(client)
outputTransport := p.outputTransportFactory.GetTransport(client)
inputProtocol := p.inputProtocolFactory.GetProtocol(inputTransport)
outputProtocol := p.outputProtocolFactory.GetProtocol(outputTransport)
defer func() {
if e := recover(); e != nil {
log.Printf("panic in processor: %s: %s", e, debug.Stack())
}
}()
if inputTransport != nil {
defer inputTransport.Close()
}
if outputTransport != nil {
defer outputTransport.Close()
}
for {
ok, err := processor.Process(inputProtocol, outputProtocol)
if err, ok := err.(TTransportException); ok && err.TypeId() == END_OF_FILE {
return nil
} else if err != nil {
log.Printf("error processing request: %s", err)
return err
}
if err, ok := err.(TApplicationException); ok && err.TypeId() == UNKNOWN_METHOD {
continue
}
if !ok {
break
}
}
return nil
}
| _vendor/src/github.com/apache/thrift/lib/go/thrift/simple_server.go | 0 | https://github.com/pingcap/tidb/commit/b35e024ace8a525b61fd53eae22e9723d06454ca | [
0.0005626889760605991,
0.0001975458872038871,
0.00016736953693907708,
0.00017410953296348453,
0.00008457837247988209
] |
{
"id": 2,
"code_window": [
"\tName string\n",
"}\n",
"\n",
"// Next implements the Executor Next interface.\n",
"func (e *DeallocateExec) Next(goCtx goctx.Context) (Row, error) {\n",
"\tvars := e.ctx.GetSessionVars()\n",
"\tid, ok := vars.PreparedStmtNameToID[e.Name]\n",
"\tif !ok {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\treturn nil, errors.Trace(e.run(goCtx))\n",
"}\n",
"\n",
"// NextChunk implements the Executor NextChunk interface.\n",
"func (e *DeallocateExec) NextChunk(goCtx goctx.Context, chk *chunk.Chunk) error {\n",
"\treturn errors.Trace(e.run(goCtx))\n",
"}\n",
"\n",
"func (e *DeallocateExec) run(goCtx goctx.Context) error {\n"
],
"file_path": "executor/prepared.go",
"type": "add",
"edit_start_line_idx": 240
} | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"math"
"sort"
"sync"
"time"
"github.com/cznic/sortutil"
"github.com/juju/errors"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/distsql"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/plan"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/admin"
"github.com/pingcap/tipb/go-tipb"
goctx "golang.org/x/net/context"
)
// executorBuilder builds an Executor from a Plan.
// The InfoSchema must not change during execution.
type executorBuilder struct {
ctx context.Context
is infoschema.InfoSchema
priority int
startTS uint64 // cached when the first time getStartTS() is called
// err is set when there is error happened during Executor building process.
err error
}
func newExecutorBuilder(ctx context.Context, is infoschema.InfoSchema, priority int) *executorBuilder {
return &executorBuilder{
ctx: ctx,
is: is,
priority: priority,
}
}
func (b *executorBuilder) build(p plan.Plan) Executor {
switch v := p.(type) {
case nil:
return nil
case *plan.CheckTable:
return b.buildCheckTable(v)
case *plan.DDL:
return b.buildDDL(v)
case *plan.Deallocate:
return b.buildDeallocate(v)
case *plan.Delete:
return b.buildDelete(v)
case *plan.Execute:
return b.buildExecute(v)
case *plan.Explain:
return b.buildExplain(v)
case *plan.Insert:
return b.buildInsert(v)
case *plan.LoadData:
return b.buildLoadData(v)
case *plan.PhysicalLimit:
return b.buildLimit(v)
case *plan.Prepare:
return b.buildPrepare(v)
case *plan.PhysicalLock:
return b.buildSelectLock(v)
case *plan.CancelDDLJobs:
return b.buildCancelDDLJobs(v)
case *plan.ShowDDL:
return b.buildShowDDL(v)
case *plan.ShowDDLJobs:
return b.buildShowDDLJobs(v)
case *plan.Show:
return b.buildShow(v)
case *plan.Simple:
return b.buildSimple(v)
case *plan.Set:
return b.buildSet(v)
case *plan.PhysicalSort:
return b.buildSort(v)
case *plan.PhysicalTopN:
return b.buildTopN(v)
case *plan.PhysicalUnionAll:
return b.buildUnionAll(v)
case *plan.Update:
return b.buildUpdate(v)
case *plan.PhysicalUnionScan:
return b.buildUnionScanExec(v)
case *plan.PhysicalHashJoin:
return b.buildHashJoin(v)
case *plan.PhysicalMergeJoin:
return b.buildMergeJoin(v)
case *plan.PhysicalHashSemiJoin:
return b.buildSemiJoin(v)
case *plan.PhysicalIndexJoin:
return b.buildIndexLookUpJoin(v)
case *plan.PhysicalSelection:
return b.buildSelection(v)
case *plan.PhysicalHashAgg:
return b.buildHashAgg(v)
case *plan.PhysicalStreamAgg:
return b.buildStreamAgg(v)
case *plan.PhysicalProjection:
return b.buildProjection(v)
case *plan.PhysicalMemTable:
return b.buildMemTable(v)
case *plan.PhysicalTableDual:
return b.buildTableDual(v)
case *plan.PhysicalApply:
return b.buildApply(v)
case *plan.PhysicalExists:
return b.buildExists(v)
case *plan.PhysicalMaxOneRow:
return b.buildMaxOneRow(v)
case *plan.Analyze:
return b.buildAnalyze(v)
case *plan.PhysicalTableReader:
return b.buildTableReader(v)
case *plan.PhysicalIndexReader:
return b.buildIndexReader(v)
case *plan.PhysicalIndexLookUpReader:
return b.buildIndexLookUpReader(v)
default:
b.err = ErrUnknownPlan.Gen("Unknown Plan %T", p)
return nil
}
}
func (b *executorBuilder) buildCancelDDLJobs(v *plan.CancelDDLJobs) Executor {
e := &CancelDDLJobsExec{
baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
jobIDs: v.JobIDs,
}
e.errs, b.err = admin.CancelJobs(e.ctx.Txn(), e.jobIDs)
if b.err != nil {
b.err = errors.Trace(b.err)
return nil
}
e.supportChk = true
return e
}
func (b *executorBuilder) buildShowDDL(v *plan.ShowDDL) Executor {
// We get DDLInfo here because for Executors that returns result set,
// next will be called after transaction has been committed.
// We need the transaction to get DDLInfo.
e := &ShowDDLExec{
baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
}
var err error
ownerManager := domain.GetDomain(e.ctx).DDL().OwnerManager()
ctx, cancel := goctx.WithTimeout(goctx.Background(), 3*time.Second)
e.ddlOwnerID, err = ownerManager.GetOwnerID(ctx)
cancel()
if err != nil {
b.err = errors.Trace(err)
return nil
}
ddlInfo, err := admin.GetDDLInfo(e.ctx.Txn())
if err != nil {
b.err = errors.Trace(err)
return nil
}
e.ddlInfo = ddlInfo
e.selfID = ownerManager.ID()
e.supportChk = true
return e
}
func (b *executorBuilder) buildShowDDLJobs(v *plan.ShowDDLJobs) Executor {
e := &ShowDDLJobsExec{
baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
}
e.supportChk = true
return e
}
func (b *executorBuilder) buildCheckTable(v *plan.CheckTable) Executor {
e := &CheckTableExec{
baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
tables: v.Tables,
is: b.is,
}
e.supportChk = true
return e
}
func (b *executorBuilder) buildDeallocate(v *plan.Deallocate) Executor {
return &DeallocateExec{
baseExecutor: newBaseExecutor(nil, b.ctx),
Name: v.Name,
}
}
func (b *executorBuilder) buildSelectLock(v *plan.PhysicalLock) Executor {
src := b.build(v.Children()[0])
if b.err != nil {
b.err = errors.Trace(b.err)
return nil
}
if !b.ctx.GetSessionVars().InTxn() {
// Locking of rows for update using SELECT FOR UPDATE only applies when autocommit
// is disabled (either by beginning transaction with START TRANSACTION or by setting
// autocommit to 0. If autocommit is enabled, the rows matching the specification are not locked.
// See https://dev.mysql.com/doc/refman/5.7/en/innodb-locking-reads.html
return src
}
e := &SelectLockExec{
baseExecutor: newBaseExecutor(v.Schema(), b.ctx, src),
Lock: v.Lock,
}
e.supportChk = true
return e
}
func (b *executorBuilder) buildLimit(v *plan.PhysicalLimit) Executor {
childExec := b.build(v.Children()[0])
if b.err != nil {
b.err = errors.Trace(b.err)
return nil
}
e := &LimitExec{
baseExecutor: newBaseExecutor(v.Schema(), b.ctx, childExec),
begin: v.Offset,
end: v.Offset + v.Count,
}
e.supportChk = true
return e
}
func (b *executorBuilder) buildPrepare(v *plan.Prepare) Executor {
e := &PrepareExec{
baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
is: b.is,
name: v.Name,
sqlText: v.SQLText,
}
e.supportChk = true
return e
}
func (b *executorBuilder) buildExecute(v *plan.Execute) Executor {
return &ExecuteExec{
baseExecutor: newBaseExecutor(nil, b.ctx),
IS: b.is,
Name: v.Name,
UsingVars: v.UsingVars,
ID: v.ExecID,
Stmt: v.Stmt,
Plan: v.Plan,
}
}
func (b *executorBuilder) buildShow(v *plan.Show) Executor {
e := &ShowExec{
baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
Tp: v.Tp,
DBName: model.NewCIStr(v.DBName),
Table: v.Table,
Column: v.Column,
User: v.User,
Flag: v.Flag,
Full: v.Full,
GlobalScope: v.GlobalScope,
is: b.is,
}
if e.Tp == ast.ShowGrants && e.User == nil {
e.User = e.ctx.GetSessionVars().User
}
if len(v.Conditions) == 0 {
return e
}
sel := &SelectionExec{
baseExecutor: newBaseExecutor(v.Schema(), b.ctx, e),
filters: v.Conditions,
}
return sel
}
func (b *executorBuilder) buildSimple(v *plan.Simple) Executor {
switch s := v.Statement.(type) {
case *ast.GrantStmt:
return b.buildGrant(s)
case *ast.RevokeStmt:
return b.buildRevoke(s)
}
return &SimpleExec{
baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
Statement: v.Statement,
is: b.is,
}
}
func (b *executorBuilder) buildSet(v *plan.Set) Executor {
e := &SetExecutor{
baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
vars: v.VarAssigns,
}
e.supportChk = true
return e
}
func (b *executorBuilder) buildInsert(v *plan.Insert) Executor {
ivs := &InsertValues{
baseExecutor: newBaseExecutor(nil, b.ctx),
Columns: v.Columns,
Lists: v.Lists,
Setlist: v.Setlist,
GenColumns: v.GenCols.Columns,
GenExprs: v.GenCols.Exprs,
needFillDefaultValues: v.NeedFillDefaultValue,
}
ivs.SelectExec = b.build(v.SelectPlan)
if b.err != nil {
b.err = errors.Trace(b.err)
return nil
}
ivs.Table = v.Table
if v.IsReplace {
return b.buildReplace(ivs)
}
insert := &InsertExec{
InsertValues: ivs,
OnDuplicate: append(v.OnDuplicate, v.GenCols.OnDuplicates...),
Priority: v.Priority,
IgnoreErr: v.IgnoreErr,
}
insert.supportChk = true
return insert
}
func (b *executorBuilder) buildLoadData(v *plan.LoadData) Executor {
tbl, ok := b.is.TableByID(v.Table.TableInfo.ID)
if !ok {
b.err = errors.Errorf("Can not get table %d", v.Table.TableInfo.ID)
return nil
}
insertVal := &InsertValues{
baseExecutor: newBaseExecutor(nil, b.ctx),
Table: tbl,
Columns: v.Columns,
GenColumns: v.GenCols.Columns,
GenExprs: v.GenCols.Exprs,
}
tableCols := tbl.Cols()
columns, err := insertVal.getColumns(tableCols)
if err != nil {
b.err = errors.Trace(err)
return nil
}
return &LoadData{
baseExecutor: newBaseExecutor(nil, b.ctx),
IsLocal: v.IsLocal,
loadDataInfo: &LoadDataInfo{
row: make([]types.Datum, len(columns)),
insertVal: insertVal,
Path: v.Path,
Table: tbl,
FieldsInfo: v.FieldsInfo,
LinesInfo: v.LinesInfo,
Ctx: b.ctx,
columns: columns,
},
}
}
func (b *executorBuilder) buildReplace(vals *InsertValues) Executor {
return &ReplaceExec{
InsertValues: vals,
}
}
func (b *executorBuilder) buildGrant(grant *ast.GrantStmt) Executor {
return &GrantExec{
baseExecutor: newBaseExecutor(nil, b.ctx),
Privs: grant.Privs,
ObjectType: grant.ObjectType,
Level: grant.Level,
Users: grant.Users,
WithGrant: grant.WithGrant,
is: b.is,
}
}
func (b *executorBuilder) buildRevoke(revoke *ast.RevokeStmt) Executor {
e := &RevokeExec{
ctx: b.ctx,
Privs: revoke.Privs,
ObjectType: revoke.ObjectType,
Level: revoke.Level,
Users: revoke.Users,
is: b.is,
}
e.supportChk = true
return e
}
func (b *executorBuilder) buildDDL(v *plan.DDL) Executor {
e := &DDLExec{
baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
stmt: v.Statement,
is: b.is,
}
e.supportChk = true
return e
}
func (b *executorBuilder) buildExplain(v *plan.Explain) Executor {
e := &ExplainExec{
baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
}
e.rows = make([][]string, 0, len(v.Rows))
for _, row := range v.Rows {
e.rows = append(e.rows, row)
}
e.supportChk = true
return e
}
func (b *executorBuilder) buildUnionScanExec(v *plan.PhysicalUnionScan) Executor {
src := b.build(v.Children()[0])
if b.err != nil {
return nil
}
us := &UnionScanExec{baseExecutor: newBaseExecutor(v.Schema(), b.ctx, src)}
// Get the handle column index of the below plan.
// We can guarantee that there must be only one col in the map.
for _, cols := range v.Children()[0].Schema().TblID2Handle {
us.belowHandleIndex = cols[0].Index
}
switch x := src.(type) {
case *TableReaderExecutor:
us.desc = x.desc
us.dirty = getDirtyDB(b.ctx).getDirtyTable(x.table.Meta().ID)
us.conditions = v.Conditions
us.columns = x.columns
b.err = us.buildAndSortAddedRows(x.table)
case *IndexReaderExecutor:
us.desc = x.desc
for _, ic := range x.index.Columns {
for i, col := range x.schema.Columns {
if col.ColName.L == ic.Name.L {
us.usedIndex = append(us.usedIndex, i)
break
}
}
}
us.dirty = getDirtyDB(b.ctx).getDirtyTable(x.table.Meta().ID)
us.conditions = v.Conditions
us.columns = x.columns
b.err = us.buildAndSortAddedRows(x.table)
case *IndexLookUpExecutor:
us.desc = x.desc
for _, ic := range x.index.Columns {
for i, col := range x.schema.Columns {
if col.ColName.L == ic.Name.L {
us.usedIndex = append(us.usedIndex, i)
break
}
}
}
us.dirty = getDirtyDB(b.ctx).getDirtyTable(x.table.Meta().ID)
us.conditions = v.Conditions
us.columns = x.columns
b.err = us.buildAndSortAddedRows(x.table)
default:
// The mem table will not be written by sql directly, so we can omit the union scan to avoid err reporting.
return src
}
if b.err != nil {
return nil
}
return us
}
// buildMergeJoin builds MergeJoinExec executor.
// Both children must be ordered on the join keys; each side is wrapped in a
// rowBlockIterator that groups consecutive rows sharing the same key values.
func (b *executorBuilder) buildMergeJoin(v *plan.PhysicalMergeJoin) Executor {
	leftExec := b.build(v.Children()[0])
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	rightExec := b.build(v.Children()[1])
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	leftKeys := make([]*expression.Column, 0, len(v.EqualConditions))
	rightKeys := make([]*expression.Column, 0, len(v.EqualConditions))
	for _, eqCond := range v.EqualConditions {
		// Merge join can only be driven by `leftCol = rightCol` conditions;
		// reject anything else.
		if len(eqCond.GetArgs()) != 2 {
			b.err = errors.Annotate(ErrBuildExecutor, "invalid join key for equal condition")
			return nil
		}
		leftKey, ok := eqCond.GetArgs()[0].(*expression.Column)
		if !ok {
			b.err = errors.Annotate(ErrBuildExecutor, "left side of join key must be column for merge join")
			return nil
		}
		rightKey, ok := eqCond.GetArgs()[1].(*expression.Column)
		if !ok {
			b.err = errors.Annotate(ErrBuildExecutor, "right side of join key must be column for merge join")
			return nil
		}
		leftKeys = append(leftKeys, leftKey)
		rightKeys = append(rightKeys, rightKey)
	}
	leftRowBlock := &rowBlockIterator{
		ctx:      b.ctx,
		reader:   leftExec,
		filter:   v.LeftConditions,
		joinKeys: leftKeys,
	}
	rightRowBlock := &rowBlockIterator{
		ctx:      b.ctx,
		reader:   rightExec,
		filter:   v.RightConditions,
		joinKeys: rightKeys,
	}
	defaultValues := v.DefaultValues
	if defaultValues == nil {
		// NULL-pad the right (inner by default) side when the planner did
		// not supply explicit default values.
		defaultValues = make([]types.Datum, rightExec.Schema().Len())
	}
	e := &MergeJoinExec{
		baseExecutor:    newBaseExecutor(v.Schema(), b.ctx, leftExec, rightExec),
		resultGenerator: newJoinResultGenerator(b.ctx, v.JoinType, false, defaultValues, v.OtherConditions, nil, nil),
		stmtCtx:         b.ctx.GetSessionVars().StmtCtx,
		// left is the outer side by default.
		outerKeys: leftKeys,
		innerKeys: rightKeys,
		outerIter: leftRowBlock,
		innerIter: rightRowBlock,
	}
	if v.JoinType == plan.RightOuterJoin {
		// For a right outer join the right child becomes the outer side.
		e.outerKeys, e.innerKeys = e.innerKeys, e.outerKeys
		e.outerIter, e.innerIter = e.innerIter, e.outerIter
	}
	if v.JoinType != plan.InnerJoin {
		// For outer joins, move the outer filter from the iterator into the
		// join executor so unmatched outer rows can still be emitted.
		e.outerFilter = e.outerIter.filter
		e.outerIter.filter = nil
	}
	return e
}
// buildHashJoin builds a HashJoinExec executor. The child chosen by the
// planner via v.SmallChildIdx is hashed (inner side); the other child is
// the probe (outer) side.
func (b *executorBuilder) buildHashJoin(v *plan.PhysicalHashJoin) Executor {
	leftHashKey := make([]*expression.Column, 0, len(v.EqualConditions))
	rightHashKey := make([]*expression.Column, 0, len(v.EqualConditions))
	for _, eqCond := range v.EqualConditions {
		ln, _ := eqCond.GetArgs()[0].(*expression.Column)
		rn, _ := eqCond.GetArgs()[1].(*expression.Column)
		leftHashKey = append(leftHashKey, ln)
		rightHashKey = append(rightHashKey, rn)
	}
	leftExec := b.build(v.Children()[0])
	// Check build errors: the original code ignored b.err here, which could
	// leave nil children in the executor and panic later.
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	rightExec := b.build(v.Children()[1])
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	// for hash join, inner table is always the smaller one.
	e := &HashJoinExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx, leftExec, rightExec),
		concurrency:  v.Concurrency,
		joinType:     v.JoinType,
	}
	if v.SmallChildIdx == 0 {
		e.innerExec, e.outerExec = leftExec, rightExec
		e.innerFilter, e.outerFilter = v.LeftConditions, v.RightConditions
		e.innerKeys, e.outerKeys = leftHashKey, rightHashKey
	} else {
		e.innerExec, e.outerExec = rightExec, leftExec
		e.innerFilter, e.outerFilter = v.RightConditions, v.LeftConditions
		e.innerKeys, e.outerKeys = rightHashKey, leftHashKey
	}
	defaultValues := v.DefaultValues
	if defaultValues == nil {
		// NULL-pad the inner side for outer joins when no explicit default
		// values are supplied by the planner.
		defaultValues = make([]types.Datum, e.innerExec.Schema().Len())
	}
	e.resultGenerator = newJoinResultGenerator(b.ctx, v.JoinType, v.SmallChildIdx == 0,
		defaultValues, v.OtherConditions, nil, nil)
	return e
}
// buildSemiJoin builds a HashSemiJoinExec executor. The left child is the
// big (probe) side; the right child is the small (hashed) side.
func (b *executorBuilder) buildSemiJoin(v *plan.PhysicalHashSemiJoin) *HashSemiJoinExec {
	leftHashKey := make([]*expression.Column, 0, len(v.EqualConditions))
	rightHashKey := make([]*expression.Column, 0, len(v.EqualConditions))
	for _, eqCond := range v.EqualConditions {
		ln, _ := eqCond.GetArgs()[0].(*expression.Column)
		rn, _ := eqCond.GetArgs()[1].(*expression.Column)
		leftHashKey = append(leftHashKey, ln)
		rightHashKey = append(rightHashKey, rn)
	}
	// Build children before assembling the executor and propagate build
	// errors; the original code ignored b.err, leaving nil children behind.
	bigExec := b.build(v.Children()[0])
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	smallExec := b.build(v.Children()[1])
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	e := &HashSemiJoinExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
		otherFilter:  v.OtherConditions,
		bigFilter:    v.LeftConditions,
		smallFilter:  v.RightConditions,
		bigExec:      bigExec,
		smallExec:    smallExec,
		prepared:     false,
		bigHashKey:   leftHashKey,
		smallHashKey: rightHashKey,
		auxMode:      v.WithAux,
		anti:         v.Anti,
	}
	return e
}
// buildHashAgg builds a HashAggExec executor, which evaluates v.AggFuncs
// over its child's rows grouped by v.GroupByItems.
func (b *executorBuilder) buildHashAgg(v *plan.PhysicalHashAgg) Executor {
	child := b.build(v.Children()[0])
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	agg := &HashAggExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx, child),
		sc:           b.ctx.GetSessionVars().StmtCtx,
		AggFuncs:     v.AggFuncs,
		GroupByItems: v.GroupByItems,
	}
	return agg
}
// buildStreamAgg builds a StreamAggExec executor evaluating v.AggFuncs
// grouped by v.GroupByItems (presumably over input already ordered on the
// group-by items — confirm with the planner).
func (b *executorBuilder) buildStreamAgg(v *plan.PhysicalStreamAgg) Executor {
	child := b.build(v.Children()[0])
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	agg := &StreamAggExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx, child),
		StmtCtx:      b.ctx.GetSessionVars().StmtCtx,
		AggFuncs:     v.AggFuncs,
		GroupByItems: v.GroupByItems,
	}
	return agg
}
// buildSelection builds a SelectionExec executor that filters its child's
// rows with v.Conditions.
func (b *executorBuilder) buildSelection(v *plan.PhysicalSelection) Executor {
	child := b.build(v.Children()[0])
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	sel := &SelectionExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx, child),
		filters:      v.Conditions,
	}
	sel.supportChk = true
	return sel
}
// buildProjection builds a ProjectionExec executor that evaluates v.Exprs
// over its child's rows.
func (b *executorBuilder) buildProjection(v *plan.PhysicalProjection) Executor {
	child := b.build(v.Children()[0])
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	proj := &ProjectionExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx, child),
		exprs:        v.Exprs,
	}
	proj.supportChk = true
	return proj
}
// buildTableDual builds a TableDualExec executor, which produces v.RowCount
// rows without reading any table.
func (b *executorBuilder) buildTableDual(v *plan.PhysicalTableDual) Executor {
	dual := &TableDualExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
		rowCount:     v.RowCount,
	}
	return dual
}
// getStartTS returns the start timestamp used by the builder's requests.
// It prefers an explicitly set snapshot TS, falls back to the current
// transaction's start TS, and caches the result for subsequent calls.
func (b *executorBuilder) getStartTS() uint64 {
	if b.startTS == 0 {
		ts := b.ctx.GetSessionVars().SnapshotTS
		if ts == 0 {
			ts = b.ctx.Txn().StartTS()
		}
		b.startTS = ts
	}
	return b.startTS
}
// buildMemTable builds a TableScanExec over an in-memory (possibly virtual)
// table, scanning the whole handle space.
func (b *executorBuilder) buildMemTable(v *plan.PhysicalMemTable) Executor {
	tb, _ := b.is.TableByID(v.Table.ID)
	ts := &TableScanExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
		t:            tb,
		columns:      v.Columns,
		// Start seeking from the smallest possible handle so the scan covers
		// every row.
		seekHandle:     math.MinInt64,
		ranges:         v.Ranges,
		isVirtualTable: tb.Type() == table.VirtualTable,
	}
	return ts
}
// buildSort builds a SortExec executor that orders its child's rows by
// v.ByItems.
func (b *executorBuilder) buildSort(v *plan.PhysicalSort) Executor {
	child := b.build(v.Children()[0])
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	sorter := &SortExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx, child),
		ByItems:      v.ByItems,
		schema:       v.Schema(),
	}
	sorter.supportChk = true
	return sorter
}
// buildTopN builds a TopNExec executor: a SortExec bounded by a limit of
// v.Count rows starting at v.Offset.
func (b *executorBuilder) buildTopN(v *plan.PhysicalTopN) Executor {
	child := b.build(v.Children()[0])
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	sorter := SortExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx, child),
		ByItems:      v.ByItems,
		schema:       v.Schema(),
	}
	sorter.supportChk = true
	topN := &TopNExec{
		SortExec: sorter,
		limit:    &plan.PhysicalLimit{Count: v.Count, Offset: v.Offset},
	}
	return topN
}
// buildNestedLoopJoin builds a NestedLoopJoinExec, used as the inner join of
// an apply: the small side is re-scanned for each row of the big side.
func (b *executorBuilder) buildNestedLoopJoin(v *plan.PhysicalHashJoin) *NestedLoopJoinExec {
	for _, cond := range v.EqualConditions {
		// Rewrite the key columns' offsets against the joined schema so the
		// equal conditions can be evaluated as ordinary filters below.
		cond.GetArgs()[0].(*expression.Column).ResolveIndices(v.Schema())
		cond.GetArgs()[1].(*expression.Column).ResolveIndices(v.Schema())
	}
	defaultValues := v.DefaultValues
	if v.SmallChildIdx == 1 {
		if defaultValues == nil {
			// NULL-pad the small (right) side for outer joins.
			defaultValues = make([]types.Datum, v.Children()[1].Schema().Len())
		}
		return &NestedLoopJoinExec{
			baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
			SmallExec:    b.build(v.Children()[1]),
			BigExec:      b.build(v.Children()[0]),
			BigFilter:    v.LeftConditions,
			SmallFilter:  v.RightConditions,
			// Equal conditions join the other-conditions list and are
			// evaluated as plain filters.
			OtherFilter:   append(expression.ScalarFuncs2Exprs(v.EqualConditions), v.OtherConditions...),
			outer:         v.JoinType != plan.InnerJoin,
			defaultValues: defaultValues,
		}
	}
	if defaultValues == nil {
		// NULL-pad the small (left) side for outer joins.
		defaultValues = make([]types.Datum, v.Children()[0].Schema().Len())
	}
	return &NestedLoopJoinExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
		SmallExec:    b.build(v.Children()[0]),
		BigExec:      b.build(v.Children()[1]),
		// The left child is the small side in this branch.
		leftSmall:     true,
		BigFilter:     v.RightConditions,
		SmallFilter:   v.LeftConditions,
		OtherFilter:   append(expression.ScalarFuncs2Exprs(v.EqualConditions), v.OtherConditions...),
		outer:         v.JoinType != plan.InnerJoin,
		defaultValues: defaultValues,
	}
}
// buildApply builds an ApplyJoinExec executor, which re-runs its inner join
// for every outer row, binding the correlated columns in v.OuterSchema.
func (b *executorBuilder) buildApply(v *plan.PhysicalApply) Executor {
	var join joinExec
	switch x := v.PhysicalJoin.(type) {
	case *plan.PhysicalHashSemiJoin:
		join = b.buildSemiJoin(x)
	case *plan.PhysicalHashJoin:
		// Only join types that can be executed as a nested loop are allowed.
		if x.JoinType == plan.InnerJoin || x.JoinType == plan.LeftOuterJoin || x.JoinType == plan.RightOuterJoin {
			join = b.buildNestedLoopJoin(x)
		} else {
			b.err = errors.Errorf("Unsupported join type %v in nested loop join", x.JoinType)
		}
	default:
		// Report the type of the inner join plan; the original code passed
		// `v`, which is always *plan.PhysicalApply and hid the real type.
		b.err = errors.Errorf("Unsupported plan type %T in apply", v.PhysicalJoin)
	}
	// Do not hand back a half-built executor when building the join failed.
	if b.err != nil {
		return nil
	}
	apply := &ApplyJoinExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
		join:         join,
		outerSchema:  v.OuterSchema,
	}
	return apply
}
// buildExists builds an ExistsExec executor over the child plan.
func (b *executorBuilder) buildExists(v *plan.PhysicalExists) Executor {
	child := b.build(v.Children()[0])
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	exists := &ExistsExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx, child),
	}
	exists.supportChk = true
	return exists
}
// buildMaxOneRow builds a MaxOneRowExec executor over the child plan.
func (b *executorBuilder) buildMaxOneRow(v *plan.PhysicalMaxOneRow) Executor {
	child := b.build(v.Children()[0])
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	exec := &MaxOneRowExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx, child),
	}
	return exec
}
// buildUnionAll builds a UnionExec executor concatenating all children.
func (b *executorBuilder) buildUnionAll(v *plan.PhysicalUnionAll) Executor {
	children := v.Children()
	childExecs := make([]Executor, len(children))
	for i := range children {
		childExecs[i] = b.build(children[i])
		if b.err != nil {
			b.err = errors.Trace(b.err)
			return nil
		}
	}
	union := &UnionExec{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx, childExecs...),
	}
	union.supportChk = true
	return union
}
// buildUpdate builds an UpdateExec executor. The select plan produces the
// rows to update; v.OrderedList holds the column assignments.
func (b *executorBuilder) buildUpdate(v *plan.Update) Executor {
	// Map every table ID appearing in the select schema to its table.Table
	// so the executor can locate rows by handle.
	tblID2table := make(map[int64]table.Table)
	for id := range v.SelectPlan.Schema().TblID2Handle {
		tblID2table[id], _ = b.is.TableByID(id)
	}
	selExec := b.build(v.SelectPlan)
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	updateExec := &UpdateExec{
		baseExecutor: newBaseExecutor(nil, b.ctx, selExec),
		SelectExec:   selExec,
		OrderedList:  v.OrderedList,
		tblID2table:  tblID2table,
		IgnoreErr:    v.IgnoreErr,
	}
	updateExec.supportChk = true
	return updateExec
}
// buildDelete builds a DeleteExec executor. The select plan produces the
// rows (with handles) to be removed from the involved tables.
func (b *executorBuilder) buildDelete(v *plan.Delete) Executor {
	// Map every table ID appearing in the select schema to its table.Table
	// so the executor can remove rows by handle.
	tblID2table := make(map[int64]table.Table)
	for id := range v.SelectPlan.Schema().TblID2Handle {
		tblID2table[id], _ = b.is.TableByID(id)
	}
	selExec := b.build(v.SelectPlan)
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	return &DeleteExec{
		baseExecutor: newBaseExecutor(nil, b.ctx),
		SelectExec:   selExec,
		Tables:       v.Tables,
		IsMultiTable: v.IsMultiTable,
		tblID2Table:  tblID2table,
	}
}
// buildAnalyzeIndexPushdown builds an AnalyzeIndexExec that pushes index
// statistics collection down to the storage layer via a tipb.AnalyzeReq.
func (b *executorBuilder) buildAnalyzeIndexPushdown(task plan.AnalyzeIndexTask) *AnalyzeIndexExec {
	e := &AnalyzeIndexExec{
		ctx:         b.ctx,
		tblInfo:     task.TableInfo,
		idxInfo:     task.IndexInfo,
		concurrency: b.ctx.GetSessionVars().IndexSerialScanConcurrency,
		priority:    b.priority,
		analyzePB: &tipb.AnalyzeReq{
			Tp: tipb.AnalyzeType_TypeIndex,
			// StartTs is MaxUint64 — presumably so the analyze scan reads
			// the newest committed version; confirm against the kv layer.
			StartTs:        math.MaxUint64,
			Flags:          statementContextToFlags(b.ctx.GetSessionVars().StmtCtx),
			TimeZoneOffset: timeZoneOffset(b.ctx),
		},
	}
	e.analyzePB.IdxReq = &tipb.AnalyzeIndexReq{
		BucketSize: maxBucketSize,
		NumColumns: int32(len(task.IndexInfo.Columns)),
	}
	if !task.IndexInfo.Unique {
		// Request a CM sketch only for non-unique indexes — presumably a
		// unique index has at most one row per key and needs none.
		depth := int32(defaultCMSketchDepth)
		width := int32(defaultCMSketchWidth)
		e.analyzePB.IdxReq.CmsketchDepth = &depth
		e.analyzePB.IdxReq.CmsketchWidth = &width
	}
	return e
}
// buildAnalyzeColumnsPushdown builds an AnalyzeColumnsExec that pushes
// column statistics collection down to the storage layer.
func (b *executorBuilder) buildAnalyzeColumnsPushdown(task plan.AnalyzeColumnsTask) *AnalyzeColumnsExec {
	cols := task.ColsInfo
	keepOrder := false
	if task.PKInfo != nil {
		// With an integer primary key, prepend it to the analyzed columns
		// and keep scan order.
		keepOrder = true
		cols = append([]*model.ColumnInfo{task.PKInfo}, cols...)
	}
	e := &AnalyzeColumnsExec{
		ctx:         b.ctx,
		tblInfo:     task.TableInfo,
		colsInfo:    task.ColsInfo,
		pkInfo:      task.PKInfo,
		concurrency: b.ctx.GetSessionVars().DistSQLScanConcurrency,
		priority:    b.priority,
		keepOrder:   keepOrder,
		analyzePB: &tipb.AnalyzeReq{
			Tp: tipb.AnalyzeType_TypeColumn,
			// StartTs is MaxUint64 — presumably so the analyze scan reads
			// the newest committed version; confirm against the kv layer.
			StartTs:        math.MaxUint64,
			Flags:          statementContextToFlags(b.ctx.GetSessionVars().StmtCtx),
			TimeZoneOffset: timeZoneOffset(b.ctx),
		},
	}
	depth := int32(defaultCMSketchDepth)
	width := int32(defaultCMSketchWidth)
	e.analyzePB.ColReq = &tipb.AnalyzeColumnsReq{
		BucketSize:    maxBucketSize,
		SampleSize:    maxRegionSampleSize,
		SketchSize:    maxSketchSize,
		ColumnsInfo:   distsql.ColumnsToProto(cols, task.TableInfo.PKIsHandle),
		CmsketchDepth: &depth,
		CmsketchWidth: &width,
	}
	b.err = setPBColumnsDefaultValue(b.ctx, e.analyzePB.ColReq.ColumnsInfo, cols)
	return e
}
// buildAnalyze builds an AnalyzeExec executor holding one analyzeTask per
// column task and per index task of the plan.
func (b *executorBuilder) buildAnalyze(v *plan.Analyze) Executor {
	e := &AnalyzeExec{
		baseExecutor: newBaseExecutor(nil, b.ctx),
		tasks:        make([]*analyzeTask, 0, len(v.ColTasks)+len(v.IdxTasks)),
	}
	for _, t := range v.ColTasks {
		e.tasks = append(e.tasks, &analyzeTask{
			taskType: colTask,
			colExec:  b.buildAnalyzeColumnsPushdown(t),
		})
	}
	for _, t := range v.IdxTasks {
		e.tasks = append(e.tasks, &analyzeTask{
			taskType: idxTask,
			idxExec:  b.buildAnalyzeIndexPushdown(t),
		})
	}
	return e
}
// constructDAGReq converts the pushed-down physical plans into a
// tipb.DAGRequest, stamping it with the start TS, time zone, and statement
// flags of the current session.
func (b *executorBuilder) constructDAGReq(plans []plan.PhysicalPlan) (*tipb.DAGRequest, error) {
	sc := b.ctx.GetSessionVars().StmtCtx
	dagReq := &tipb.DAGRequest{
		StartTs:        b.getStartTS(),
		TimeZoneOffset: timeZoneOffset(b.ctx),
		Flags:          statementContextToFlags(sc),
	}
	for _, p := range plans {
		execPB, err := p.ToPB(b.ctx)
		if err != nil {
			return nil, errors.Trace(err)
		}
		dagReq.Executors = append(dagReq.Executors, execPB)
	}
	return dagReq, nil
}
// buildIndexLookUpJoin builds an index lookup join: the outer side is read
// normally, and the inner side is probed through an index reader built per
// batch of outer rows.
func (b *executorBuilder) buildIndexLookUpJoin(v *plan.PhysicalIndexJoin) Executor {
	outerExec := b.build(v.Children()[v.OuterIndex])
	if b.err != nil {
		b.err = errors.Trace(b.err)
		return nil
	}
	if outerExec.supportChunk() {
		// All inner data readers support chunk (TableReader, IndexReader, IndexLookUpReader),
		// we only need to check outer.
		return b.buildNewIndexLookUpJoin(v, outerExec)
	}
	// Batch outer rows only when the output order does not matter.
	batchSize := 1
	if !v.KeepOrder {
		batchSize = b.ctx.GetSessionVars().IndexJoinBatchSize
	}
	innerExecBuilder := &dataReaderBuilder{v.Children()[1-v.OuterIndex], b}
	defaultValues := v.DefaultValues
	if defaultValues == nil {
		// NULL-pad the inner side for outer joins.
		defaultValues = make([]types.Datum, innerExecBuilder.Schema().Len())
	}
	return &IndexLookUpJoin{
		baseExecutor:     newBaseExecutor(v.Schema(), b.ctx, outerExec),
		outerExec:        outerExec,
		innerExecBuilder: innerExecBuilder,
		outerKeys:        v.OuterJoinKeys,
		innerKeys:        v.InnerJoinKeys,
		outerFilter:      v.LeftConditions,
		innerFilter:      v.RightConditions,
		resultGenerator:  newJoinResultGenerator(b.ctx, v.JoinType, v.OuterIndex == 1, defaultValues, v.OtherConditions, nil, nil),
		maxBatchSize:     batchSize,
	}
}
// buildNewIndexLookUpJoin builds the chunk-based NewIndexLookUpJoin for an
// already-built outer executor. Filters and row types are assigned to the
// outer/inner contexts according to v.OuterIndex.
func (b *executorBuilder) buildNewIndexLookUpJoin(v *plan.PhysicalIndexJoin, outerExec Executor) Executor {
	outerFilter, innerFilter := v.LeftConditions, v.RightConditions
	leftTypes, rightTypes := v.Children()[0].Schema().GetTypes(), v.Children()[1].Schema().GetTypes()
	outerTypes, innerTypes := leftTypes, rightTypes
	if v.OuterIndex == 1 {
		// The right child is the outer side: swap filters and row types.
		outerFilter, innerFilter = v.RightConditions, v.LeftConditions
		outerTypes, innerTypes = rightTypes, leftTypes
	}
	defaultValues := v.DefaultValues
	if defaultValues == nil {
		// NULL-pad the inner side for outer joins.
		defaultValues = make([]types.Datum, len(innerTypes))
	}
	innerPlan := v.Children()[1-v.OuterIndex]
	e := &NewIndexLookUpJoin{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx, outerExec),
		outerCtx: outerCtx{
			rowTypes: outerTypes,
			filter:   outerFilter,
		},
		innerCtx: innerCtx{
			readerBuilder: &dataReaderBuilder{innerPlan, b},
			rowTypes:      innerTypes,
			filter:        innerFilter,
		},
		workerWg:        new(sync.WaitGroup),
		resultGenerator: newJoinResultGenerator(b.ctx, v.JoinType, v.OuterIndex == 1, defaultValues, v.OtherConditions, leftTypes, rightTypes),
	}
	e.supportChk = true
	// Record the column offsets of the join keys for both sides.
	outerKeyCols := make([]int, len(v.OuterJoinKeys))
	for i := 0; i < len(v.OuterJoinKeys); i++ {
		outerKeyCols[i] = v.OuterJoinKeys[i].Index
	}
	e.outerCtx.keyCols = outerKeyCols
	innerKeyCols := make([]int, len(v.InnerJoinKeys))
	for i := 0; i < len(v.InnerJoinKeys); i++ {
		innerKeyCols[i] = v.InnerJoinKeys[i].Index
	}
	e.innerCtx.keyCols = innerKeyCols
	e.joinResult = e.newChunk()
	return e
}
// buildNoRangeTableReader builds a TableReaderExecutor from the pushed-down
// table plans WITHOUT setting any key ranges; callers attach the ranges.
func buildNoRangeTableReader(b *executorBuilder, v *plan.PhysicalTableReader) (*TableReaderExecutor, error) {
	dagReq, err := b.constructDAGReq(v.TablePlans)
	if err != nil {
		return nil, errors.Trace(err)
	}
	// The first pushed-down plan is expected to be the table scan itself.
	ts := v.TablePlans[0].(*plan.PhysicalTableScan)
	table, _ := b.is.TableByID(ts.Table.ID)
	e := &TableReaderExecutor{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
		dagPB:        dagReq,
		tableID:      ts.Table.ID,
		table:        table,
		keepOrder:    ts.KeepOrder,
		desc:         ts.Desc,
		columns:      ts.Columns,
		priority:     b.priority,
	}
	e.baseExecutor.supportChk = true
	// Output every column of the reader's schema, in order.
	for i := range v.Schema().Columns {
		dagReq.OutputOffsets = append(dagReq.OutputOffsets, uint32(i))
	}
	return e, nil
}
// buildTableReader builds a TableReaderExecutor and attaches the key ranges
// computed by the planner for its table scan.
func (b *executorBuilder) buildTableReader(v *plan.PhysicalTableReader) *TableReaderExecutor {
	reader, err := buildNoRangeTableReader(b, v)
	if err != nil {
		b.err = errors.Trace(err)
		return nil
	}
	reader.ranges = v.TablePlans[0].(*plan.PhysicalTableScan).Ranges
	return reader
}
// buildNoRangeIndexReader builds an IndexReaderExecutor from the pushed-down
// index plans WITHOUT setting any key ranges; callers attach the ranges.
func buildNoRangeIndexReader(b *executorBuilder, v *plan.PhysicalIndexReader) (*IndexReaderExecutor, error) {
	dagReq, err := b.constructDAGReq(v.IndexPlans)
	if err != nil {
		return nil, errors.Trace(err)
	}
	// The first pushed-down plan is expected to be the index scan itself.
	is := v.IndexPlans[0].(*plan.PhysicalIndexScan)
	table, _ := b.is.TableByID(is.Table.ID)
	e := &IndexReaderExecutor{
		baseExecutor: newBaseExecutor(v.Schema(), b.ctx),
		dagPB:        dagReq,
		tableID:      is.Table.ID,
		table:        table,
		index:        is.Index,
		keepOrder:    !is.OutOfOrder,
		desc:         is.Desc,
		columns:      is.Columns,
		priority:     b.priority,
	}
	e.supportChk = true
	// Output only the offsets of the requested columns.
	for _, col := range v.OutputColumns {
		dagReq.OutputOffsets = append(dagReq.OutputOffsets, uint32(col.Index))
	}
	return e, nil
}
// buildIndexReader builds an IndexReaderExecutor and attaches the key
// ranges computed by the planner for its index scan.
func (b *executorBuilder) buildIndexReader(v *plan.PhysicalIndexReader) *IndexReaderExecutor {
	reader, err := buildNoRangeIndexReader(b, v)
	if err != nil {
		b.err = errors.Trace(err)
		return nil
	}
	reader.ranges = v.IndexPlans[0].(*plan.PhysicalIndexScan).Ranges
	return reader
}
// buildNoRangeIndexLookUpReader builds an IndexLookUpExecutor (index scan
// followed by table lookup by handle) WITHOUT setting any key ranges;
// callers attach the ranges.
func buildNoRangeIndexLookUpReader(b *executorBuilder, v *plan.PhysicalIndexLookUpReader) (*IndexLookUpExecutor, error) {
	indexReq, err := b.constructDAGReq(v.IndexPlans)
	if err != nil {
		return nil, errors.Trace(err)
	}
	tableReq, err := b.constructDAGReq(v.TablePlans)
	if err != nil {
		return nil, errors.Trace(err)
	}
	is := v.IndexPlans[0].(*plan.PhysicalIndexScan)
	// The index side outputs only the column following the index columns —
	// presumably the row handle used for the table lookup; confirm.
	indexReq.OutputOffsets = []uint32{uint32(len(is.Index.Columns))}
	table, _ := b.is.TableByID(is.Table.ID)
	// The table side outputs every column of the reader's schema.
	for i := 0; i < v.Schema().Len(); i++ {
		tableReq.OutputOffsets = append(tableReq.OutputOffsets, uint32(i))
	}
	e := &IndexLookUpExecutor{
		baseExecutor:      newBaseExecutor(v.Schema(), b.ctx),
		dagPB:             indexReq,
		tableID:           is.Table.ID,
		table:             table,
		index:             is.Index,
		keepOrder:         !is.OutOfOrder,
		desc:              is.Desc,
		tableRequest:      tableReq,
		columns:           is.Columns,
		priority:          b.priority,
		dataReaderBuilder: &dataReaderBuilder{executorBuilder: b},
	}
	e.supportChk = true
	// Remember where the handle column sits in the output schema, if any.
	if cols, ok := v.Schema().TblID2Handle[is.Table.ID]; ok {
		e.handleIdx = cols[0].Index
	}
	return e, nil
}
// buildIndexLookUpReader builds an IndexLookUpExecutor and attaches the key
// ranges computed by the planner for its index scan.
func (b *executorBuilder) buildIndexLookUpReader(v *plan.PhysicalIndexLookUpReader) *IndexLookUpExecutor {
	reader, err := buildNoRangeIndexLookUpReader(b, v)
	if err != nil {
		b.err = errors.Trace(err)
		return nil
	}
	reader.ranges = v.IndexPlans[0].(*plan.PhysicalIndexScan).Ranges
	return reader
}
// dataReaderBuilder builds an executor that reads data in the key ranges
// constructed from caller-supplied datums.
// Differences from executorBuilder:
//  1. dataReaderBuilder calculates the data range from its arguments rather
//     than from the plan.
//  2. the resulting executor is already opened.
type dataReaderBuilder struct {
	plan.Plan
	*executorBuilder
}
// buildExecutorForDatums dispatches on the wrapped plan type and builds an
// already-opened reader restricted to the ranges derived from datums.
func (builder *dataReaderBuilder) buildExecutorForDatums(goCtx goctx.Context, datums [][]types.Datum) (Executor, error) {
	switch p := builder.Plan.(type) {
	case *plan.PhysicalTableReader:
		return builder.buildTableReaderForDatums(goCtx, p, datums)
	case *plan.PhysicalIndexReader:
		return builder.buildIndexReaderForDatums(goCtx, p, datums)
	case *plan.PhysicalIndexLookUpReader:
		return builder.buildIndexLookUpReaderForDatums(goCtx, p, datums)
	default:
		return nil, errors.New("Wrong plan type for dataReaderBuilder")
	}
}
// buildTableReaderForDatums builds an opened table reader over the handles
// carried in the first column of each datum row.
func (builder *dataReaderBuilder) buildTableReaderForDatums(goCtx goctx.Context, v *plan.PhysicalTableReader, datums [][]types.Datum) (Executor, error) {
	reader, err := buildNoRangeTableReader(builder.executorBuilder, v)
	if err != nil {
		return nil, errors.Trace(err)
	}
	handles := make([]int64, 0, len(datums))
	for _, row := range datums {
		handles = append(handles, row[0].GetInt64())
	}
	return builder.buildTableReaderFromHandles(goCtx, reader, handles)
}
// buildTableReaderFromHandles builds and starts a distsql request reading
// the rows identified by the given handles, and returns the executor with
// its result already fetching.
func (builder *dataReaderBuilder) buildTableReaderFromHandles(goCtx goctx.Context, e *TableReaderExecutor, handles []int64) (Executor, error) {
	// Sort handles ascending before deriving the request's key ranges.
	sort.Sort(sortutil.Int64Slice(handles))
	var b requestBuilder
	kvReq, err := b.SetTableHandles(e.tableID, handles).
		SetDAGRequest(e.dagPB).
		SetDesc(e.desc).
		SetKeepOrder(e.keepOrder).
		SetPriority(e.priority).
		SetFromSessionVars(e.ctx.GetSessionVars()).
		Build()
	if err != nil {
		return nil, errors.Trace(err)
	}
	e.result, err = distsql.SelectDAG(goCtx, builder.ctx, kvReq, e.schema.GetTypes())
	if err != nil {
		return nil, errors.Trace(err)
	}
	e.result.Fetch(goCtx)
	return e, nil
}
// buildIndexReaderForDatums builds and starts a distsql request reading the
// index entries matching the given index values, and returns the executor
// with its result already fetching.
func (builder *dataReaderBuilder) buildIndexReaderForDatums(goCtx goctx.Context, v *plan.PhysicalIndexReader, values [][]types.Datum) (Executor, error) {
	e, err := buildNoRangeIndexReader(builder.executorBuilder, v)
	if err != nil {
		return nil, errors.Trace(err)
	}
	var b requestBuilder
	kvReq, err := b.SetIndexValues(e.tableID, e.index.ID, values).
		SetDAGRequest(e.dagPB).
		SetDesc(e.desc).
		SetKeepOrder(e.keepOrder).
		SetPriority(e.priority).
		SetFromSessionVars(e.ctx.GetSessionVars()).
		Build()
	if err != nil {
		return nil, errors.Trace(err)
	}
	e.result, err = distsql.SelectDAG(goCtx, builder.ctx, kvReq, e.schema.GetTypes())
	if err != nil {
		return nil, errors.Trace(err)
	}
	e.result.Fetch(goCtx)
	return e, nil
}
// buildIndexLookUpReaderForDatums builds an IndexLookUpExecutor restricted
// to the key ranges derived from the given index values, and opens it
// before returning.
func (builder *dataReaderBuilder) buildIndexLookUpReaderForDatums(goCtx goctx.Context, v *plan.PhysicalIndexLookUpReader, values [][]types.Datum) (Executor, error) {
	e, err := buildNoRangeIndexLookUpReader(builder.executorBuilder, v)
	if err != nil {
		return nil, errors.Trace(err)
	}
	kvRanges, err := indexValuesToKVRanges(e.tableID, e.index.ID, values)
	if err != nil {
		return nil, errors.Trace(err)
	}
	err = e.open(goCtx, kvRanges)
	return e, errors.Trace(err)
}
| executor/builder.go | 1 | https://github.com/pingcap/tidb/commit/b35e024ace8a525b61fd53eae22e9723d06454ca | [
0.9677202105522156,
0.04144486412405968,
0.00016724732995498925,
0.0007422252092510462,
0.15434657037258148
] |
{
"id": 2,
"code_window": [
"\tName string\n",
"}\n",
"\n",
"// Next implements the Executor Next interface.\n",
"func (e *DeallocateExec) Next(goCtx goctx.Context) (Row, error) {\n",
"\tvars := e.ctx.GetSessionVars()\n",
"\tid, ok := vars.PreparedStmtNameToID[e.Name]\n",
"\tif !ok {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\treturn nil, errors.Trace(e.run(goCtx))\n",
"}\n",
"\n",
"// NextChunk implements the Executor NextChunk interface.\n",
"func (e *DeallocateExec) NextChunk(goCtx goctx.Context, chk *chunk.Chunk) error {\n",
"\treturn errors.Trace(e.run(goCtx))\n",
"}\n",
"\n",
"func (e *DeallocateExec) run(goCtx goctx.Context) error {\n"
],
"file_path": "executor/prepared.go",
"type": "add",
"edit_start_line_idx": 240
} | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package plan_test
import (
"github.com/juju/errors"
. "github.com/pingcap/check"
"github.com/pingcap/tidb"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/parser"
"github.com/pingcap/tidb/plan"
"github.com/pingcap/tidb/terror"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/testleak"
goctx "golang.org/x/net/context"
)
var _ = Suite(&testValidatorSuite{})

// testValidatorSuite groups the validator tests for the gocheck framework.
type testValidatorSuite struct {
}
func (s *testValidatorSuite) TestValidator(c *C) {
defer testleak.AfterTest(c)()
tests := []struct {
sql string
inPrepare bool
err error
}{
{"select ?", false, parser.ErrSyntax},
{"select ?", true, nil},
{"create table t(id int not null auto_increment default 2, key (id))", true,
errors.New("Invalid default value for 'id'")},
{"create table t(id int not null default 2 auto_increment, key (id))", true,
errors.New("Invalid default value for 'id'")},
// Default value can be null when the column is primary key in MySQL 5.6.
// But it can't be null in MySQL 5.7.
{"create table t(id int auto_increment default null, primary key (id))", true, nil},
{"create table t(id int default null auto_increment, primary key (id))", true, nil},
{"create table t(id int not null auto_increment)", true,
errors.New("Incorrect table definition; there can be only one auto column and it must be defined as a key")},
{"create table t(id int not null auto_increment, c int auto_increment, key (id, c))", true,
errors.New("Incorrect table definition; there can be only one auto column and it must be defined as a key")},
{"create table t(id int not null auto_increment, c int, key (c, id))", true,
errors.New("Incorrect table definition; there can be only one auto column and it must be defined as a key")},
{"create table t(id decimal auto_increment, key (id))", true,
errors.New("Incorrect column specifier for column 'id'")},
{"create table t(id float auto_increment, key (id))", true, nil},
{"create table t(id int auto_increment) ENGINE=MYISAM", true, nil},
{"create table t(a int primary key, b int, c varchar(10), d char(256));", true,
errors.New("[types:1074]Column length too big for column 'd' (max = 255); use BLOB or TEXT instead")},
{"create index ib on t(b,a,b);", true, errors.New("[schema:1060]Duplicate column name 'b'")},
{"alter table t add index idx(a, b, A)", true, errors.New("[schema:1060]Duplicate column name 'A'")},
{"create table t (a int, b int, index(a, b, A))", true, errors.New("[schema:1060]Duplicate column name 'A'")},
{"create table t (a int, b int, key(a, b, A))", true, errors.New("[schema:1060]Duplicate column name 'A'")},
{"create table t (a int, b int, unique(a, b, A))", true, errors.New("[schema:1060]Duplicate column name 'A'")},
{"create table t (a int, b int, unique key(a, b, A))", true, errors.New("[schema:1060]Duplicate column name 'A'")},
{"create table t (a int, b int, unique index(a, b, A))", true, errors.New("[schema:1060]Duplicate column name 'A'")},
{"create table t(c1 int not null primary key, c2 int not null primary key)", true,
errors.New("[schema:1068]Multiple primary key defined")},
{"create table t(c1 int not null primary key, c2 int not null, primary key(c1))", true,
errors.New("[schema:1068]Multiple primary key defined")},
{"create table t(c1 int not null, c2 int not null, primary key(c1), primary key(c2))", true,
errors.New("[schema:1068]Multiple primary key defined")},
{"alter table t auto_increment=1", true, errors.New("[autoid:3]No support for setting auto_increment using alter_table")},
{"alter table t add column c int auto_increment key, auto_increment=10", true,
errors.New("[autoid:3]No support for setting auto_increment using alter_table")},
{"alter table t add column c int auto_increment key", true, nil},
{"alter table t add column char4294967295 char(255)", true, nil},
{"create table t (c float(53))", true, nil},
{"alter table t add column c float(53)", true, nil},
{"create table t (c set ('1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50','51','52','53','54','55','56','57','58','59','60','61','62','63','64'))", true, nil},
{"alter table t add column c set ('1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50','51','52','53','54','55','56','57','58','59','60','61','62','63','64')", true, nil},
{"create table t (c varchar(21845) CHARACTER SET utf8)", true, nil},
{"create table t (c varchar(16383) CHARACTER SET utf8mb4)", true, nil},
{"create table t (c varchar(65535) CHARACTER SET ascii)", true, nil},
{"alter table t add column c varchar(21845) CHARACTER SET utf8", true, nil},
{"alter table t add column c varchar(16383) CHARACTER SET utf8mb4", true, nil},
{"alter table t add column c varchar(65535) CHARACTER SET ascii", true, nil},
{"alter table t add column char4294967295 char(4294967295)", true,
errors.New("[types:1074]Column length too big for column 'char4294967295' (max = 255); use BLOB or TEXT instead")},
{"alter table t add column char4294967296 char(4294967296)", true,
errors.New("[types:1439]Display width out of range for column 'char4294967296' (max = 4294967295)")},
{"create table t (c float(4294967296))", true,
errors.New("[types:1439]Display width out of range for column 'c' (max = 4294967295)")},
{"alter table t add column c float(4294967296)", true,
errors.New("[types:1439]Display width out of range for column 'c' (max = 4294967295)")},
{"create table t (c float(54))", true,
errors.New("[types:1063]Incorrect column specifier for column 'c'")},
{"alter table t add column c float(54)", true,
errors.New("[types:1063]Incorrect column specifier for column 'c'")},
{"create table t (set65 set ('1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50','51','52','53','54','55','56','57','58','59','60','61','62','63','64','65'))", true,
errors.New("[types:1097]Too many strings for column set65 and SET")},
{"alter table t add column set65 set ('1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50','51','52','53','54','55','56','57','58','59','60','61','62','63','64','65')", true,
errors.New("[types:1097]Too many strings for column set65 and SET")},
{"create table t (c varchar(4294967295) CHARACTER SET utf8)", true,
errors.New("[types:1074]Column length too big for column 'c' (max = 21845); use BLOB or TEXT instead")},
{"create table t (c varchar(4294967295) CHARACTER SET utf8mb4)", true,
errors.New("[types:1074]Column length too big for column 'c' (max = 16383); use BLOB or TEXT instead")},
{"create table t (c varchar(4294967295) CHARACTER SET ascii)", true,
errors.New("[types:1074]Column length too big for column 'c' (max = 65535); use BLOB or TEXT instead")},
{"alter table t add column c varchar(4294967295) CHARACTER SET utf8", true,
errors.New("[types:1074]Column length too big for column 'c' (max = 21845); use BLOB or TEXT instead")},
{"alter table t add column c varchar(4294967295) CHARACTER SET utf8mb4;", true,
errors.New("[types:1074]Column length too big for column 'c' (max = 16383); use BLOB or TEXT instead")},
{"alter table t add column c varchar(4294967295) CHARACTER SET ascii", true,
errors.New("[types:1074]Column length too big for column 'c' (max = 65535); use BLOB or TEXT instead")},
{"create table `t ` (a int)", true, errors.New("[ddl:1103]Incorrect table name 't '")},
{"create table `` (a int)", true, errors.New("[ddl:1103]Incorrect table name ''")},
{"create table t (`` int)", true, errors.New("[ddl:1166]Incorrect column name ''")},
{"create table t (`a ` int)", true, errors.New("[ddl:1166]Incorrect column name 'a '")},
{"drop table if exists ``", true, errors.New("[ddl:1103]Incorrect table name ''")},
{"drop table `t `", true, errors.New("[ddl:1103]Incorrect table name 't '")},
{"create database ``", true, errors.New("[ddl:1102]Incorrect database name ''")},
{"create database `test `", true, errors.New("[ddl:1102]Incorrect database name 'test '")},
{"drop database ``", true, errors.New("[ddl:1102]Incorrect database name ''")},
{"drop database `test `", true, errors.New("[ddl:1102]Incorrect database name 'test '")},
{"alter table `t ` add column c int", true, errors.New("[ddl:1103]Incorrect table name 't '")},
{"alter table `` add column c int", true, errors.New("[ddl:1103]Incorrect table name ''")},
{"alter table t rename `t ` ", true, errors.New("[ddl:1103]Incorrect table name 't '")},
{"alter table t rename `` ", true, errors.New("[ddl:1103]Incorrect table name ''")},
{"alter table t add column `c ` int", true, errors.New("[ddl:1166]Incorrect column name 'c '")},
{"alter table t add column `` int", true, errors.New("[ddl:1166]Incorrect column name ''")},
{"alter table t change column a `` int", true, errors.New("[ddl:1166]Incorrect column name ''")},
{"alter table t change column a `a ` int", true, errors.New("[ddl:1166]Incorrect column name 'a '")},
{"create index idx on `t ` (a)", true, errors.New("[ddl:1103]Incorrect table name 't '")},
{"create index idx on `` (a)", true, errors.New("[ddl:1103]Incorrect table name ''")},
// issue 3844
{`create table t (a set("a, b", "c, d"))`, true, errors.New("[types:1367]Illegal set 'a, b' value found during parsing")},
{`alter table t add column a set("a, b", "c, d")`, true, errors.New("[types:1367]Illegal set 'a, b' value found during parsing")},
// issue 3843
{"create index `primary` on t (i)", true, errors.New("[ddl:1280]Incorrect index name 'primary'")},
{"alter table t add index `primary` (i)", true, errors.New("[ddl:1280]Incorrect index name 'primary'")},
// issue 2273
{"create table t(a char, b char, c char, d char, e char, f char, g char, h char ,i char, j char, k int, l char ,m char , n char, o char , p char, q char, index(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q))", true, errors.New("[schema:1070]Too many key parts specified; max 16 parts allowed")},
// issue #4429
{"CREATE TABLE `t` (`a` date DEFAULT now());", false, types.ErrInvalidDefault},
{"CREATE TABLE `t` (`a` timestamp DEFAULT now());", false, nil},
{"CREATE TABLE `t` (`a` datetime DEFAULT now());", false, nil},
{"CREATE TABLE `t` (`a` int DEFAULT now());", false, types.ErrInvalidDefault},
{"CREATE TABLE `t` (`a` float DEFAULT now());", false, types.ErrInvalidDefault},
{"CREATE TABLE `t` (`a` varchar(10) DEFAULT now());", false, types.ErrInvalidDefault},
{"CREATE TABLE `t` (`a` double DEFAULT 1.0 DEFAULT now() DEFAULT 2.0 );", false, nil},
{`explain format = "xx" select 100;`, false, plan.ErrUnknownExplainFormat.GenByArgs("xx")},
// issue 4472
{`select sum(distinct(if('a', (select adddate(elt(999, count(*)), interval 1 day)), .1))) as foo;`, true, nil},
{`select sum(1 in (select count(1)))`, true, nil},
}
store, dom, err := newStoreWithBootstrap()
c.Assert(err, IsNil)
defer func() {
dom.Close()
store.Close()
}()
se, err := tidb.CreateSession4Test(store)
c.Assert(err, IsNil)
_, err = se.Execute(goctx.Background(), "use test")
c.Assert(err, IsNil)
ctx := se.(context.Context)
is := infoschema.MockInfoSchema([]*model.TableInfo{plan.MockTable()})
for _, tt := range tests {
stmts, err1 := tidb.Parse(ctx, tt.sql)
c.Assert(err1, IsNil)
c.Assert(stmts, HasLen, 1)
stmt := stmts[0]
err = plan.Preprocess(ctx, stmt, is, tt.inPrepare)
c.Assert(terror.ErrorEqual(err, tt.err), IsTrue)
}
}
| plan/preprocess_test.go | 0 | https://github.com/pingcap/tidb/commit/b35e024ace8a525b61fd53eae22e9723d06454ca | [
0.0005614881520159543,
0.00019756998517550528,
0.00016683338617440313,
0.00016981166845653206,
0.000090755034761969
] |
{
"id": 2,
"code_window": [
"\tName string\n",
"}\n",
"\n",
"// Next implements the Executor Next interface.\n",
"func (e *DeallocateExec) Next(goCtx goctx.Context) (Row, error) {\n",
"\tvars := e.ctx.GetSessionVars()\n",
"\tid, ok := vars.PreparedStmtNameToID[e.Name]\n",
"\tif !ok {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\treturn nil, errors.Trace(e.run(goCtx))\n",
"}\n",
"\n",
"// NextChunk implements the Executor NextChunk interface.\n",
"func (e *DeallocateExec) NextChunk(goCtx goctx.Context, chk *chunk.Chunk) error {\n",
"\treturn errors.Trace(e.run(goCtx))\n",
"}\n",
"\n",
"func (e *DeallocateExec) run(goCtx goctx.Context) error {\n"
],
"file_path": "executor/prepared.go",
"type": "add",
"edit_start_line_idx": 240
} | // Copyright 2009,2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// NetBSD system calls.
// This file is compiled as ordinary Go code,
// but it is also input to mksyscall,
// which parses the //sys lines and generates system call stubs.
// Note that sometimes we use a lowercase //sys name and wrap
// it in our own nicer implementation, either here or in
// syscall_bsd.go or syscall_unix.go.
package unix
import (
"syscall"
"unsafe"
)
type SockaddrDatalink struct {
Len uint8
Family uint8
Index uint16
Type uint8
Nlen uint8
Alen uint8
Slen uint8
Data [12]int8
raw RawSockaddrDatalink
}
func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
func sysctlNodes(mib []_C_int) (nodes []Sysctlnode, err error) {
var olen uintptr
// Get a list of all sysctl nodes below the given MIB by performing
// a sysctl for the given MIB with CTL_QUERY appended.
mib = append(mib, CTL_QUERY)
qnode := Sysctlnode{Flags: SYSCTL_VERS_1}
qp := (*byte)(unsafe.Pointer(&qnode))
sz := unsafe.Sizeof(qnode)
if err = sysctl(mib, nil, &olen, qp, sz); err != nil {
return nil, err
}
// Now that we know the size, get the actual nodes.
nodes = make([]Sysctlnode, olen/sz)
np := (*byte)(unsafe.Pointer(&nodes[0]))
if err = sysctl(mib, np, &olen, qp, sz); err != nil {
return nil, err
}
return nodes, nil
}
func nametomib(name string) (mib []_C_int, err error) {
// Split name into components.
var parts []string
last := 0
for i := 0; i < len(name); i++ {
if name[i] == '.' {
parts = append(parts, name[last:i])
last = i + 1
}
}
parts = append(parts, name[last:])
// Discover the nodes and construct the MIB OID.
for partno, part := range parts {
nodes, err := sysctlNodes(mib)
if err != nil {
return nil, err
}
for _, node := range nodes {
n := make([]byte, 0)
for i := range node.Name {
if node.Name[i] != 0 {
n = append(n, byte(node.Name[i]))
}
}
if string(n) == part {
mib = append(mib, _C_int(node.Num))
break
}
}
if len(mib) != partno+1 {
return nil, EINVAL
}
}
return mib, nil
}
// ParseDirent parses up to max directory entries in buf,
// appending the names to names. It returns the number
// bytes consumed from buf, the number of entries added
// to names, and the new names slice.
func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) {
origlen := len(buf)
for max != 0 && len(buf) > 0 {
dirent := (*Dirent)(unsafe.Pointer(&buf[0]))
if dirent.Reclen == 0 {
buf = nil
break
}
buf = buf[dirent.Reclen:]
if dirent.Fileno == 0 { // File absent in directory.
continue
}
bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
var name = string(bytes[0:dirent.Namlen])
if name == "." || name == ".." { // Useless names
continue
}
max--
count++
names = append(names, name)
}
return origlen - len(buf), count, names
}
//sysnb pipe() (fd1 int, fd2 int, err error)
func Pipe(p []int) (err error) {
if len(p) != 2 {
return EINVAL
}
p[0], p[1], err = pipe()
return
}
//sys getdents(fd int, buf []byte) (n int, err error)
func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
return getdents(fd, buf)
}
// TODO
func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
return -1, ENOSYS
}
/*
* Exposed directly
*/
//sys Access(path string, mode uint32) (err error)
//sys Adjtime(delta *Timeval, olddelta *Timeval) (err error)
//sys Chdir(path string) (err error)
//sys Chflags(path string, flags int) (err error)
//sys Chmod(path string, mode uint32) (err error)
//sys Chown(path string, uid int, gid int) (err error)
//sys Chroot(path string) (err error)
//sys Close(fd int) (err error)
//sys Dup(fd int) (nfd int, err error)
//sys Dup2(from int, to int) (err error)
//sys Exit(code int)
//sys Fchdir(fd int) (err error)
//sys Fchflags(fd int, flags int) (err error)
//sys Fchmod(fd int, mode uint32) (err error)
//sys Fchown(fd int, uid int, gid int) (err error)
//sys Flock(fd int, how int) (err error)
//sys Fpathconf(fd int, name int) (val int, err error)
//sys Fstat(fd int, stat *Stat_t) (err error)
//sys Fsync(fd int) (err error)
//sys Ftruncate(fd int, length int64) (err error)
//sysnb Getegid() (egid int)
//sysnb Geteuid() (uid int)
//sysnb Getgid() (gid int)
//sysnb Getpgid(pid int) (pgid int, err error)
//sysnb Getpgrp() (pgrp int)
//sysnb Getpid() (pid int)
//sysnb Getppid() (ppid int)
//sys Getpriority(which int, who int) (prio int, err error)
//sysnb Getrlimit(which int, lim *Rlimit) (err error)
//sysnb Getrusage(who int, rusage *Rusage) (err error)
//sysnb Getsid(pid int) (sid int, err error)
//sysnb Gettimeofday(tv *Timeval) (err error)
//sysnb Getuid() (uid int)
//sys Issetugid() (tainted bool)
//sys Kill(pid int, signum syscall.Signal) (err error)
//sys Kqueue() (fd int, err error)
//sys Lchown(path string, uid int, gid int) (err error)
//sys Link(path string, link string) (err error)
//sys Listen(s int, backlog int) (err error)
//sys Lstat(path string, stat *Stat_t) (err error)
//sys Mkdir(path string, mode uint32) (err error)
//sys Mkfifo(path string, mode uint32) (err error)
//sys Mknod(path string, mode uint32, dev int) (err error)
//sys Mlock(b []byte) (err error)
//sys Mlockall(flags int) (err error)
//sys Mprotect(b []byte, prot int) (err error)
//sys Munlock(b []byte) (err error)
//sys Munlockall() (err error)
//sys Nanosleep(time *Timespec, leftover *Timespec) (err error)
//sys Open(path string, mode int, perm uint32) (fd int, err error)
//sys Pathconf(path string, name int) (val int, err error)
//sys Pread(fd int, p []byte, offset int64) (n int, err error)
//sys Pwrite(fd int, p []byte, offset int64) (n int, err error)
//sys read(fd int, p []byte) (n int, err error)
//sys Readlink(path string, buf []byte) (n int, err error)
//sys Rename(from string, to string) (err error)
//sys Revoke(path string) (err error)
//sys Rmdir(path string) (err error)
//sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK
//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error)
//sysnb Setegid(egid int) (err error)
//sysnb Seteuid(euid int) (err error)
//sysnb Setgid(gid int) (err error)
//sysnb Setpgid(pid int, pgid int) (err error)
//sys Setpriority(which int, who int, prio int) (err error)
//sysnb Setregid(rgid int, egid int) (err error)
//sysnb Setreuid(ruid int, euid int) (err error)
//sysnb Setrlimit(which int, lim *Rlimit) (err error)
//sysnb Setsid() (pid int, err error)
//sysnb Settimeofday(tp *Timeval) (err error)
//sysnb Setuid(uid int) (err error)
//sys Stat(path string, stat *Stat_t) (err error)
//sys Symlink(path string, link string) (err error)
//sys Sync() (err error)
//sys Truncate(path string, length int64) (err error)
//sys Umask(newmask int) (oldmask int)
//sys Unlink(path string) (err error)
//sys Unmount(path string, flags int) (err error)
//sys write(fd int, p []byte) (n int, err error)
//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
//sys munmap(addr uintptr, length uintptr) (err error)
//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ
//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE
/*
* Unimplemented
*/
// ____semctl13
// __clone
// __fhopen40
// __fhstat40
// __fhstatvfs140
// __fstat30
// __getcwd
// __getfh30
// __getlogin
// __lstat30
// __mount50
// __msgctl13
// __msync13
// __ntp_gettime30
// __posix_chown
// __posix_fadvise50
// __posix_fchown
// __posix_lchown
// __posix_rename
// __setlogin
// __shmctl13
// __sigaction_sigtramp
// __sigaltstack14
// __sigpending14
// __sigprocmask14
// __sigsuspend14
// __sigtimedwait
// __stat30
// __syscall
// __vfork14
// _ksem_close
// _ksem_destroy
// _ksem_getvalue
// _ksem_init
// _ksem_open
// _ksem_post
// _ksem_trywait
// _ksem_unlink
// _ksem_wait
// _lwp_continue
// _lwp_create
// _lwp_ctl
// _lwp_detach
// _lwp_exit
// _lwp_getname
// _lwp_getprivate
// _lwp_kill
// _lwp_park
// _lwp_self
// _lwp_setname
// _lwp_setprivate
// _lwp_suspend
// _lwp_unpark
// _lwp_unpark_all
// _lwp_wait
// _lwp_wakeup
// _pset_bind
// _sched_getaffinity
// _sched_getparam
// _sched_setaffinity
// _sched_setparam
// acct
// aio_cancel
// aio_error
// aio_fsync
// aio_read
// aio_return
// aio_suspend
// aio_write
// break
// clock_getres
// clock_gettime
// clock_settime
// compat_09_ogetdomainname
// compat_09_osetdomainname
// compat_09_ouname
// compat_10_omsgsys
// compat_10_osemsys
// compat_10_oshmsys
// compat_12_fstat12
// compat_12_getdirentries
// compat_12_lstat12
// compat_12_msync
// compat_12_oreboot
// compat_12_oswapon
// compat_12_stat12
// compat_13_sigaction13
// compat_13_sigaltstack13
// compat_13_sigpending13
// compat_13_sigprocmask13
// compat_13_sigreturn13
// compat_13_sigsuspend13
// compat_14___semctl
// compat_14_msgctl
// compat_14_shmctl
// compat_16___sigaction14
// compat_16___sigreturn14
// compat_20_fhstatfs
// compat_20_fstatfs
// compat_20_getfsstat
// compat_20_statfs
// compat_30___fhstat30
// compat_30___fstat13
// compat_30___lstat13
// compat_30___stat13
// compat_30_fhopen
// compat_30_fhstat
// compat_30_fhstatvfs1
// compat_30_getdents
// compat_30_getfh
// compat_30_ntp_gettime
// compat_30_socket
// compat_40_mount
// compat_43_fstat43
// compat_43_lstat43
// compat_43_oaccept
// compat_43_ocreat
// compat_43_oftruncate
// compat_43_ogetdirentries
// compat_43_ogetdtablesize
// compat_43_ogethostid
// compat_43_ogethostname
// compat_43_ogetkerninfo
// compat_43_ogetpagesize
// compat_43_ogetpeername
// compat_43_ogetrlimit
// compat_43_ogetsockname
// compat_43_okillpg
// compat_43_olseek
// compat_43_ommap
// compat_43_oquota
// compat_43_orecv
// compat_43_orecvfrom
// compat_43_orecvmsg
// compat_43_osend
// compat_43_osendmsg
// compat_43_osethostid
// compat_43_osethostname
// compat_43_osetrlimit
// compat_43_osigblock
// compat_43_osigsetmask
// compat_43_osigstack
// compat_43_osigvec
// compat_43_otruncate
// compat_43_owait
// compat_43_stat43
// execve
// extattr_delete_fd
// extattr_delete_file
// extattr_delete_link
// extattr_get_fd
// extattr_get_file
// extattr_get_link
// extattr_list_fd
// extattr_list_file
// extattr_list_link
// extattr_set_fd
// extattr_set_file
// extattr_set_link
// extattrctl
// fchroot
// fdatasync
// fgetxattr
// fktrace
// flistxattr
// fork
// fremovexattr
// fsetxattr
// fstatvfs1
// fsync_range
// getcontext
// getitimer
// getvfsstat
// getxattr
// ioctl
// ktrace
// lchflags
// lchmod
// lfs_bmapv
// lfs_markv
// lfs_segclean
// lfs_segwait
// lgetxattr
// lio_listio
// listxattr
// llistxattr
// lremovexattr
// lseek
// lsetxattr
// lutimes
// madvise
// mincore
// minherit
// modctl
// mq_close
// mq_getattr
// mq_notify
// mq_open
// mq_receive
// mq_send
// mq_setattr
// mq_timedreceive
// mq_timedsend
// mq_unlink
// mremap
// msgget
// msgrcv
// msgsnd
// nfssvc
// ntp_adjtime
// pmc_control
// pmc_get_info
// poll
// pollts
// preadv
// profil
// pselect
// pset_assign
// pset_create
// pset_destroy
// ptrace
// pwritev
// quotactl
// rasctl
// readv
// reboot
// removexattr
// sa_enable
// sa_preempt
// sa_register
// sa_setconcurrency
// sa_stacks
// sa_yield
// sbrk
// sched_yield
// semconfig
// semget
// semop
// setcontext
// setitimer
// setxattr
// shmat
// shmdt
// shmget
// sstk
// statvfs1
// swapctl
// sysarch
// syscall
// timer_create
// timer_delete
// timer_getoverrun
// timer_gettime
// timer_settime
// undelete
// utrace
// uuidgen
// vadvise
// vfork
// writev
| _vendor/src/golang.org/x/sys/unix/syscall_netbsd.go | 0 | https://github.com/pingcap/tidb/commit/b35e024ace8a525b61fd53eae22e9723d06454ca | [
0.0003909270453732461,
0.00017845052934717387,
0.00016482904902659357,
0.00017202028539031744,
0.00003369623300386593
] |
{
"id": 2,
"code_window": [
"\tName string\n",
"}\n",
"\n",
"// Next implements the Executor Next interface.\n",
"func (e *DeallocateExec) Next(goCtx goctx.Context) (Row, error) {\n",
"\tvars := e.ctx.GetSessionVars()\n",
"\tid, ok := vars.PreparedStmtNameToID[e.Name]\n",
"\tif !ok {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\treturn nil, errors.Trace(e.run(goCtx))\n",
"}\n",
"\n",
"// NextChunk implements the Executor NextChunk interface.\n",
"func (e *DeallocateExec) NextChunk(goCtx goctx.Context, chk *chunk.Chunk) error {\n",
"\treturn errors.Trace(e.run(goCtx))\n",
"}\n",
"\n",
"func (e *DeallocateExec) run(goCtx goctx.Context) error {\n"
],
"file_path": "executor/prepared.go",
"type": "add",
"edit_start_line_idx": 240
} | #!/bin/bash
# This script is used to checkout a TiDB PR branch in a forked repo.
if test -z $1; then
echo -e "Usage:\n"
echo -e "\tcheckout-pr-branch.sh [github-username]:[pr-branch]\n"
echo -e "The argument can be copied directly from github PR page."
echo -e "The local branch name would be [github-username]/[pr-branch]."
exit 0;
fi
username=$(echo $1 | cut -d':' -f1)
branch=$(echo $1 | cut -d':' -f2)
local_branch=$username/$branch
fork="https://github.com/$username/tidb"
exists=`git show-ref refs/heads/$local_branch`
if [ -n "$exists" ]; then
git pull $fork $branch:$local_branch
else
git fetch $fork $branch:$local_branch
git checkout $local_branch
fi
| checkout-pr-branch.sh | 0 | https://github.com/pingcap/tidb/commit/b35e024ace8a525b61fd53eae22e9723d06454ca | [
0.00017655918782111257,
0.00017369621491525322,
0.00017136470705736428,
0.00017316477897111326,
0.0000021536764052143553
] |
{
"id": 3,
"code_window": [
"\tvars := e.ctx.GetSessionVars()\n",
"\tid, ok := vars.PreparedStmtNameToID[e.Name]\n",
"\tif !ok {\n",
"\t\treturn nil, errors.Trace(plan.ErrStmtNotFound)\n",
"\t}\n",
"\tdelete(vars.PreparedStmtNameToID, e.Name)\n",
"\tdelete(vars.PreparedStmts, id)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\treturn errors.Trace(plan.ErrStmtNotFound)\n"
],
"file_path": "executor/prepared.go",
"type": "replace",
"edit_start_line_idx": 243
} | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"math"
"sort"
"github.com/juju/errors"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/parser"
"github.com/pingcap/tidb/plan"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/sqlexec"
goctx "golang.org/x/net/context"
)
var (
_ Executor = &DeallocateExec{}
_ Executor = &ExecuteExec{}
_ Executor = &PrepareExec{}
)
type paramMarkerSorter struct {
markers []*ast.ParamMarkerExpr
}
func (p *paramMarkerSorter) Len() int {
return len(p.markers)
}
func (p *paramMarkerSorter) Less(i, j int) bool {
return p.markers[i].Offset < p.markers[j].Offset
}
func (p *paramMarkerSorter) Swap(i, j int) {
p.markers[i], p.markers[j] = p.markers[j], p.markers[i]
}
type paramMarkerExtractor struct {
markers []*ast.ParamMarkerExpr
}
func (e *paramMarkerExtractor) Enter(in ast.Node) (ast.Node, bool) {
return in, false
}
func (e *paramMarkerExtractor) Leave(in ast.Node) (ast.Node, bool) {
if x, ok := in.(*ast.ParamMarkerExpr); ok {
e.markers = append(e.markers, x)
}
return in, true
}
// PrepareExec represents a PREPARE executor.
type PrepareExec struct {
baseExecutor
is infoschema.InfoSchema
name string
sqlText string
ID uint32
ParamCount int
Fields []*ast.ResultField
}
// NewPrepareExec creates a new PrepareExec.
func NewPrepareExec(ctx context.Context, is infoschema.InfoSchema, sqlTxt string) *PrepareExec {
return &PrepareExec{
baseExecutor: newBaseExecutor(nil, ctx),
is: is,
sqlText: sqlTxt,
}
}
// Next implements the Executor Next interface.
func (e *PrepareExec) Next(goCtx goctx.Context) (Row, error) {
return nil, errors.Trace(e.DoPrepare())
}
// NextChunk implements the Executor NextChunk interface.
func (e *PrepareExec) NextChunk(goCtx goctx.Context, chk *chunk.Chunk) error {
return errors.Trace(e.DoPrepare())
}
// DoPrepare prepares the statement, it can be called multiple times without side effect.
func (e *PrepareExec) DoPrepare() error {
vars := e.ctx.GetSessionVars()
if e.ID != 0 {
// Must be the case when we retry a prepare.
// Make sure it is idempotent.
_, ok := vars.PreparedStmts[e.ID]
if ok {
return nil
}
}
charset, collation := vars.GetCharsetInfo()
var (
stmts []ast.StmtNode
err error
)
if sqlParser, ok := e.ctx.(sqlexec.SQLParser); ok {
stmts, err = sqlParser.ParseSQL(e.sqlText, charset, collation)
} else {
stmts, err = parser.New().Parse(e.sqlText, charset, collation)
}
if err != nil {
return errors.Trace(err)
}
if len(stmts) != 1 {
return ErrPrepareMulti
}
stmt := stmts[0]
if _, ok := stmt.(ast.DDLNode); ok {
return ErrPrepareDDL
}
var extractor paramMarkerExtractor
stmt.Accept(&extractor)
err = plan.Preprocess(e.ctx, stmt, e.is, true)
if err != nil {
return errors.Trace(err)
}
// The parameter markers are appended in visiting order, which may not
// be the same as the position order in the query string. We need to
// sort it by position.
sorter := ¶mMarkerSorter{markers: extractor.markers}
sort.Sort(sorter)
e.ParamCount = len(sorter.markers)
for i := 0; i < e.ParamCount; i++ {
sorter.markers[i].Order = i
}
prepared := &plan.Prepared{
Stmt: stmt,
Params: sorter.markers,
SchemaVersion: e.is.SchemaMetaVersion(),
}
prepared.UseCache = plan.PreparedPlanCacheEnabled && plan.Cacheable(stmt)
// We try to build the real statement of preparedStmt.
for i := range prepared.Params {
prepared.Params[i].SetDatum(types.NewIntDatum(0))
}
_, err = plan.BuildLogicalPlan(e.ctx, stmt, e.is)
if err != nil {
return errors.Trace(err)
}
if e.ID == 0 {
e.ID = vars.GetNextPreparedStmtID()
}
if e.name != "" {
vars.PreparedStmtNameToID[e.name] = e.ID
}
vars.PreparedStmts[e.ID] = prepared
return nil
}
// ExecuteExec represents an EXECUTE executor.
// It cannot be executed by itself, all it needs to do is to build
// another Executor from a prepared statement.
type ExecuteExec struct {
baseExecutor
IS infoschema.InfoSchema
Name string
UsingVars []expression.Expression
ID uint32
StmtExec Executor
Stmt ast.StmtNode
Plan plan.Plan
}
// Next implements the Executor Next interface.
func (e *ExecuteExec) Next(goCtx goctx.Context) (Row, error) {
// Will never be called.
return nil, nil
}
// Open implements the Executor Open interface.
func (e *ExecuteExec) Open(goCtx goctx.Context) error {
return nil
}
// Close implements Executor Close interface.
func (e *ExecuteExec) Close() error {
// Will never be called.
return nil
}
// Build builds a prepared statement into an executor.
// After Build, e.StmtExec will be used to do the real execution.
func (e *ExecuteExec) Build() error {
var err error
if IsPointGetWithPKOrUniqueKeyByAutoCommit(e.ctx, e.Plan) {
err = e.ctx.InitTxnWithStartTS(math.MaxUint64)
} else {
err = e.ctx.ActivePendingTxn()
}
if err != nil {
return errors.Trace(err)
}
b := newExecutorBuilder(e.ctx, e.IS, kv.PriorityNormal)
stmtExec := b.build(e.Plan)
if b.err != nil {
return errors.Trace(b.err)
}
e.StmtExec = stmtExec
ResetStmtCtx(e.ctx, e.Stmt)
stmtCount(e.Stmt, e.Plan, e.ctx.GetSessionVars().InRestrictedSQL)
return nil
}
// DeallocateExec represent a DEALLOCATE executor.
type DeallocateExec struct {
baseExecutor
Name string
}
// Next implements the Executor Next interface.
func (e *DeallocateExec) Next(goCtx goctx.Context) (Row, error) {
vars := e.ctx.GetSessionVars()
id, ok := vars.PreparedStmtNameToID[e.Name]
if !ok {
return nil, errors.Trace(plan.ErrStmtNotFound)
}
delete(vars.PreparedStmtNameToID, e.Name)
delete(vars.PreparedStmts, id)
return nil, nil
}
// Close implements Executor Close interface.
func (e *DeallocateExec) Close() error {
return nil
}
// Open implements Executor Open interface.
func (e *DeallocateExec) Open(goCtx goctx.Context) error {
return nil
}
// CompileExecutePreparedStmt compiles a session Execute command to a stmt.Statement.
func CompileExecutePreparedStmt(ctx context.Context, ID uint32, args ...interface{}) (ast.Statement, error) {
execStmt := &ast.ExecuteStmt{ExecID: ID}
execStmt.UsingVars = make([]ast.ExprNode, len(args))
for i, val := range args {
execStmt.UsingVars[i] = ast.NewValueExpr(val)
}
is := GetInfoSchema(ctx)
execPlan, err := plan.Optimize(ctx, execStmt, is)
if err != nil {
return nil, errors.Trace(err)
}
stmt := &ExecStmt{
InfoSchema: GetInfoSchema(ctx),
Plan: execPlan,
StmtNode: execStmt,
Ctx: ctx,
}
if prepared, ok := ctx.GetSessionVars().PreparedStmts[ID].(*plan.Prepared); ok {
stmt.Text = prepared.Stmt.Text()
}
return stmt, nil
}
// ResetStmtCtx resets the StmtContext.
// Before every execution, we must clear statement context.
func ResetStmtCtx(ctx context.Context, s ast.StmtNode) {
sessVars := ctx.GetSessionVars()
sc := new(stmtctx.StatementContext)
sc.TimeZone = sessVars.GetTimeZone()
switch stmt := s.(type) {
case *ast.UpdateStmt:
sc.IgnoreTruncate = false
sc.OverflowAsWarning = false
sc.TruncateAsWarning = !sessVars.StrictSQLMode || stmt.IgnoreErr
sc.InUpdateOrDeleteStmt = true
sc.DividedByZeroAsWarning = stmt.IgnoreErr
sc.IgnoreZeroInDate = !sessVars.StrictSQLMode || stmt.IgnoreErr
case *ast.DeleteStmt:
sc.IgnoreTruncate = false
sc.OverflowAsWarning = false
sc.TruncateAsWarning = !sessVars.StrictSQLMode || stmt.IgnoreErr
sc.InUpdateOrDeleteStmt = true
sc.DividedByZeroAsWarning = stmt.IgnoreErr
sc.IgnoreZeroInDate = !sessVars.StrictSQLMode || stmt.IgnoreErr
case *ast.InsertStmt:
sc.IgnoreTruncate = false
sc.TruncateAsWarning = !sessVars.StrictSQLMode || stmt.IgnoreErr
sc.InInsertStmt = true
sc.DividedByZeroAsWarning = stmt.IgnoreErr
sc.IgnoreZeroInDate = !sessVars.StrictSQLMode || stmt.IgnoreErr
case *ast.CreateTableStmt, *ast.AlterTableStmt:
// Make sure the sql_mode is strict when checking column default value.
sc.IgnoreTruncate = false
sc.OverflowAsWarning = false
sc.TruncateAsWarning = false
case *ast.LoadDataStmt:
sc.IgnoreTruncate = false
sc.OverflowAsWarning = false
sc.TruncateAsWarning = !sessVars.StrictSQLMode
case *ast.SelectStmt:
sc.InSelectStmt = true
// see https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html#sql-mode-strict
// said "For statements such as SELECT that do not change data, invalid values
// generate a warning in strict mode, not an error."
// and https://dev.mysql.com/doc/refman/5.7/en/out-of-range-and-overflow.html
sc.OverflowAsWarning = true
// Return warning for truncate error in selection.
sc.IgnoreTruncate = false
sc.TruncateAsWarning = true
sc.IgnoreZeroInDate = true
if opts := stmt.SelectStmtOpts; opts != nil {
sc.Priority = opts.Priority
sc.NotFillCache = !opts.SQLCache
}
sc.PadCharToFullLength = ctx.GetSessionVars().SQLMode.HasPadCharToFullLengthMode()
default:
sc.IgnoreTruncate = true
sc.OverflowAsWarning = false
if show, ok := s.(*ast.ShowStmt); ok {
if show.Tp == ast.ShowWarnings {
sc.InShowWarning = true
sc.SetWarnings(sessVars.StmtCtx.GetWarnings())
}
}
sc.IgnoreZeroInDate = true
}
if sessVars.LastInsertID > 0 {
sessVars.PrevLastInsertID = sessVars.LastInsertID
sessVars.LastInsertID = 0
}
sessVars.ResetPrevAffectedRows()
sessVars.InsertID = 0
sessVars.StmtCtx = sc
}
| executor/prepared.go | 1 | https://github.com/pingcap/tidb/commit/b35e024ace8a525b61fd53eae22e9723d06454ca | [
0.9984708428382874,
0.08756978064775467,
0.00016030747792683542,
0.0003491586248856038,
0.267147421836853
] |
{
"id": 3,
"code_window": [
"\tvars := e.ctx.GetSessionVars()\n",
"\tid, ok := vars.PreparedStmtNameToID[e.Name]\n",
"\tif !ok {\n",
"\t\treturn nil, errors.Trace(plan.ErrStmtNotFound)\n",
"\t}\n",
"\tdelete(vars.PreparedStmtNameToID, e.Name)\n",
"\tdelete(vars.PreparedStmts, id)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\treturn errors.Trace(plan.ErrStmtNotFound)\n"
],
"file_path": "executor/prepared.go",
"type": "replace",
"edit_start_line_idx": 243
} | // mksyscall.pl syscall_linux.go syscall_linux_amd64.go
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
// +build amd64,linux
package unix
import (
"syscall"
"unsafe"
)
var _ syscall.Errno
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(oldpath)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(newpath)
if err != nil {
return
}
_, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0)
use(unsafe.Pointer(_p0))
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0)
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
var _p1 unsafe.Pointer
if len(buf) > 0 {
_p1 = unsafe.Pointer(&buf[0])
} else {
_p1 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0)
use(unsafe.Pointer(_p0))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(oldpath)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(newpath)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)))
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func unlinkat(dirfd int, path string, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags))
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func utimes(path string, times *[2]Timeval) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) {
_, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times)))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getcwd(buf []byte) (n int, err error) {
var _p0 unsafe.Pointer
if len(buf) > 0 {
_p0 = unsafe.Pointer(&buf[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0)
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) {
r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0)
wpid = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) {
_, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(arg)
if err != nil {
return
}
_, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

// mount wraps mount(2). source, target and fstype are converted to
// NUL-terminated C strings; data is passed through as an opaque,
// filesystem-specific pointer (may be nil).
func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(source)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(target)
if err != nil {
return
}
var _p2 *byte
_p2, err = BytePtrFromString(fstype)
if err != nil {
return
}
_, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0)
// use is a no-op that keeps the converted byte buffers reachable until
// the syscall has returned (GC safety for the uintptr conversions above).
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
use(unsafe.Pointer(_p2))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Acct(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Adjtimex(buf *Timex) (state int, err error) {
r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0)
state = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Chdir(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Chroot(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ClockGettime(clockid int32, time *Timespec) (err error) {
_, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Close(fd int) (err error) {
_, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Dup(oldfd int) (fd int, err error) {
r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0)
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Dup3(oldfd int, newfd int, flags int) (err error) {
_, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func EpollCreate(size int) (fd int, err error) {
r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0)
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func EpollCreate1(flag int) (fd int, err error) {
r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0)
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) {
_, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

// Exit terminates the entire process (all threads) via exit_group(2),
// not plain exit(2). It never returns, so no error is reported.
func Exit(code int) {
Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fallocate(fd int, mode uint32, off int64, len int64) (err error) {
_, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchdir(fd int) (err error) {
_, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchmod(fd int, mode uint32) (err error) {
_, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func fcntl(fd int, cmd int, arg int) (val int, err error) {
r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg))
val = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fdatasync(fd int) (err error) {
_, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Flock(fd int, how int) (err error) {
_, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fsync(fd int) (err error) {
_, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

// Getdents reads directory entries from fd into buf. Despite the name it
// is backed by getdents64(2) (SYS_GETDENTS64), so buf receives 64-bit
// linux_dirent64 records. n is the number of bytes read.
func Getdents(fd int, buf []byte) (n int, err error) {
var _p0 unsafe.Pointer
if len(buf) > 0 {
_p0 = unsafe.Pointer(&buf[0])
} else {
// Zero-length slice: pass a valid dummy pointer rather than nil.
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf)))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getpgid(pid int) (pgid int, err error) {
r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0)
pgid = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getpid() (pid int) {
r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0)
pid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getppid() (ppid int) {
r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0)
ppid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getpriority(which int, who int) (prio int, err error) {
r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0)
prio = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getrusage(who int, rusage *Rusage) (err error) {
_, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Gettid() (tid int) {
r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0)
tid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getxattr(path string, attr string, dest []byte) (sz int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(attr)
if err != nil {
return
}
var _p2 unsafe.Pointer
if len(dest) > 0 {
_p2 = unsafe.Pointer(&dest[0])
} else {
_p2 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
sz = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(pathname)
if err != nil {
return
}
r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask))
use(unsafe.Pointer(_p0))
watchdesc = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func InotifyInit1(flags int) (fd int, err error) {
r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0)
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) {
r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0)
success = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Kill(pid int, sig syscall.Signal) (err error) {
_, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Klogctl(typ int, buf []byte) (n int, err error) {
var _p0 unsafe.Pointer
if len(buf) > 0 {
_p0 = unsafe.Pointer(&buf[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf)))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Listxattr(path string, dest []byte) (sz int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
var _p1 unsafe.Pointer
if len(dest) > 0 {
_p1 = unsafe.Pointer(&dest[0])
} else {
_p1 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)))
use(unsafe.Pointer(_p0))
sz = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mkdirat(dirfd int, path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Nanosleep(time *Timespec, leftover *Timespec) (err error) {
_, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func PivotRoot(newroot string, putold string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(newroot)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(putold)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

// prlimit wraps prlimit64(2): it atomically gets (via old) and/or sets
// (via newlimit) a resource limit for pid. Per prlimit64(2), pid 0 means
// the calling process and either pointer may be nil.
func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) {
_, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(newlimit)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) {
_, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func read(fd int, p []byte) (n int, err error) {
var _p0 unsafe.Pointer
if len(p) > 0 {
_p0 = unsafe.Pointer(&p[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p)))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Removexattr(path string, attr string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(attr)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(oldpath)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(newpath)
if err != nil {
return
}
_, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setdomainname(p []byte) (err error) {
var _p0 unsafe.Pointer
if len(p) > 0 {
_p0 = unsafe.Pointer(&p[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Sethostname(p []byte) (err error) {
var _p0 unsafe.Pointer
if len(p) > 0 {
_p0 = unsafe.Pointer(&p[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setpgid(pid int, pgid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setsid() (pid int, err error) {
r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0)
pid = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Settimeofday(tv *Timeval) (err error) {
_, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setns(fd int, nstype int) (err error) {
_, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setpriority(which int, who int, prio int) (err error) {
_, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setxattr(path string, attr string, data []byte, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(attr)
if err != nil {
return
}
var _p2 unsafe.Pointer
if len(data) > 0 {
_p2 = unsafe.Pointer(&data[0])
} else {
_p2 = unsafe.Pointer(&_zero)
}
_, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Sync() {
Syscall(SYS_SYNC, 0, 0, 0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Sysinfo(info *Sysinfo_t) (err error) {
_, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) {
r0, _, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0)
n = int64(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) {
_, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Times(tms *Tms) (ticks uintptr, err error) {
r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0)
ticks = uintptr(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Umask(mask int) (oldmask int) {
r0, _, _ := RawSyscall(SYS_UMASK, uintptr(mask), 0, 0)
oldmask = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Uname(buf *Utsname) (err error) {
_, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

// Unmount detaches the filesystem mounted at target. It uses umount2(2)
// rather than umount(2) so that flags (e.g. MNT_DETACH, MNT_FORCE) are
// passed through to the kernel.
func Unmount(target string, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(target)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Unshare(flags int) (err error) {
_, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Ustat(dev int, ubuf *Ustat_t) (err error) {
_, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func write(fd int, p []byte) (n int, err error) {
var _p0 unsafe.Pointer
if len(p) > 0 {
_p0 = unsafe.Pointer(&p[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

// exitThread terminates only the calling thread via exit(2), in contrast
// to Exit, which uses exit_group(2) to take down the whole process.
func exitThread(code int) (err error) {
_, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func readlen(fd int, p *byte, np int) (n int, err error) {
r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func writelen(fd int, p *byte, np int) (n int, err error) {
r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func munmap(addr uintptr, length uintptr) (err error) {
_, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Madvise(b []byte, advice int) (err error) {
var _p0 unsafe.Pointer
if len(b) > 0 {
_p0 = unsafe.Pointer(&b[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mprotect(b []byte, prot int) (err error) {
var _p0 unsafe.Pointer
if len(b) > 0 {
_p0 = unsafe.Pointer(&b[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mlock(b []byte) (err error) {
var _p0 unsafe.Pointer
if len(b) > 0 {
_p0 = unsafe.Pointer(&b[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Munlock(b []byte) (err error) {
var _p0 unsafe.Pointer
if len(b) > 0 {
_p0 = unsafe.Pointer(&b[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mlockall(flags int) (err error) {
_, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Munlockall() (err error) {
_, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Dup2(oldfd int, newfd int) (err error) {
_, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) {
var _p0 unsafe.Pointer
if len(events) > 0 {
_p0 = unsafe.Pointer(&events[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0)
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fadvise(fd int, offset int64, length int64, advice int) (err error) {
_, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchown(fd int, uid int, gid int) (err error) {
_, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fstat(fd int, stat *Stat_t) (err error) {
_, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fstatfs(fd int, buf *Statfs_t) (err error) {
_, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Ftruncate(fd int, length int64) (err error) {
_, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getegid() (egid int) {
r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0)
egid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Geteuid() (euid int) {
r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0)
euid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getgid() (gid int) {
r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0)
gid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getrlimit(resource int, rlim *Rlimit) (err error) {
_, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getuid() (uid int) {
r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0)
uid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func InotifyInit() (fd int, err error) {
r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT, 0, 0, 0)
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Ioperm(from int, num int, on int) (err error) {
_, _, e1 := Syscall(SYS_IOPERM, uintptr(from), uintptr(num), uintptr(on))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Iopl(level int) (err error) {
_, _, e1 := Syscall(SYS_IOPL, uintptr(level), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Lchown(path string, uid int, gid int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Listen(s int, n int) (err error) {
_, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Lstat(path string, stat *Stat_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Pause() (err error) {
_, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Pread(fd int, p []byte, offset int64) (n int, err error) {
var _p0 unsafe.Pointer
if len(p) > 0 {
_p0 = unsafe.Pointer(&p[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0)
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
var _p0 unsafe.Pointer
if len(p) > 0 {
_p0 = unsafe.Pointer(&p[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0)
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

// Seek repositions the file offset of fd via lseek(2) and returns the
// resulting offset measured from the start of the file.
func Seek(fd int, offset int64, whence int) (off int64, err error) {
r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence))
off = int64(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) {
r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0)
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0)
written = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setfsgid(gid int) (err error) {
_, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setfsuid(uid int) (err error) {
_, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setregid(rgid int, egid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setresgid(rgid int, egid int, sgid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setresuid(ruid int, euid int, suid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setrlimit(resource int, rlim *Rlimit) (err error) {
_, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setreuid(ruid int, euid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Shutdown(fd int, how int) (err error) {
_, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) {
r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags))
n = int64(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Stat(path string, stat *Stat_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Statfs(path string, buf *Statfs_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func SyncFileRange(fd int, off int64, n int64, flags int) (err error) {
_, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Truncate(path string, length int64) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) {
r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
_, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
_, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getgroups(n int, list *_Gid_t) (nn int, err error) {
r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0)
nn = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func setgroups(n int, list *_Gid_t) (err error) {
_, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) {
_, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) {
_, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func socket(domain int, typ int, proto int) (fd int, err error) {
r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto))
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) {
_, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
_, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
_, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) {
var _p0 unsafe.Pointer
if len(p) > 0 {
_p0 = unsafe.Pointer(&p[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) {
var _p0 unsafe.Pointer
if len(buf) > 0 {
_p0 = unsafe.Pointer(&buf[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) {
r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset))
xaddr = uintptr(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Utime(path string, buf *Utimbuf) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func pipe(p *[2]_C_int) (err error) {
_, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func pipe2(p *[2]_C_int, flags int) (err error) {
_, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func poll(fds *PollFd, nfds int, timeout int) (n int, err error) {
r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
| _vendor/src/golang.org/x/sys/unix/zsyscall_linux_amd64.go | 0 | https://github.com/pingcap/tidb/commit/b35e024ace8a525b61fd53eae22e9723d06454ca | [
0.0010806589853018522,
0.00018392135098110884,
0.00016136198246385902,
0.0001670334895607084,
0.00008643019828014076
] |
{
"id": 3,
"code_window": [
"\tvars := e.ctx.GetSessionVars()\n",
"\tid, ok := vars.PreparedStmtNameToID[e.Name]\n",
"\tif !ok {\n",
"\t\treturn nil, errors.Trace(plan.ErrStmtNotFound)\n",
"\t}\n",
"\tdelete(vars.PreparedStmtNameToID, e.Name)\n",
"\tdelete(vars.PreparedStmts, id)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\treturn errors.Trace(plan.ErrStmtNotFound)\n"
],
"file_path": "executor/prepared.go",
"type": "replace",
"edit_start_line_idx": 243
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package executor_test
import (
"fmt"
"strings"
. "github.com/pingcap/check"
"github.com/pingcap/tidb/mysql"
"github.com/pingcap/tidb/util/testkit"
)
func (s *testSuite) TestGrantGlobal(c *C) {
tk := testkit.NewTestKit(c, s.store)
// Create a new user.
createUserSQL := `CREATE USER 'testGlobal'@'localhost' IDENTIFIED BY '123';`
tk.MustExec(createUserSQL)
// Make sure all the global privs for new user is "N".
for _, v := range mysql.AllDBPrivs {
sql := fmt.Sprintf("SELECT %s FROM mysql.User WHERE User=\"testGlobal\" and host=\"localhost\";", mysql.Priv2UserCol[v])
r := tk.MustQuery(sql)
r.Check(testkit.Rows("N"))
}
// Grant each priv to the user.
for _, v := range mysql.AllGlobalPrivs {
sql := fmt.Sprintf("GRANT %s ON *.* TO 'testGlobal'@'localhost';", mysql.Priv2Str[v])
tk.MustExec(sql)
sql = fmt.Sprintf("SELECT %s FROM mysql.User WHERE User=\"testGlobal\" and host=\"localhost\"", mysql.Priv2UserCol[v])
tk.MustQuery(sql).Check(testkit.Rows("Y"))
}
// Create a new user.
createUserSQL = `CREATE USER 'testGlobal1'@'localhost' IDENTIFIED BY '123';`
tk.MustExec(createUserSQL)
tk.MustExec("GRANT ALL ON *.* TO 'testGlobal1'@'localhost';")
// Make sure all the global privs for granted user is "Y".
for _, v := range mysql.AllGlobalPrivs {
sql := fmt.Sprintf("SELECT %s FROM mysql.User WHERE User=\"testGlobal1\" and host=\"localhost\"", mysql.Priv2UserCol[v])
tk.MustQuery(sql).Check(testkit.Rows("Y"))
}
}
func (s *testSuite) TestGrantDBScope(c *C) {
tk := testkit.NewTestKit(c, s.store)
// Create a new user.
createUserSQL := `CREATE USER 'testDB'@'localhost' IDENTIFIED BY '123';`
tk.MustExec(createUserSQL)
// Make sure all the db privs for new user is empty.
sql := fmt.Sprintf("SELECT * FROM mysql.db WHERE User=\"testDB\" and host=\"localhost\"")
tk.MustQuery(sql).Check(testkit.Rows())
// Grant each priv to the user.
for _, v := range mysql.AllDBPrivs {
sql := fmt.Sprintf("GRANT %s ON test.* TO 'testDB'@'localhost';", mysql.Priv2Str[v])
tk.MustExec(sql)
sql = fmt.Sprintf("SELECT %s FROM mysql.DB WHERE User=\"testDB\" and host=\"localhost\" and db=\"test\"", mysql.Priv2UserCol[v])
tk.MustQuery(sql).Check(testkit.Rows("Y"))
}
// Create a new user.
createUserSQL = `CREATE USER 'testDB1'@'localhost' IDENTIFIED BY '123';`
tk.MustExec(createUserSQL)
tk.MustExec("USE test;")
tk.MustExec("GRANT ALL ON * TO 'testDB1'@'localhost';")
// Make sure all the db privs for granted user is "Y".
for _, v := range mysql.AllDBPrivs {
sql := fmt.Sprintf("SELECT %s FROM mysql.DB WHERE User=\"testDB1\" and host=\"localhost\" and db=\"test\";", mysql.Priv2UserCol[v])
tk.MustQuery(sql).Check(testkit.Rows("Y"))
}
}
func (s *testSuite) TestWithGrantOption(c *C) {
tk := testkit.NewTestKit(c, s.store)
// Create a new user.
createUserSQL := `CREATE USER 'testWithGrant'@'localhost' IDENTIFIED BY '123';`
tk.MustExec(createUserSQL)
// Make sure all the db privs for new user is empty.
sql := fmt.Sprintf("SELECT * FROM mysql.db WHERE User=\"testWithGrant\" and host=\"localhost\"")
tk.MustQuery(sql).Check(testkit.Rows())
// Grant select priv to the user, with grant option.
tk.MustExec("GRANT select ON test.* TO 'testWithGrant'@'localhost' WITH GRANT OPTION;")
tk.MustQuery("SELECT grant_priv FROM mysql.DB WHERE User=\"testWithGrant\" and host=\"localhost\" and db=\"test\"").Check(testkit.Rows("Y"))
}
func (s *testSuite) TestTableScope(c *C) {
tk := testkit.NewTestKit(c, s.store)
// Create a new user.
createUserSQL := `CREATE USER 'testTbl'@'localhost' IDENTIFIED BY '123';`
tk.MustExec(createUserSQL)
tk.MustExec(`CREATE TABLE test.test1(c1 int);`)
// Make sure all the table privs for new user is empty.
tk.MustQuery(`SELECT * FROM mysql.Tables_priv WHERE User="testTbl" and host="localhost" and db="test" and Table_name="test1"`).Check(testkit.Rows())
// Grant each priv to the user.
for _, v := range mysql.AllTablePrivs {
sql := fmt.Sprintf("GRANT %s ON test.test1 TO 'testTbl'@'localhost';", mysql.Priv2Str[v])
tk.MustExec(sql)
rows := tk.MustQuery(`SELECT Table_priv FROM mysql.Tables_priv WHERE User="testTbl" and host="localhost" and db="test" and Table_name="test1";`).Rows()
c.Assert(rows, HasLen, 1)
row := rows[0]
c.Assert(row, HasLen, 1)
p := fmt.Sprintf("%v", row[0])
c.Assert(strings.Index(p, mysql.Priv2SetStr[v]), Greater, -1)
}
// Create a new user.
createUserSQL = `CREATE USER 'testTbl1'@'localhost' IDENTIFIED BY '123';`
tk.MustExec(createUserSQL)
tk.MustExec("USE test;")
tk.MustExec(`CREATE TABLE test2(c1 int);`)
// Grant all table scope privs.
tk.MustExec("GRANT ALL ON test2 TO 'testTbl1'@'localhost';")
// Make sure all the table privs for granted user are in the Table_priv set.
for _, v := range mysql.AllTablePrivs {
rows := tk.MustQuery(`SELECT Table_priv FROM mysql.Tables_priv WHERE User="testTbl1" and host="localhost" and db="test" and Table_name="test2";`).Rows()
c.Assert(rows, HasLen, 1)
row := rows[0]
c.Assert(row, HasLen, 1)
p := fmt.Sprintf("%v", row[0])
c.Assert(strings.Index(p, mysql.Priv2SetStr[v]), Greater, -1)
}
}
func (s *testSuite) TestColumnScope(c *C) {
tk := testkit.NewTestKit(c, s.store)
// Create a new user.
createUserSQL := `CREATE USER 'testCol'@'localhost' IDENTIFIED BY '123';`
tk.MustExec(createUserSQL)
tk.MustExec(`CREATE TABLE test.test3(c1 int, c2 int);`)
// Make sure all the column privs for new user is empty.
tk.MustQuery(`SELECT * FROM mysql.Columns_priv WHERE User="testCol" and host="localhost" and db="test" and Table_name="test3" and Column_name="c1"`).Check(testkit.Rows())
tk.MustQuery(`SELECT * FROM mysql.Columns_priv WHERE User="testCol" and host="localhost" and db="test" and Table_name="test3" and Column_name="c2"`).Check(testkit.Rows())
// Grant each priv to the user.
for _, v := range mysql.AllColumnPrivs {
sql := fmt.Sprintf("GRANT %s(c1) ON test.test3 TO 'testCol'@'localhost';", mysql.Priv2Str[v])
tk.MustExec(sql)
rows := tk.MustQuery(`SELECT Column_priv FROM mysql.Columns_priv WHERE User="testCol" and host="localhost" and db="test" and Table_name="test3" and Column_name="c1";`).Rows()
c.Assert(rows, HasLen, 1)
row := rows[0]
c.Assert(row, HasLen, 1)
p := fmt.Sprintf("%v", row[0])
c.Assert(strings.Index(p, mysql.Priv2SetStr[v]), Greater, -1)
}
// Create a new user.
createUserSQL = `CREATE USER 'testCol1'@'localhost' IDENTIFIED BY '123';`
tk.MustExec(createUserSQL)
tk.MustExec("USE test;")
// Grant all column scope privs.
tk.MustExec("GRANT ALL(c2) ON test3 TO 'testCol1'@'localhost';")
// Make sure all the column privs for granted user are in the Column_priv set.
for _, v := range mysql.AllColumnPrivs {
rows := tk.MustQuery(`SELECT Column_priv FROM mysql.Columns_priv WHERE User="testCol1" and host="localhost" and db="test" and Table_name="test3" and Column_name="c2";`).Rows()
c.Assert(rows, HasLen, 1)
row := rows[0]
c.Assert(row, HasLen, 1)
p := fmt.Sprintf("%v", row[0])
c.Assert(strings.Index(p, mysql.Priv2SetStr[v]), Greater, -1)
}
}
func (s *testSuite) TestIssue2456(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("CREATE USER 'dduser'@'%' IDENTIFIED by '123456';")
tk.MustExec("GRANT ALL PRIVILEGES ON `dddb_%`.* TO 'dduser'@'%';")
tk.MustExec("GRANT ALL PRIVILEGES ON `dddb_%`.`te%` to 'dduser'@'%';")
}
func (s *testSuite) TestCreateUserWhenGrant(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("DROP USER IF EXISTS 'test'@'%'")
tk.MustExec("GRANT ALL PRIVILEGES ON *.* to 'test'@'%' IDENTIFIED BY 'xxx'")
// Make sure user is created automatically when grant to a non-exists one.
tk.MustQuery("SELECT user FROM mysql.user WHERE user='test' and host='%'").Check(
testkit.Rows("test"),
)
}
func (s *testSuite) TestIssue2654(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("DROP USER IF EXISTS 'test'@'%'")
tk.MustExec("CREATE USER 'test'@'%' IDENTIFIED BY 'test'")
tk.MustExec("GRANT SELECT ON test.* to 'test'")
rows := tk.MustQuery("SELECT user,host FROM mysql.user WHERE user='test' and host='%'")
rows.Check(testkit.Rows("test %"))
}
| executor/grant_test.go | 0 | https://github.com/pingcap/tidb/commit/b35e024ace8a525b61fd53eae22e9723d06454ca | [
0.00017683228361420333,
0.00017182083684019744,
0.000164195938850753,
0.00017176795518025756,
0.000003114236506007728
] |
{
"id": 3,
"code_window": [
"\tvars := e.ctx.GetSessionVars()\n",
"\tid, ok := vars.PreparedStmtNameToID[e.Name]\n",
"\tif !ok {\n",
"\t\treturn nil, errors.Trace(plan.ErrStmtNotFound)\n",
"\t}\n",
"\tdelete(vars.PreparedStmtNameToID, e.Name)\n",
"\tdelete(vars.PreparedStmts, id)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\treturn errors.Trace(plan.ErrStmtNotFound)\n"
],
"file_path": "executor/prepared.go",
"type": "replace",
"edit_start_line_idx": 243
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package expression
import (
"bytes"
"fmt"
"github.com/juju/errors"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/mysql"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/terror"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/types/json"
"github.com/pingcap/tidb/util/codec"
)
// ScalarFunction is the function that returns a value.
type ScalarFunction struct {
FuncName model.CIStr
// RetType is the type that ScalarFunction returns.
// TODO: Implement type inference here, now we use ast's return type temporarily.
RetType *types.FieldType
Function builtinFunc
}
// GetArgs gets arguments of function.
func (sf *ScalarFunction) GetArgs() []Expression {
return sf.Function.getArgs()
}
// GetCtx gets the context of function.
func (sf *ScalarFunction) GetCtx() context.Context {
return sf.Function.getCtx()
}
// String implements fmt.Stringer interface.
func (sf *ScalarFunction) String() string {
result := sf.FuncName.L + "("
for i, arg := range sf.GetArgs() {
result += arg.String()
if i+1 != len(sf.GetArgs()) {
result += ", "
}
}
result += ")"
return result
}
// MarshalJSON implements json.Marshaler interface.
func (sf *ScalarFunction) MarshalJSON() ([]byte, error) {
buffer := bytes.NewBufferString(fmt.Sprintf("\"%s\"", sf))
return buffer.Bytes(), nil
}
// NewFunction creates a new scalar function or constant.
func NewFunction(ctx context.Context, funcName string, retType *types.FieldType, args ...Expression) (Expression, error) {
if retType == nil {
return nil, errors.Errorf("RetType cannot be nil for ScalarFunction.")
}
if funcName == ast.Cast {
return BuildCastFunction(ctx, args[0], retType), nil
}
fc, ok := funcs[funcName]
if !ok {
return nil, errFunctionNotExists.GenByArgs("FUNCTION", funcName)
}
funcArgs := make([]Expression, len(args))
copy(funcArgs, args)
f, err := fc.getFunction(ctx, funcArgs)
if err != nil {
return nil, errors.Trace(err)
}
if builtinRetTp := f.getRetTp(); builtinRetTp.Tp != mysql.TypeUnspecified || retType.Tp == mysql.TypeUnspecified {
retType = builtinRetTp
}
sf := &ScalarFunction{
FuncName: model.NewCIStr(funcName),
RetType: retType,
Function: f,
}
return FoldConstant(sf), nil
}
// NewFunctionInternal is similar to NewFunction, but do not returns error, should only be used internally.
func NewFunctionInternal(ctx context.Context, funcName string, retType *types.FieldType, args ...Expression) Expression {
expr, err := NewFunction(ctx, funcName, retType, args...)
terror.Log(errors.Trace(err))
return expr
}
// ScalarFuncs2Exprs converts []*ScalarFunction to []Expression.
func ScalarFuncs2Exprs(funcs []*ScalarFunction) []Expression {
result := make([]Expression, 0, len(funcs))
for _, col := range funcs {
result = append(result, col)
}
return result
}
// Clone implements Expression interface.
func (sf *ScalarFunction) Clone() Expression {
newArgs := make([]Expression, 0, len(sf.GetArgs()))
for _, arg := range sf.GetArgs() {
newArgs = append(newArgs, arg.Clone())
}
switch sf.FuncName.L {
case ast.Cast:
return BuildCastFunction(sf.GetCtx(), sf.GetArgs()[0], sf.GetType())
case ast.Values:
var offset int
switch sf.GetType().EvalType() {
case types.ETInt:
offset = sf.Function.(*builtinValuesIntSig).offset
case types.ETReal:
offset = sf.Function.(*builtinValuesRealSig).offset
case types.ETDecimal:
offset = sf.Function.(*builtinValuesDecimalSig).offset
case types.ETString:
offset = sf.Function.(*builtinValuesStringSig).offset
case types.ETDatetime, types.ETTimestamp:
offset = sf.Function.(*builtinValuesTimeSig).offset
case types.ETDuration:
offset = sf.Function.(*builtinValuesDurationSig).offset
case types.ETJson:
offset = sf.Function.(*builtinValuesJSONSig).offset
}
return NewValuesFunc(offset, sf.GetType(), sf.GetCtx())
}
newFunc := NewFunctionInternal(sf.GetCtx(), sf.FuncName.L, sf.RetType, newArgs...)
return newFunc
}
// GetType implements Expression interface.
func (sf *ScalarFunction) GetType() *types.FieldType {
return sf.RetType
}
// Equal implements Expression interface.
func (sf *ScalarFunction) Equal(e Expression, ctx context.Context) bool {
fun, ok := e.(*ScalarFunction)
if !ok {
return false
}
if sf.FuncName.L != fun.FuncName.L {
return false
}
return sf.Function.equal(fun.Function)
}
// IsCorrelated implements Expression interface.
func (sf *ScalarFunction) IsCorrelated() bool {
for _, arg := range sf.GetArgs() {
if arg.IsCorrelated() {
return true
}
}
return false
}
// Decorrelate implements Expression interface.
func (sf *ScalarFunction) Decorrelate(schema *Schema) Expression {
for i, arg := range sf.GetArgs() {
sf.GetArgs()[i] = arg.Decorrelate(schema)
}
return sf
}
// Eval implements Expression interface.
func (sf *ScalarFunction) Eval(row types.Row) (d types.Datum, err error) {
sc := sf.GetCtx().GetSessionVars().StmtCtx
var (
res interface{}
isNull bool
)
switch tp, evalType := sf.GetType(), sf.GetType().EvalType(); evalType {
case types.ETInt:
var intRes int64
intRes, isNull, err = sf.EvalInt(row, sc)
if mysql.HasUnsignedFlag(tp.Flag) {
res = uint64(intRes)
} else {
res = intRes
}
case types.ETReal:
res, isNull, err = sf.EvalReal(row, sc)
case types.ETDecimal:
res, isNull, err = sf.EvalDecimal(row, sc)
case types.ETDatetime, types.ETTimestamp:
res, isNull, err = sf.EvalTime(row, sc)
case types.ETDuration:
res, isNull, err = sf.EvalDuration(row, sc)
case types.ETJson:
res, isNull, err = sf.EvalJSON(row, sc)
case types.ETString:
res, isNull, err = sf.EvalString(row, sc)
}
if isNull || err != nil {
d.SetValue(nil)
return d, errors.Trace(err)
}
d.SetValue(res)
return
}
// EvalInt implements Expression interface.
func (sf *ScalarFunction) EvalInt(row types.Row, sc *stmtctx.StatementContext) (int64, bool, error) {
return sf.Function.evalInt(row)
}
// EvalReal implements Expression interface.
func (sf *ScalarFunction) EvalReal(row types.Row, sc *stmtctx.StatementContext) (float64, bool, error) {
return sf.Function.evalReal(row)
}
// EvalDecimal implements Expression interface.
func (sf *ScalarFunction) EvalDecimal(row types.Row, sc *stmtctx.StatementContext) (*types.MyDecimal, bool, error) {
return sf.Function.evalDecimal(row)
}
// EvalString implements Expression interface.
func (sf *ScalarFunction) EvalString(row types.Row, sc *stmtctx.StatementContext) (string, bool, error) {
return sf.Function.evalString(row)
}
// EvalTime implements Expression interface.
func (sf *ScalarFunction) EvalTime(row types.Row, sc *stmtctx.StatementContext) (types.Time, bool, error) {
return sf.Function.evalTime(row)
}
// EvalDuration implements Expression interface.
func (sf *ScalarFunction) EvalDuration(row types.Row, sc *stmtctx.StatementContext) (types.Duration, bool, error) {
return sf.Function.evalDuration(row)
}
// EvalJSON implements Expression interface.
func (sf *ScalarFunction) EvalJSON(row types.Row, sc *stmtctx.StatementContext) (json.JSON, bool, error) {
return sf.Function.evalJSON(row)
}
// HashCode implements Expression interface.
func (sf *ScalarFunction) HashCode() []byte {
v := make([]types.Datum, 0, len(sf.GetArgs())+1)
bytes, err := codec.EncodeValue(nil, types.NewStringDatum(sf.FuncName.L))
terror.Log(errors.Trace(err))
v = append(v, types.NewBytesDatum(bytes))
for _, arg := range sf.GetArgs() {
v = append(v, types.NewBytesDatum(arg.HashCode()))
}
bytes = bytes[:0]
bytes, err = codec.EncodeValue(bytes, v...)
terror.Log(errors.Trace(err))
return bytes
}
// ResolveIndices implements Expression interface.
func (sf *ScalarFunction) ResolveIndices(schema *Schema) {
for _, arg := range sf.GetArgs() {
arg.ResolveIndices(schema)
}
}
| expression/scalar_function.go | 0 | https://github.com/pingcap/tidb/commit/b35e024ace8a525b61fd53eae22e9723d06454ca | [
0.0011835785117000341,
0.00028105260571464896,
0.00016145063273143023,
0.00017179205315187573,
0.0002850722230505198
] |
{
"id": 4,
"code_window": [
"\t}\n",
"\tdelete(vars.PreparedStmtNameToID, e.Name)\n",
"\tdelete(vars.PreparedStmts, id)\n",
"\treturn nil, nil\n",
"}\n",
"\n",
"// Close implements Executor Close interface.\n",
"func (e *DeallocateExec) Close() error {\n",
"\treturn nil\n",
"}\n",
"\n",
"// Open implements Executor Open interface.\n",
"func (e *DeallocateExec) Open(goCtx goctx.Context) error {\n",
"\treturn nil\n",
"}\n",
"\n",
"// CompileExecutePreparedStmt compiles a session Execute command to a stmt.Statement.\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "executor/prepared.go",
"type": "replace",
"edit_start_line_idx": 247
} | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"math"
"sort"
"github.com/juju/errors"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/parser"
"github.com/pingcap/tidb/plan"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/sqlexec"
goctx "golang.org/x/net/context"
)
var (
_ Executor = &DeallocateExec{}
_ Executor = &ExecuteExec{}
_ Executor = &PrepareExec{}
)
type paramMarkerSorter struct {
markers []*ast.ParamMarkerExpr
}
func (p *paramMarkerSorter) Len() int {
return len(p.markers)
}
func (p *paramMarkerSorter) Less(i, j int) bool {
return p.markers[i].Offset < p.markers[j].Offset
}
func (p *paramMarkerSorter) Swap(i, j int) {
p.markers[i], p.markers[j] = p.markers[j], p.markers[i]
}
type paramMarkerExtractor struct {
markers []*ast.ParamMarkerExpr
}
func (e *paramMarkerExtractor) Enter(in ast.Node) (ast.Node, bool) {
return in, false
}
func (e *paramMarkerExtractor) Leave(in ast.Node) (ast.Node, bool) {
if x, ok := in.(*ast.ParamMarkerExpr); ok {
e.markers = append(e.markers, x)
}
return in, true
}
// PrepareExec represents a PREPARE executor.
type PrepareExec struct {
baseExecutor
is infoschema.InfoSchema
name string
sqlText string
ID uint32
ParamCount int
Fields []*ast.ResultField
}
// NewPrepareExec creates a new PrepareExec.
func NewPrepareExec(ctx context.Context, is infoschema.InfoSchema, sqlTxt string) *PrepareExec {
return &PrepareExec{
baseExecutor: newBaseExecutor(nil, ctx),
is: is,
sqlText: sqlTxt,
}
}
// Next implements the Executor Next interface.
func (e *PrepareExec) Next(goCtx goctx.Context) (Row, error) {
return nil, errors.Trace(e.DoPrepare())
}
// NextChunk implements the Executor NextChunk interface.
func (e *PrepareExec) NextChunk(goCtx goctx.Context, chk *chunk.Chunk) error {
return errors.Trace(e.DoPrepare())
}
// DoPrepare prepares the statement, it can be called multiple times without side effect.
func (e *PrepareExec) DoPrepare() error {
vars := e.ctx.GetSessionVars()
if e.ID != 0 {
// Must be the case when we retry a prepare.
// Make sure it is idempotent.
_, ok := vars.PreparedStmts[e.ID]
if ok {
return nil
}
}
charset, collation := vars.GetCharsetInfo()
var (
stmts []ast.StmtNode
err error
)
if sqlParser, ok := e.ctx.(sqlexec.SQLParser); ok {
stmts, err = sqlParser.ParseSQL(e.sqlText, charset, collation)
} else {
stmts, err = parser.New().Parse(e.sqlText, charset, collation)
}
if err != nil {
return errors.Trace(err)
}
if len(stmts) != 1 {
return ErrPrepareMulti
}
stmt := stmts[0]
if _, ok := stmt.(ast.DDLNode); ok {
return ErrPrepareDDL
}
var extractor paramMarkerExtractor
stmt.Accept(&extractor)
err = plan.Preprocess(e.ctx, stmt, e.is, true)
if err != nil {
return errors.Trace(err)
}
// The parameter markers are appended in visiting order, which may not
// be the same as the position order in the query string. We need to
// sort it by position.
sorter := ¶mMarkerSorter{markers: extractor.markers}
sort.Sort(sorter)
e.ParamCount = len(sorter.markers)
for i := 0; i < e.ParamCount; i++ {
sorter.markers[i].Order = i
}
prepared := &plan.Prepared{
Stmt: stmt,
Params: sorter.markers,
SchemaVersion: e.is.SchemaMetaVersion(),
}
prepared.UseCache = plan.PreparedPlanCacheEnabled && plan.Cacheable(stmt)
// We try to build the real statement of preparedStmt.
for i := range prepared.Params {
prepared.Params[i].SetDatum(types.NewIntDatum(0))
}
_, err = plan.BuildLogicalPlan(e.ctx, stmt, e.is)
if err != nil {
return errors.Trace(err)
}
if e.ID == 0 {
e.ID = vars.GetNextPreparedStmtID()
}
if e.name != "" {
vars.PreparedStmtNameToID[e.name] = e.ID
}
vars.PreparedStmts[e.ID] = prepared
return nil
}
// ExecuteExec represents an EXECUTE executor.
// It cannot be executed by itself, all it needs to do is to build
// another Executor from a prepared statement.
type ExecuteExec struct {
baseExecutor
IS infoschema.InfoSchema
Name string
UsingVars []expression.Expression
ID uint32
StmtExec Executor
Stmt ast.StmtNode
Plan plan.Plan
}
// Next implements the Executor Next interface.
func (e *ExecuteExec) Next(goCtx goctx.Context) (Row, error) {
// Will never be called.
return nil, nil
}
// Open implements the Executor Open interface.
func (e *ExecuteExec) Open(goCtx goctx.Context) error {
return nil
}
// Close implements Executor Close interface.
func (e *ExecuteExec) Close() error {
// Will never be called.
return nil
}
// Build builds a prepared statement into an executor.
// After Build, e.StmtExec will be used to do the real execution.
func (e *ExecuteExec) Build() error {
var err error
if IsPointGetWithPKOrUniqueKeyByAutoCommit(e.ctx, e.Plan) {
err = e.ctx.InitTxnWithStartTS(math.MaxUint64)
} else {
err = e.ctx.ActivePendingTxn()
}
if err != nil {
return errors.Trace(err)
}
b := newExecutorBuilder(e.ctx, e.IS, kv.PriorityNormal)
stmtExec := b.build(e.Plan)
if b.err != nil {
return errors.Trace(b.err)
}
e.StmtExec = stmtExec
ResetStmtCtx(e.ctx, e.Stmt)
stmtCount(e.Stmt, e.Plan, e.ctx.GetSessionVars().InRestrictedSQL)
return nil
}
// DeallocateExec represent a DEALLOCATE executor.
type DeallocateExec struct {
baseExecutor
Name string
}
// Next implements the Executor Next interface.
func (e *DeallocateExec) Next(goCtx goctx.Context) (Row, error) {
vars := e.ctx.GetSessionVars()
id, ok := vars.PreparedStmtNameToID[e.Name]
if !ok {
return nil, errors.Trace(plan.ErrStmtNotFound)
}
delete(vars.PreparedStmtNameToID, e.Name)
delete(vars.PreparedStmts, id)
return nil, nil
}
// Close implements Executor Close interface.
func (e *DeallocateExec) Close() error {
return nil
}
// Open implements Executor Open interface.
func (e *DeallocateExec) Open(goCtx goctx.Context) error {
return nil
}
// CompileExecutePreparedStmt compiles a session Execute command to a stmt.Statement.
func CompileExecutePreparedStmt(ctx context.Context, ID uint32, args ...interface{}) (ast.Statement, error) {
execStmt := &ast.ExecuteStmt{ExecID: ID}
execStmt.UsingVars = make([]ast.ExprNode, len(args))
for i, val := range args {
execStmt.UsingVars[i] = ast.NewValueExpr(val)
}
is := GetInfoSchema(ctx)
execPlan, err := plan.Optimize(ctx, execStmt, is)
if err != nil {
return nil, errors.Trace(err)
}
stmt := &ExecStmt{
InfoSchema: GetInfoSchema(ctx),
Plan: execPlan,
StmtNode: execStmt,
Ctx: ctx,
}
if prepared, ok := ctx.GetSessionVars().PreparedStmts[ID].(*plan.Prepared); ok {
stmt.Text = prepared.Stmt.Text()
}
return stmt, nil
}
// ResetStmtCtx resets the StmtContext.
// Before every execution, we must clear statement context.
func ResetStmtCtx(ctx context.Context, s ast.StmtNode) {
sessVars := ctx.GetSessionVars()
sc := new(stmtctx.StatementContext)
sc.TimeZone = sessVars.GetTimeZone()
switch stmt := s.(type) {
case *ast.UpdateStmt:
sc.IgnoreTruncate = false
sc.OverflowAsWarning = false
sc.TruncateAsWarning = !sessVars.StrictSQLMode || stmt.IgnoreErr
sc.InUpdateOrDeleteStmt = true
sc.DividedByZeroAsWarning = stmt.IgnoreErr
sc.IgnoreZeroInDate = !sessVars.StrictSQLMode || stmt.IgnoreErr
case *ast.DeleteStmt:
sc.IgnoreTruncate = false
sc.OverflowAsWarning = false
sc.TruncateAsWarning = !sessVars.StrictSQLMode || stmt.IgnoreErr
sc.InUpdateOrDeleteStmt = true
sc.DividedByZeroAsWarning = stmt.IgnoreErr
sc.IgnoreZeroInDate = !sessVars.StrictSQLMode || stmt.IgnoreErr
case *ast.InsertStmt:
sc.IgnoreTruncate = false
sc.TruncateAsWarning = !sessVars.StrictSQLMode || stmt.IgnoreErr
sc.InInsertStmt = true
sc.DividedByZeroAsWarning = stmt.IgnoreErr
sc.IgnoreZeroInDate = !sessVars.StrictSQLMode || stmt.IgnoreErr
case *ast.CreateTableStmt, *ast.AlterTableStmt:
// Make sure the sql_mode is strict when checking column default value.
sc.IgnoreTruncate = false
sc.OverflowAsWarning = false
sc.TruncateAsWarning = false
case *ast.LoadDataStmt:
sc.IgnoreTruncate = false
sc.OverflowAsWarning = false
sc.TruncateAsWarning = !sessVars.StrictSQLMode
case *ast.SelectStmt:
sc.InSelectStmt = true
// see https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html#sql-mode-strict
// said "For statements such as SELECT that do not change data, invalid values
// generate a warning in strict mode, not an error."
// and https://dev.mysql.com/doc/refman/5.7/en/out-of-range-and-overflow.html
sc.OverflowAsWarning = true
// Return warning for truncate error in selection.
sc.IgnoreTruncate = false
sc.TruncateAsWarning = true
sc.IgnoreZeroInDate = true
if opts := stmt.SelectStmtOpts; opts != nil {
sc.Priority = opts.Priority
sc.NotFillCache = !opts.SQLCache
}
sc.PadCharToFullLength = ctx.GetSessionVars().SQLMode.HasPadCharToFullLengthMode()
default:
sc.IgnoreTruncate = true
sc.OverflowAsWarning = false
if show, ok := s.(*ast.ShowStmt); ok {
if show.Tp == ast.ShowWarnings {
sc.InShowWarning = true
sc.SetWarnings(sessVars.StmtCtx.GetWarnings())
}
}
sc.IgnoreZeroInDate = true
}
if sessVars.LastInsertID > 0 {
sessVars.PrevLastInsertID = sessVars.LastInsertID
sessVars.LastInsertID = 0
}
sessVars.ResetPrevAffectedRows()
sessVars.InsertID = 0
sessVars.StmtCtx = sc
}
| executor/prepared.go | 1 | https://github.com/pingcap/tidb/commit/b35e024ace8a525b61fd53eae22e9723d06454ca | [
0.8985152840614319,
0.029730694368481636,
0.00016398800653405488,
0.0012061720481142402,
0.1472022384405136
] |
{
"id": 4,
"code_window": [
"\t}\n",
"\tdelete(vars.PreparedStmtNameToID, e.Name)\n",
"\tdelete(vars.PreparedStmts, id)\n",
"\treturn nil, nil\n",
"}\n",
"\n",
"// Close implements Executor Close interface.\n",
"func (e *DeallocateExec) Close() error {\n",
"\treturn nil\n",
"}\n",
"\n",
"// Open implements Executor Open interface.\n",
"func (e *DeallocateExec) Open(goCtx goctx.Context) error {\n",
"\treturn nil\n",
"}\n",
"\n",
"// CompileExecutePreparedStmt compiles a session Execute command to a stmt.Statement.\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "executor/prepared.go",
"type": "replace",
"edit_start_line_idx": 247
} | // Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package statistics
import (
"fmt"
"github.com/juju/errors"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/ddl/util"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/mysql"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/sqlexec"
log "github.com/sirupsen/logrus"
goctx "golang.org/x/net/context"
)
// HandleDDLEvent begins to process a ddl task.
func (h *Handle) HandleDDLEvent(t *util.Event) error {
switch t.Tp {
case model.ActionCreateTable:
return h.insertTableStats2KV(t.TableInfo)
case model.ActionDropTable:
return h.DeleteTableStatsFromKV(t.TableInfo.ID)
case model.ActionAddColumn:
return h.insertColStats2KV(t.TableInfo.ID, t.ColumnInfo)
case model.ActionDropColumn:
return h.deleteHistStatsFromKV(t.TableInfo.ID, t.ColumnInfo.ID, 0)
case model.ActionDropIndex:
return h.deleteHistStatsFromKV(t.TableInfo.ID, t.IndexInfo.ID, 1)
default:
log.Warnf("Unsupported ddl event for statistic %s", t)
}
return nil
}
// DDLEventCh returns ddl events channel in handle.
func (h *Handle) DDLEventCh() chan *util.Event {
return h.ddlEventCh
}
// insertTableStats2KV inserts a record standing for a new table to stats_meta and inserts some records standing for the
// new columns and indices which belong to this table.
func (h *Handle) insertTableStats2KV(info *model.TableInfo) error {
exec := h.ctx.(sqlexec.SQLExecutor)
_, err := exec.Execute(goctx.Background(), "begin")
if err != nil {
return errors.Trace(err)
}
_, err = exec.Execute(goctx.Background(), fmt.Sprintf("insert into mysql.stats_meta (version, table_id) values(%d, %d)", h.ctx.Txn().StartTS(), info.ID))
if err != nil {
return errors.Trace(err)
}
for _, col := range info.Columns {
_, err = exec.Execute(goctx.Background(), fmt.Sprintf("insert into mysql.stats_histograms (table_id, is_index, hist_id, distinct_count, version) values(%d, 0, %d, 0, %d)", info.ID, col.ID, h.ctx.Txn().StartTS()))
if err != nil {
return errors.Trace(err)
}
}
for _, idx := range info.Indices {
_, err = exec.Execute(goctx.Background(), fmt.Sprintf("insert into mysql.stats_histograms (table_id, is_index, hist_id, distinct_count, version) values(%d, 1, %d, 0, %d)", info.ID, idx.ID, h.ctx.Txn().StartTS()))
if err != nil {
return errors.Trace(err)
}
}
_, err = exec.Execute(goctx.Background(), "commit")
return errors.Trace(err)
}
// DeleteTableStatsFromKV deletes table statistics from kv.
func (h *Handle) DeleteTableStatsFromKV(id int64) error {
exec := h.ctx.(sqlexec.SQLExecutor)
_, err := exec.Execute(goctx.Background(), "begin")
if err != nil {
return errors.Trace(err)
}
// First of all, we update the version.
_, err = exec.Execute(goctx.Background(), fmt.Sprintf("update mysql.stats_meta set version = %d where table_id = %d ", h.ctx.Txn().StartTS(), id))
if err != nil {
return errors.Trace(err)
}
_, err = exec.Execute(goctx.Background(), fmt.Sprintf("delete from mysql.stats_histograms where table_id = %d", id))
if err != nil {
return errors.Trace(err)
}
_, err = exec.Execute(goctx.Background(), fmt.Sprintf("delete from mysql.stats_buckets where table_id = %d", id))
if err != nil {
return errors.Trace(err)
}
_, err = exec.Execute(goctx.Background(), "commit")
return errors.Trace(err)
}
// insertColStats2KV insert a record to stats_histograms with distinct_count 1 and insert a bucket to stats_buckets with default value.
// This operation also updates version.
func (h *Handle) insertColStats2KV(tableID int64, colInfo *model.ColumnInfo) error {
exec := h.ctx.(sqlexec.SQLExecutor)
_, err := exec.Execute(goctx.Background(), "begin")
if err != nil {
return errors.Trace(err)
}
// First of all, we update the version.
_, err = exec.Execute(goctx.Background(), fmt.Sprintf("update mysql.stats_meta set version = %d where table_id = %d ", h.ctx.Txn().StartTS(), tableID))
if err != nil {
return errors.Trace(err)
}
goCtx := goctx.TODO()
// If we didn't update anything by last SQL, it means the stats of this table does not exist.
if h.ctx.GetSessionVars().StmtCtx.AffectedRows() > 0 {
exec := h.ctx.(sqlexec.SQLExecutor)
// By this step we can get the count of this table, then we can sure the count and repeats of bucket.
var rs []ast.RecordSet
rs, err = exec.Execute(goCtx, fmt.Sprintf("select count from mysql.stats_meta where table_id = %d", tableID))
if err != nil {
return errors.Trace(err)
}
var row types.Row
row, err = rs[0].Next(goCtx)
if err != nil {
return errors.Trace(err)
}
count := row.GetInt64(0)
value := types.NewDatum(colInfo.OriginDefaultValue)
value, err = value.ConvertTo(h.ctx.GetSessionVars().StmtCtx, &colInfo.FieldType)
if err != nil {
return errors.Trace(err)
}
if value.IsNull() {
// If the adding column has default value null, all the existing rows have null value on the newly added column.
_, err = exec.Execute(goCtx, fmt.Sprintf("insert into mysql.stats_histograms (version, table_id, is_index, hist_id, distinct_count, null_count) values (%d, %d, 0, %d, 0, %d)", h.ctx.Txn().StartTS(), tableID, colInfo.ID, count))
if err != nil {
return errors.Trace(err)
}
} else {
// If this stats exists, we insert histogram meta first, the distinct_count will always be one.
_, err = exec.Execute(goCtx, fmt.Sprintf("insert into mysql.stats_histograms (version, table_id, is_index, hist_id, distinct_count) values (%d, %d, 0, %d, 1)", h.ctx.Txn().StartTS(), tableID, colInfo.ID))
if err != nil {
return errors.Trace(err)
}
value, err = value.ConvertTo(h.ctx.GetSessionVars().StmtCtx, types.NewFieldType(mysql.TypeBlob))
if err != nil {
return errors.Trace(err)
}
// There must be only one bucket for this new column and the value is the default value.
_, err = exec.Execute(goCtx, fmt.Sprintf("insert into mysql.stats_buckets (table_id, is_index, hist_id, bucket_id, repeats, count, lower_bound, upper_bound) values (%d, 0, %d, 0, %d, %d, X'%X', X'%X')", tableID, colInfo.ID, count, count, value.GetBytes(), value.GetBytes()))
if err != nil {
return errors.Trace(err)
}
}
}
_, err = exec.Execute(goCtx, "commit")
return errors.Trace(err)
}
// deleteHistStatsFromKV deletes all records about a column or an index and updates version.
func (h *Handle) deleteHistStatsFromKV(tableID int64, histID int64, isIndex int) error {
exec := h.ctx.(sqlexec.SQLExecutor)
_, err := exec.Execute(goctx.Background(), "begin")
if err != nil {
return errors.Trace(err)
}
// First of all, we update the version. If this table doesn't exist, it won't have any problem. Because we cannot delete anything.
_, err = exec.Execute(goctx.Background(), fmt.Sprintf("update mysql.stats_meta set version = %d where table_id = %d ", h.ctx.Txn().StartTS(), tableID))
if err != nil {
return errors.Trace(err)
}
// delete histogram meta
_, err = exec.Execute(goctx.Background(), fmt.Sprintf("delete from mysql.stats_histograms where table_id = %d and hist_id = %d and is_index = %d", tableID, histID, isIndex))
if err != nil {
return errors.Trace(err)
}
// delete all buckets
_, err = exec.Execute(goctx.Background(), fmt.Sprintf("delete from mysql.stats_buckets where table_id = %d and hist_id = %d and is_index = %d", tableID, histID, isIndex))
if err != nil {
return errors.Trace(err)
}
_, err = exec.Execute(goctx.Background(), "commit")
return errors.Trace(err)
}
| statistics/ddl.go | 0 | https://github.com/pingcap/tidb/commit/b35e024ace8a525b61fd53eae22e9723d06454ca | [
0.0020437906496226788,
0.00028717832174152136,
0.00016355123079847544,
0.00017101704725064337,
0.00040708822780288756
] |
{
"id": 4,
"code_window": [
"\t}\n",
"\tdelete(vars.PreparedStmtNameToID, e.Name)\n",
"\tdelete(vars.PreparedStmts, id)\n",
"\treturn nil, nil\n",
"}\n",
"\n",
"// Close implements Executor Close interface.\n",
"func (e *DeallocateExec) Close() error {\n",
"\treturn nil\n",
"}\n",
"\n",
"// Open implements Executor Open interface.\n",
"func (e *DeallocateExec) Open(goCtx goctx.Context) error {\n",
"\treturn nil\n",
"}\n",
"\n",
"// CompileExecutePreparedStmt compiles a session Execute command to a stmt.Statement.\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "executor/prepared.go",
"type": "replace",
"edit_start_line_idx": 247
} | // Go driver for MySQL X Protocol
// Based heavily on Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
// Copyright 2016 Simon J Mudd.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package mysql
import (
"database/sql/driver"
"io"
log "github.com/sirupsen/logrus"
"github.com/golang/protobuf/proto"
"github.com/juju/errors"
"github.com/pingcap/tipb/go-mysqlx"
"github.com/pingcap/tipb/go-mysqlx/Resultset"
)
type mysqlXRows struct {
columns [](*Mysqlx_Resultset.ColumnMetaData) // holds column metadata (if present) for a row
mc *mysqlXConn
state queryState
err error // provides the error received from a query (if present)
}
// readMsgIfNecessary reads in a message only if we don't have one already
func (rows *mysqlXRows) readMsgIfNecessary() error {
// safety checks (which maybe can removed later
if rows == nil {
return errors.Errorf("mysqlXRows.readMsgIfNecessary: rows == nil")
}
if rows.mc == nil {
return errors.Errorf("mysqlXRows.readMsgIfNecessary: rows.mc == nil")
}
// if we already have a protobuf message then no need to read a new one
if rows.mc.pb != nil {
return nil
}
var err error
rows.mc.pb, err = rows.mc.readMsg()
if err != nil {
err = errors.Trace(err)
rows.err = err
rows.state = queryStateError
}
return err
}
// Columns returns the column meta data of a row and may need to
// read in some of the metadata messages from the network.
func (rows *mysqlXRows) Columns() []string {
if err := rows.collectColumnMetaData(); err != nil {
panic(err)
}
columns := make([]string, len(rows.columns))
for i := range rows.columns {
// FIXME: handle: if rows.mc.cfg.columnsWithAlias { ....
columns[i] = string(rows.columns[i].GetName())
}
return columns
}
// we have finished with the iterator
// - given Close can be called at any time we may have pending
// messages in the queue which need skipping so we really need
// to keep the state of where we are.
func (rows *mysqlXRows) Close() error {
// safety checks
if rows == nil {
return nil // to avoid breakage. Fix the calling code later
}
if rows.mc == nil {
return nil // no connection information
}
if rows.mc.netConn == nil {
return ErrInvalidConn
}
// We may have "query packets" which have not yet been
// processed. If so just let them through but ignore them.
for rows.state != queryStateDone && rows.state != queryStateError {
if err := rows.readMsgIfNecessary(); err != nil {
break
}
// Finish if we get an error or if the mssage type is EXECUTE_OK or ERROR
switch Mysqlx.ServerMessages_Type(rows.mc.pb.msgType) {
case Mysqlx.ServerMessages_ERROR:
if err := rows.mc.processErrorMsg(); err != nil {
return errors.Trace(err)
}
rows.state = queryStateError
case Mysqlx.ServerMessages_SQL_STMT_EXECUTE_OK:
rows.state = queryStateDone
case Mysqlx.ServerMessages_NOTICE:
if err := rows.mc.processNotice("mysqlXRows.Close"); err != nil {
return errors.Trace(err)
}
}
rows.mc.pb = nil
}
// clean up
rows.columns = nil
rows.mc.pb = nil
rows.mc = nil
rows.state = queryStateStart
return nil
}
// add the column information to the row
func (rows *mysqlXRows) addColumnMetaData() error {
if rows == nil {
return errors.Errorf("mysqlXrows.addColumnMetaData: rows == nil")
}
column := new(Mysqlx_Resultset.ColumnMetaData)
if err := proto.Unmarshal(rows.mc.pb.payload, column); err != nil {
return errors.Trace(err)
}
rows.columns = append(rows.columns, column)
rows.mc.pb = nil
return nil
}
// process a single row (in rows.mc.pb) and return if there was an error
func processRow(rows *mysqlXRows, dest []driver.Value) error {
var err error
myRow := new(Mysqlx_Resultset.Row)
if err = proto.Unmarshal(rows.mc.pb.payload, myRow); err != nil {
return errors.Trace(err)
}
rows.mc.pb = nil // consume the message
// copy over data converting each type to a dest type
for i := range dest {
if dest[i], err = convertColumnData(rows.columns[i], myRow.GetField()[i]); err != nil {
return errors.Trace(err)
}
}
return nil // no error
}
// Read a row of data from the connection until no more and then return io.EOF to indicate we have finished
func (rows *mysqlXRows) Next(dest []driver.Value) error {
// safety checks
if rows == nil {
log.Fatal("mysqlXRows.Next: rows == nil")
}
if rows.mc == nil {
log.Fatal("mysqlXRows.Next: rows.mc == nil")
}
// Finished? Don't continue
if rows.state.Finished() {
return io.EOF
}
// Have we read the column data yet? If not read it.
if rows.state == queryStateWaitingColumnMetaData {
if err := rows.collectColumnMetaData(); err != nil {
return errors.Trace(err)
}
}
// clean this logic up into a smaller more readable loop
done := false
for !done {
switch rows.state {
case queryStateWaitingRow:
// pull in a message if needed
if err := rows.readMsgIfNecessary(); err != nil {
log.Fatalf("DEBUG: mysqlXRow.Next: failed to read data if necessary")
}
// check if it's a Row message!
switch Mysqlx.ServerMessages_Type(rows.mc.pb.msgType) {
case Mysqlx.ServerMessages_RESULTSET_ROW:
if err := processRow(rows, dest); err != nil {
return errors.Trace(err)
}
done = true
case Mysqlx.ServerMessages_NOTICE:
if err := rows.mc.processNotice("mysqlXRows.Next"); err != nil {
return errors.Trace(err)
}
case Mysqlx.ServerMessages_RESULTSET_FETCH_DONE:
rows.state = queryStateWaitingExecuteOk
// done = true SKIP to next message
rows.mc.pb = nil
case Mysqlx.ServerMessages_ERROR:
// should treat each message
rows.state = queryStateDone
done = true
rows.mc.pb = nil
default:
log.Fatalf("mysqlXRowx.Next received unexpected message type: %s", printableMsgTypeIn(Mysqlx.ServerMessages_Type(rows.mc.pb.msgType)))
}
case queryStateDone, queryStateWaitingExecuteOk:
return io.EOF
default:
log.Fatalf("mysqlXRows.Next: called in unexpected state: %v", rows.state.String())
// otherwise assume everything is fine
}
}
return nil
}
// Expectation here is to receive one of
// - RESULTSET_COLUMN_META_DATA (expected)
// - NOTICE (may happen, not expected)
// - RESULTSET_ROW (expected, changes state)
func (rows *mysqlXRows) collectColumnMetaData() error {
if rows == nil {
return errors.Errorf("BUG: mysqlXRows.collectColumnMetaData: rows == nil")
}
for !rows.state.Finished() && rows.state != queryStateWaitingRow {
if err := rows.readMsgIfNecessary(); err != nil {
return errors.Errorf("DEBUG: mysqlXRows.collectColumnMetaData: failed to read data if necessary")
}
switch Mysqlx.ServerMessages_Type(rows.mc.pb.msgType) {
case Mysqlx.ServerMessages_RESULTSET_COLUMN_META_DATA:
if err := rows.addColumnMetaData(); err != nil {
return errors.Trace(err)
}
case Mysqlx.ServerMessages_RESULTSET_ROW:
rows.state = queryStateWaitingRow
case Mysqlx.ServerMessages_NOTICE:
// don't really expect a notice but process it
if err := rows.mc.processNotice("mysqlxRows.collectColumnMetaData"); err != nil {
return errors.Trace(err)
}
case Mysqlx.ServerMessages_ERROR:
if err := rows.mc.processErrorMsg(); err != nil {
return errors.Trace(err)
}
rows.state = queryStateError
case Mysqlx.ServerMessages_RESULTSET_FETCH_DONE:
rows.state = queryStateWaitingExecuteOk
rows.mc.pb = nil
case Mysqlx.ServerMessages_SQL_STMT_EXECUTE_OK:
rows.state = queryStateDone
rows.mc.pb = nil
default:
e := errors.Errorf("mysqlXRows.collectColumnMetaData: received unexpected message type: %s",
printableMsgTypeIn(Mysqlx.ServerMessages_Type(rows.mc.pb.msgType)))
rows.state = queryStateError
rows.mc.pb = nil
return e
}
}
return nil
}
| _vendor/src/github.com/pingcap/mysqlx-driver/rows.go | 0 | https://github.com/pingcap/tidb/commit/b35e024ace8a525b61fd53eae22e9723d06454ca | [
0.028904829174280167,
0.0012503890320658684,
0.00016324134776368737,
0.0001713150559226051,
0.0054238480515778065
] |
{
"id": 4,
"code_window": [
"\t}\n",
"\tdelete(vars.PreparedStmtNameToID, e.Name)\n",
"\tdelete(vars.PreparedStmts, id)\n",
"\treturn nil, nil\n",
"}\n",
"\n",
"// Close implements Executor Close interface.\n",
"func (e *DeallocateExec) Close() error {\n",
"\treturn nil\n",
"}\n",
"\n",
"// Open implements Executor Open interface.\n",
"func (e *DeallocateExec) Open(goCtx goctx.Context) error {\n",
"\treturn nil\n",
"}\n",
"\n",
"// CompileExecutePreparedStmt compiles a session Execute command to a stmt.Statement.\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "executor/prepared.go",
"type": "replace",
"edit_start_line_idx": 247
} | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package expression
import (
"github.com/cznic/mathutil"
"github.com/juju/errors"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/mysql"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/types/json"
"github.com/pingcap/tidb/util/charset"
"github.com/pingcap/tipb/go-tipb"
)
var (
_ functionClass = &caseWhenFunctionClass{}
_ functionClass = &ifFunctionClass{}
_ functionClass = &ifNullFunctionClass{}
)
var (
_ builtinFunc = &builtinCaseWhenIntSig{}
_ builtinFunc = &builtinCaseWhenRealSig{}
_ builtinFunc = &builtinCaseWhenDecimalSig{}
_ builtinFunc = &builtinCaseWhenStringSig{}
_ builtinFunc = &builtinCaseWhenTimeSig{}
_ builtinFunc = &builtinCaseWhenDurationSig{}
_ builtinFunc = &builtinIfNullIntSig{}
_ builtinFunc = &builtinIfNullRealSig{}
_ builtinFunc = &builtinIfNullDecimalSig{}
_ builtinFunc = &builtinIfNullStringSig{}
_ builtinFunc = &builtinIfNullTimeSig{}
_ builtinFunc = &builtinIfNullDurationSig{}
_ builtinFunc = &builtinIfNullJSONSig{}
_ builtinFunc = &builtinIfIntSig{}
_ builtinFunc = &builtinIfRealSig{}
_ builtinFunc = &builtinIfDecimalSig{}
_ builtinFunc = &builtinIfStringSig{}
_ builtinFunc = &builtinIfTimeSig{}
_ builtinFunc = &builtinIfDurationSig{}
_ builtinFunc = &builtinIfJSONSig{}
)
type caseWhenFunctionClass struct {
baseFunctionClass
}
// Infer result type for builtin IF, IFNULL && NULLIF.
func inferType4ControlFuncs(lhs, rhs *types.FieldType) *types.FieldType {
resultFieldType := &types.FieldType{}
if lhs.Tp == mysql.TypeNull {
*resultFieldType = *rhs
// If both arguments are NULL, make resulting type BINARY(0).
if rhs.Tp == mysql.TypeNull {
resultFieldType.Tp = mysql.TypeString
resultFieldType.Flen, resultFieldType.Decimal = 0, 0
types.SetBinChsClnFlag(resultFieldType)
}
} else if rhs.Tp == mysql.TypeNull {
*resultFieldType = *lhs
} else {
var unsignedFlag uint
evalType := types.AggregateEvalType([]*types.FieldType{lhs, rhs}, &unsignedFlag)
resultFieldType = types.AggFieldType([]*types.FieldType{lhs, rhs})
if evalType == types.ETInt {
resultFieldType.Decimal = 0
} else {
if lhs.Decimal == types.UnspecifiedLength || rhs.Decimal == types.UnspecifiedLength {
resultFieldType.Decimal = types.UnspecifiedLength
} else {
resultFieldType.Decimal = mathutil.Max(lhs.Decimal, rhs.Decimal)
}
}
if types.IsNonBinaryStr(lhs) && !types.IsBinaryStr(rhs) {
resultFieldType.Charset, resultFieldType.Collate, resultFieldType.Flag = charset.CharsetUTF8, charset.CollationUTF8, 0
if mysql.HasBinaryFlag(lhs.Flag) {
resultFieldType.Flag |= mysql.BinaryFlag
}
} else if types.IsNonBinaryStr(rhs) && !types.IsBinaryStr(lhs) {
resultFieldType.Charset, resultFieldType.Collate, resultFieldType.Flag = charset.CharsetUTF8, charset.CollationUTF8, 0
if mysql.HasBinaryFlag(rhs.Flag) {
resultFieldType.Flag |= mysql.BinaryFlag
}
} else if types.IsBinaryStr(lhs) || types.IsBinaryStr(rhs) || !evalType.IsStringKind() {
types.SetBinChsClnFlag(resultFieldType)
} else {
resultFieldType.Charset, resultFieldType.Collate, resultFieldType.Flag = charset.CharsetUTF8, charset.CollationUTF8, 0
}
if evalType == types.ETDecimal || evalType == types.ETInt {
lhsUnsignedFlag, rhsUnsignedFlag := mysql.HasUnsignedFlag(lhs.Flag), mysql.HasUnsignedFlag(rhs.Flag)
lhsFlagLen, rhsFlagLen := 0, 0
if !lhsUnsignedFlag {
lhsFlagLen = 1
}
if !rhsUnsignedFlag {
rhsFlagLen = 1
}
lhsFlen := lhs.Flen - lhsFlagLen
rhsFlen := rhs.Flen - rhsFlagLen
if lhs.Decimal != types.UnspecifiedLength {
lhsFlen -= lhs.Decimal
}
if lhs.Decimal != types.UnspecifiedLength {
rhsFlen -= rhs.Decimal
}
resultFieldType.Flen = mathutil.Max(lhsFlen, rhsFlen) + resultFieldType.Decimal + 1
} else {
resultFieldType.Flen = mathutil.Max(lhs.Flen, rhs.Flen)
}
}
// Fix decimal for int and string.
resultEvalType := resultFieldType.EvalType()
if resultEvalType == types.ETInt {
resultFieldType.Decimal = 0
} else if resultEvalType == types.ETString {
if lhs.Tp != mysql.TypeNull || rhs.Tp != mysql.TypeNull {
resultFieldType.Decimal = types.UnspecifiedLength
}
}
return resultFieldType
}
func (c *caseWhenFunctionClass) getFunction(ctx context.Context, args []Expression) (sig builtinFunc, err error) {
if err = c.verifyArgs(args); err != nil {
return nil, errors.Trace(err)
}
l := len(args)
// Fill in each 'THEN' clause parameter type.
fieldTps := make([]*types.FieldType, 0, (l+1)/2)
decimal, flen, isBinaryStr := args[1].GetType().Decimal, 0, false
for i := 1; i < l; i += 2 {
fieldTps = append(fieldTps, args[i].GetType())
decimal = mathutil.Max(decimal, args[i].GetType().Decimal)
flen = mathutil.Max(flen, args[i].GetType().Flen)
isBinaryStr = isBinaryStr || types.IsBinaryStr(args[i].GetType())
}
if l%2 == 1 {
fieldTps = append(fieldTps, args[l-1].GetType())
decimal = mathutil.Max(decimal, args[l-1].GetType().Decimal)
flen = mathutil.Max(flen, args[l-1].GetType().Flen)
isBinaryStr = isBinaryStr || types.IsBinaryStr(args[l-1].GetType())
}
fieldTp := types.AggFieldType(fieldTps)
tp := fieldTp.EvalType()
if tp == types.ETInt {
decimal = 0
}
fieldTp.Decimal, fieldTp.Flen = decimal, flen
if fieldTp.EvalType().IsStringKind() && !isBinaryStr {
fieldTp.Charset, fieldTp.Collate = mysql.DefaultCharset, mysql.DefaultCollationName
}
// Set retType to BINARY(0) if all arguments are of type NULL.
if fieldTp.Tp == mysql.TypeNull {
fieldTp.Flen, fieldTp.Decimal = 0, -1
types.SetBinChsClnFlag(fieldTp)
}
argTps := make([]types.EvalType, 0, l)
for i := 0; i < l-1; i += 2 {
argTps = append(argTps, types.ETInt, tp)
}
if l%2 == 1 {
argTps = append(argTps, tp)
}
bf := newBaseBuiltinFuncWithTp(ctx, args, tp, argTps...)
bf.tp = fieldTp
switch tp {
case types.ETInt:
bf.tp.Decimal = 0
sig = &builtinCaseWhenIntSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_CaseWhenInt)
case types.ETReal:
sig = &builtinCaseWhenRealSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_CaseWhenReal)
case types.ETDecimal:
sig = &builtinCaseWhenDecimalSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_CaseWhenDecimal)
case types.ETString:
bf.tp.Decimal = types.UnspecifiedLength
sig = &builtinCaseWhenStringSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_CaseWhenString)
case types.ETDatetime, types.ETTimestamp:
sig = &builtinCaseWhenTimeSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_CaseWhenTime)
case types.ETDuration:
sig = &builtinCaseWhenDurationSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_CaseWhenDuration)
}
return sig, nil
}
type builtinCaseWhenIntSig struct {
baseBuiltinFunc
}
// evalInt evals a builtinCaseWhenIntSig.
// See https://dev.mysql.com/doc/refman/5.7/en/case.html
func (b *builtinCaseWhenIntSig) evalInt(row types.Row) (ret int64, isNull bool, err error) {
sc := b.ctx.GetSessionVars().StmtCtx
var condition int64
args, l := b.getArgs(), len(b.getArgs())
for i := 0; i < l-1; i += 2 {
condition, isNull, err = args[i].EvalInt(row, sc)
if err != nil {
return 0, isNull, errors.Trace(err)
}
if isNull || condition == 0 {
continue
}
ret, isNull, err = args[i+1].EvalInt(row, sc)
return ret, isNull, errors.Trace(err)
}
// when clause(condition, result) -> args[i], args[i+1]; (i >= 0 && i+1 < l-1)
// else clause -> args[l-1]
// If case clause has else clause, l%2 == 1.
if l%2 == 1 {
ret, isNull, err = args[l-1].EvalInt(row, sc)
return ret, isNull, errors.Trace(err)
}
return ret, true, nil
}
type builtinCaseWhenRealSig struct {
baseBuiltinFunc
}
// evalReal evals a builtinCaseWhenRealSig.
// See https://dev.mysql.com/doc/refman/5.7/en/case.html
func (b *builtinCaseWhenRealSig) evalReal(row types.Row) (ret float64, isNull bool, err error) {
sc := b.ctx.GetSessionVars().StmtCtx
var condition int64
args, l := b.getArgs(), len(b.getArgs())
for i := 0; i < l-1; i += 2 {
condition, isNull, err = args[i].EvalInt(row, sc)
if err != nil {
return 0, isNull, errors.Trace(err)
}
if isNull || condition == 0 {
continue
}
ret, isNull, err = args[i+1].EvalReal(row, sc)
return ret, isNull, errors.Trace(err)
}
// when clause(condition, result) -> args[i], args[i+1]; (i >= 0 && i+1 < l-1)
// else clause -> args[l-1]
// If case clause has else clause, l%2 == 1.
if l%2 == 1 {
ret, isNull, err = args[l-1].EvalReal(row, sc)
return ret, isNull, errors.Trace(err)
}
return ret, true, nil
}
type builtinCaseWhenDecimalSig struct {
baseBuiltinFunc
}
// evalDecimal evals a builtinCaseWhenDecimalSig.
// See https://dev.mysql.com/doc/refman/5.7/en/case.html
func (b *builtinCaseWhenDecimalSig) evalDecimal(row types.Row) (ret *types.MyDecimal, isNull bool, err error) {
sc := b.ctx.GetSessionVars().StmtCtx
var condition int64
args, l := b.getArgs(), len(b.getArgs())
for i := 0; i < l-1; i += 2 {
condition, isNull, err = args[i].EvalInt(row, sc)
if err != nil {
return nil, isNull, errors.Trace(err)
}
if isNull || condition == 0 {
continue
}
ret, isNull, err = args[i+1].EvalDecimal(row, sc)
return ret, isNull, errors.Trace(err)
}
// when clause(condition, result) -> args[i], args[i+1]; (i >= 0 && i+1 < l-1)
// else clause -> args[l-1]
// If case clause has else clause, l%2 == 1.
if l%2 == 1 {
ret, isNull, err = args[l-1].EvalDecimal(row, sc)
return ret, isNull, errors.Trace(err)
}
return ret, true, nil
}
// builtinCaseWhenStringSig implements CASE WHEN with a string result.
type builtinCaseWhenStringSig struct {
	baseBuiltinFunc
}

// evalString evals a builtinCaseWhenStringSig.
// See https://dev.mysql.com/doc/refman/5.7/en/case.html
//
// Arguments are (condition, result) pairs; a trailing odd argument is the
// ELSE result.  The first non-NULL, non-zero condition selects its result.
func (b *builtinCaseWhenStringSig) evalString(row types.Row) (ret string, isNull bool, err error) {
	sc := b.ctx.GetSessionVars().StmtCtx
	var condition int64
	args, l := b.getArgs(), len(b.getArgs())
	for i := 0; i < l-1; i += 2 {
		condition, isNull, err = args[i].EvalInt(row, sc)
		if err != nil {
			return "", isNull, errors.Trace(err)
		}
		if isNull || condition == 0 {
			continue
		}
		ret, isNull, err = args[i+1].EvalString(row, sc)
		return ret, isNull, errors.Trace(err)
	}
	// when clause(condition, result) -> args[i], args[i+1]; (i >= 0 && i+1 < l-1)
	// else clause -> args[l-1]
	// If case clause has else clause, l%2 == 1.
	if l%2 == 1 {
		ret, isNull, err = args[l-1].EvalString(row, sc)
		return ret, isNull, errors.Trace(err)
	}
	// No WHEN matched and no ELSE clause: the result is NULL.
	return ret, true, nil
}
// builtinCaseWhenTimeSig implements CASE WHEN with a datetime/timestamp result.
type builtinCaseWhenTimeSig struct {
	baseBuiltinFunc
}

// evalTime evals a builtinCaseWhenTimeSig.
// See https://dev.mysql.com/doc/refman/5.7/en/case.html
//
// Arguments are (condition, result) pairs; a trailing odd argument is the
// ELSE result.  The first non-NULL, non-zero condition selects its result.
func (b *builtinCaseWhenTimeSig) evalTime(row types.Row) (ret types.Time, isNull bool, err error) {
	sc := b.ctx.GetSessionVars().StmtCtx
	var condition int64
	args, l := b.getArgs(), len(b.getArgs())
	for i := 0; i < l-1; i += 2 {
		condition, isNull, err = args[i].EvalInt(row, sc)
		if err != nil {
			return ret, isNull, errors.Trace(err)
		}
		if isNull || condition == 0 {
			continue
		}
		ret, isNull, err = args[i+1].EvalTime(row, sc)
		return ret, isNull, errors.Trace(err)
	}
	// when clause(condition, result) -> args[i], args[i+1]; (i >= 0 && i+1 < l-1)
	// else clause -> args[l-1]
	// If case clause has else clause, l%2 == 1.
	if l%2 == 1 {
		ret, isNull, err = args[l-1].EvalTime(row, sc)
		return ret, isNull, errors.Trace(err)
	}
	// No WHEN matched and no ELSE clause: the result is NULL.
	return ret, true, nil
}
// builtinCaseWhenDurationSig implements CASE WHEN with a duration result.
type builtinCaseWhenDurationSig struct {
	baseBuiltinFunc
}

// evalDuration evals a builtinCaseWhenDurationSig.
// See https://dev.mysql.com/doc/refman/5.7/en/case.html
//
// Arguments are (condition, result) pairs; a trailing odd argument is the
// ELSE result.  The first non-NULL, non-zero condition selects its result.
func (b *builtinCaseWhenDurationSig) evalDuration(row types.Row) (ret types.Duration, isNull bool, err error) {
	sc := b.ctx.GetSessionVars().StmtCtx
	var condition int64
	args, l := b.getArgs(), len(b.getArgs())
	for i := 0; i < l-1; i += 2 {
		condition, isNull, err = args[i].EvalInt(row, sc)
		if err != nil {
			// Propagate isNull unchanged on error, consistent with the
			// other builtinCaseWhen*Sig implementations (this previously
			// forced isNull to true here).
			return ret, isNull, errors.Trace(err)
		}
		if isNull || condition == 0 {
			continue
		}
		ret, isNull, err = args[i+1].EvalDuration(row, sc)
		return ret, isNull, errors.Trace(err)
	}
	// when clause(condition, result) -> args[i], args[i+1]; (i >= 0 && i+1 < l-1)
	// else clause -> args[l-1]
	// If case clause has else clause, l%2 == 1.
	if l%2 == 1 {
		ret, isNull, err = args[l-1].EvalDuration(row, sc)
		return ret, isNull, errors.Trace(err)
	}
	// No WHEN matched and no ELSE clause: the result is NULL.
	return ret, true, nil
}
// ifFunctionClass builds signatures for IF(expr1, expr2, expr3).
type ifFunctionClass struct {
	baseFunctionClass
}

// See https://dev.mysql.com/doc/refman/5.7/en/control-flow-functions.html#function_if
//
// getFunction infers the result type from the two branch arguments and
// returns the concrete signature for that evaluation type.  The first
// argument is always evaluated as an integer condition.
func (c *ifFunctionClass) getFunction(ctx context.Context, args []Expression) (sig builtinFunc, err error) {
	if err = c.verifyArgs(args); err != nil {
		return nil, errors.Trace(err)
	}
	retTp := inferType4ControlFuncs(args[1].GetType(), args[2].GetType())
	evalTps := retTp.EvalType()
	// Argument evaluation types: condition is ETInt; both branches share
	// the inferred result type.
	bf := newBaseBuiltinFuncWithTp(ctx, args, evalTps, types.ETInt, evalTps, evalTps)
	bf.tp = retTp
	switch evalTps {
	case types.ETInt:
		sig = &builtinIfIntSig{bf}
		sig.setPbCode(tipb.ScalarFuncSig_IfInt)
	case types.ETReal:
		sig = &builtinIfRealSig{bf}
		sig.setPbCode(tipb.ScalarFuncSig_IfReal)
	case types.ETDecimal:
		sig = &builtinIfDecimalSig{bf}
		sig.setPbCode(tipb.ScalarFuncSig_IfDecimal)
	case types.ETString:
		sig = &builtinIfStringSig{bf}
		sig.setPbCode(tipb.ScalarFuncSig_IfString)
	case types.ETDatetime, types.ETTimestamp:
		sig = &builtinIfTimeSig{bf}
		sig.setPbCode(tipb.ScalarFuncSig_IfTime)
	case types.ETDuration:
		sig = &builtinIfDurationSig{bf}
		sig.setPbCode(tipb.ScalarFuncSig_IfDuration)
	case types.ETJson:
		sig = &builtinIfJSONSig{bf}
		sig.setPbCode(tipb.ScalarFuncSig_IfJson)
	}
	return sig, nil
}
// builtinIfIntSig implements IF(expr1, expr2, expr3) for integer results.
type builtinIfIntSig struct {
	baseBuiltinFunc
}

// evalInt yields expr2 when expr1 is non-NULL and non-zero, else expr3.
// The "then" branch is always evaluated before the condition is tested,
// and its error (if any) takes precedence; the "else" branch is only
// evaluated when it is the selected result.
func (b *builtinIfIntSig) evalInt(row types.Row) (int64, bool, error) {
	sc := b.ctx.GetSessionVars().StmtCtx
	cond, condNull, err := b.args[0].EvalInt(row, sc)
	if err != nil {
		return 0, true, errors.Trace(err)
	}
	thenVal, thenNull, err := b.args[1].EvalInt(row, sc)
	if err != nil || (!condNull && cond != 0) {
		return thenVal, thenNull, errors.Trace(err)
	}
	elseVal, elseNull, err := b.args[2].EvalInt(row, sc)
	return elseVal, elseNull, errors.Trace(err)
}
// builtinIfRealSig implements IF(expr1, expr2, expr3) for real results.
type builtinIfRealSig struct {
	baseBuiltinFunc
}

// evalReal returns arg1 when arg0 is non-NULL and non-zero, otherwise arg2.
// Note arg1 is always evaluated before the condition is tested.
func (b *builtinIfRealSig) evalReal(row types.Row) (ret float64, isNull bool, err error) {
	sc := b.ctx.GetSessionVars().StmtCtx
	arg0, isNull0, err := b.args[0].EvalInt(row, sc)
	if err != nil {
		return 0, true, errors.Trace(err)
	}
	arg1, isNull1, err := b.args[1].EvalReal(row, sc)
	if (!isNull0 && arg0 != 0) || err != nil {
		return arg1, isNull1, errors.Trace(err)
	}
	arg2, isNull2, err := b.args[2].EvalReal(row, sc)
	return arg2, isNull2, errors.Trace(err)
}
// builtinIfDecimalSig implements IF(expr1, expr2, expr3) for decimal results.
type builtinIfDecimalSig struct {
	baseBuiltinFunc
}

// evalDecimal returns arg1 when arg0 is non-NULL and non-zero, otherwise arg2.
// Note arg1 is always evaluated before the condition is tested.
func (b *builtinIfDecimalSig) evalDecimal(row types.Row) (ret *types.MyDecimal, isNull bool, err error) {
	sc := b.ctx.GetSessionVars().StmtCtx
	arg0, isNull0, err := b.args[0].EvalInt(row, sc)
	if err != nil {
		return nil, true, errors.Trace(err)
	}
	arg1, isNull1, err := b.args[1].EvalDecimal(row, sc)
	if (!isNull0 && arg0 != 0) || err != nil {
		return arg1, isNull1, errors.Trace(err)
	}
	arg2, isNull2, err := b.args[2].EvalDecimal(row, sc)
	return arg2, isNull2, errors.Trace(err)
}
// builtinIfStringSig implements IF(expr1, expr2, expr3) for string results.
type builtinIfStringSig struct {
	baseBuiltinFunc
}

// evalString returns arg1 when arg0 is non-NULL and non-zero, otherwise arg2.
// Note arg1 is always evaluated before the condition is tested.
func (b *builtinIfStringSig) evalString(row types.Row) (ret string, isNull bool, err error) {
	sc := b.ctx.GetSessionVars().StmtCtx
	arg0, isNull0, err := b.args[0].EvalInt(row, sc)
	if err != nil {
		return "", true, errors.Trace(err)
	}
	arg1, isNull1, err := b.args[1].EvalString(row, sc)
	if (!isNull0 && arg0 != 0) || err != nil {
		return arg1, isNull1, errors.Trace(err)
	}
	arg2, isNull2, err := b.args[2].EvalString(row, sc)
	return arg2, isNull2, errors.Trace(err)
}
// builtinIfTimeSig implements IF(expr1, expr2, expr3) for time results.
type builtinIfTimeSig struct {
	baseBuiltinFunc
}

// evalTime returns arg1 when arg0 is non-NULL and non-zero, otherwise arg2.
// Note arg1 is always evaluated before the condition is tested.
func (b *builtinIfTimeSig) evalTime(row types.Row) (ret types.Time, isNull bool, err error) {
	sc := b.ctx.GetSessionVars().StmtCtx
	arg0, isNull0, err := b.args[0].EvalInt(row, sc)
	if err != nil {
		return ret, true, errors.Trace(err)
	}
	arg1, isNull1, err := b.args[1].EvalTime(row, sc)
	if (!isNull0 && arg0 != 0) || err != nil {
		return arg1, isNull1, errors.Trace(err)
	}
	arg2, isNull2, err := b.args[2].EvalTime(row, sc)
	return arg2, isNull2, errors.Trace(err)
}
// builtinIfDurationSig implements IF(expr1, expr2, expr3) for duration results.
type builtinIfDurationSig struct {
	baseBuiltinFunc
}

// evalDuration returns arg1 when arg0 is non-NULL and non-zero, otherwise arg2.
// Note arg1 is always evaluated before the condition is tested.
func (b *builtinIfDurationSig) evalDuration(row types.Row) (ret types.Duration, isNull bool, err error) {
	sc := b.ctx.GetSessionVars().StmtCtx
	arg0, isNull0, err := b.args[0].EvalInt(row, sc)
	if err != nil {
		return ret, true, errors.Trace(err)
	}
	arg1, isNull1, err := b.args[1].EvalDuration(row, sc)
	if (!isNull0 && arg0 != 0) || err != nil {
		return arg1, isNull1, errors.Trace(err)
	}
	arg2, isNull2, err := b.args[2].EvalDuration(row, sc)
	return arg2, isNull2, errors.Trace(err)
}
// builtinIfJSONSig implements IF(expr1, expr2, expr3) for JSON results.
type builtinIfJSONSig struct {
	baseBuiltinFunc
}

// evalJSON returns arg1 when arg0 is non-NULL and non-zero, otherwise arg2.
// Restructured to match the other builtinIf*Sig implementations: arg2 is
// no longer evaluated when arg1 is selected (the previous code eagerly
// evaluated both branches, doing wasted work and surfacing errors from a
// branch that was not returned).
func (b *builtinIfJSONSig) evalJSON(row types.Row) (ret json.JSON, isNull bool, err error) {
	sc := b.ctx.GetSessionVars().StmtCtx
	arg0, isNull0, err := b.args[0].EvalInt(row, sc)
	if err != nil {
		return ret, true, errors.Trace(err)
	}
	arg1, isNull1, err := b.args[1].EvalJSON(row, sc)
	if (!isNull0 && arg0 != 0) || err != nil {
		return arg1, isNull1, errors.Trace(err)
	}
	arg2, isNull2, err := b.args[2].EvalJSON(row, sc)
	return arg2, isNull2, errors.Trace(err)
}
// ifNullFunctionClass builds signatures for IFNULL(expr1, expr2).
type ifNullFunctionClass struct {
	baseFunctionClass
}

// getFunction infers the result type from both arguments and returns the
// concrete signature for that evaluation type.  The result is NOT NULL
// only if either argument is NOT NULL; two NULL-typed arguments yield a
// NULL-typed result.
// See https://dev.mysql.com/doc/refman/5.7/en/control-flow-functions.html#function_ifnull
func (c *ifNullFunctionClass) getFunction(ctx context.Context, args []Expression) (sig builtinFunc, err error) {
	if err = errors.Trace(c.verifyArgs(args)); err != nil {
		return nil, errors.Trace(err)
	}
	lhs, rhs := args[0].GetType(), args[1].GetType()
	retTp := inferType4ControlFuncs(lhs, rhs)
	retTp.Flag |= (lhs.Flag & mysql.NotNullFlag) | (rhs.Flag & mysql.NotNullFlag)
	if lhs.Tp == mysql.TypeNull && rhs.Tp == mysql.TypeNull {
		retTp.Tp = mysql.TypeNull
		retTp.Flen, retTp.Decimal = 0, -1
		types.SetBinChsClnFlag(retTp)
	}
	evalTps := retTp.EvalType()
	bf := newBaseBuiltinFuncWithTp(ctx, args, evalTps, evalTps, evalTps)
	bf.tp = retTp
	switch evalTps {
	case types.ETInt:
		sig = &builtinIfNullIntSig{bf}
		sig.setPbCode(tipb.ScalarFuncSig_IfNullInt)
	case types.ETReal:
		sig = &builtinIfNullRealSig{bf}
		sig.setPbCode(tipb.ScalarFuncSig_IfNullReal)
	case types.ETDecimal:
		sig = &builtinIfNullDecimalSig{bf}
		sig.setPbCode(tipb.ScalarFuncSig_IfNullDecimal)
	case types.ETString:
		sig = &builtinIfNullStringSig{bf}
		sig.setPbCode(tipb.ScalarFuncSig_IfNullString)
	case types.ETDatetime, types.ETTimestamp:
		sig = &builtinIfNullTimeSig{bf}
		sig.setPbCode(tipb.ScalarFuncSig_IfNullTime)
	case types.ETDuration:
		sig = &builtinIfNullDurationSig{bf}
		sig.setPbCode(tipb.ScalarFuncSig_IfNullDuration)
	case types.ETJson:
		sig = &builtinIfNullJSONSig{bf}
		sig.setPbCode(tipb.ScalarFuncSig_IfNullJson)
	}
	return sig, nil
}
// builtinIfNullIntSig implements IFNULL(expr1, expr2) for integer results.
type builtinIfNullIntSig struct {
	baseBuiltinFunc
}

// evalInt returns expr1 unless it is NULL, in which case expr2 is
// evaluated and returned.  expr2 is only evaluated when needed.
func (b *builtinIfNullIntSig) evalInt(row types.Row) (int64, bool, error) {
	sc := b.ctx.GetSessionVars().StmtCtx
	lhs, lhsNull, err := b.args[0].EvalInt(row, sc)
	if err != nil {
		return lhs, true, errors.Trace(err)
	}
	if !lhsNull {
		return lhs, false, nil
	}
	rhs, rhsNull, err := b.args[1].EvalInt(row, sc)
	if err != nil {
		return rhs, true, errors.Trace(err)
	}
	return rhs, rhsNull, nil
}
// builtinIfNullRealSig implements IFNULL(expr1, expr2) for real results.
type builtinIfNullRealSig struct {
	baseBuiltinFunc
}

// evalReal returns arg0 unless it is NULL, in which case arg1 is returned.
func (b *builtinIfNullRealSig) evalReal(row types.Row) (float64, bool, error) {
	sc := b.ctx.GetSessionVars().StmtCtx
	arg0, isNull, err := b.args[0].EvalReal(row, sc)
	if !isNull || err != nil {
		return arg0, err != nil, errors.Trace(err)
	}
	arg1, isNull, err := b.args[1].EvalReal(row, sc)
	return arg1, isNull || err != nil, errors.Trace(err)
}
// builtinIfNullDecimalSig implements IFNULL(expr1, expr2) for decimal results.
type builtinIfNullDecimalSig struct {
	baseBuiltinFunc
}

// evalDecimal returns arg0 unless it is NULL, in which case arg1 is returned.
func (b *builtinIfNullDecimalSig) evalDecimal(row types.Row) (*types.MyDecimal, bool, error) {
	sc := b.ctx.GetSessionVars().StmtCtx
	arg0, isNull, err := b.args[0].EvalDecimal(row, sc)
	if !isNull || err != nil {
		return arg0, err != nil, errors.Trace(err)
	}
	arg1, isNull, err := b.args[1].EvalDecimal(row, sc)
	return arg1, isNull || err != nil, errors.Trace(err)
}
// builtinIfNullStringSig implements IFNULL(expr1, expr2) for string results.
type builtinIfNullStringSig struct {
	baseBuiltinFunc
}

// evalString returns arg0 unless it is NULL, in which case arg1 is returned.
func (b *builtinIfNullStringSig) evalString(row types.Row) (string, bool, error) {
	sc := b.ctx.GetSessionVars().StmtCtx
	arg0, isNull, err := b.args[0].EvalString(row, sc)
	if !isNull || err != nil {
		return arg0, err != nil, errors.Trace(err)
	}
	arg1, isNull, err := b.args[1].EvalString(row, sc)
	return arg1, isNull || err != nil, errors.Trace(err)
}
// builtinIfNullTimeSig implements IFNULL(expr1, expr2) for time results.
type builtinIfNullTimeSig struct {
	baseBuiltinFunc
}

// evalTime returns arg0 unless it is NULL, in which case arg1 is returned.
func (b *builtinIfNullTimeSig) evalTime(row types.Row) (types.Time, bool, error) {
	sc := b.ctx.GetSessionVars().StmtCtx
	arg0, isNull, err := b.args[0].EvalTime(row, sc)
	if !isNull || err != nil {
		return arg0, err != nil, errors.Trace(err)
	}
	arg1, isNull, err := b.args[1].EvalTime(row, sc)
	return arg1, isNull || err != nil, errors.Trace(err)
}
// builtinIfNullDurationSig implements IFNULL(expr1, expr2) for duration results.
type builtinIfNullDurationSig struct {
	baseBuiltinFunc
}

// evalDuration returns arg0 unless it is NULL, in which case arg1 is returned.
func (b *builtinIfNullDurationSig) evalDuration(row types.Row) (types.Duration, bool, error) {
	sc := b.ctx.GetSessionVars().StmtCtx
	arg0, isNull, err := b.args[0].EvalDuration(row, sc)
	if !isNull || err != nil {
		return arg0, err != nil, errors.Trace(err)
	}
	arg1, isNull, err := b.args[1].EvalDuration(row, sc)
	return arg1, isNull || err != nil, errors.Trace(err)
}
// builtinIfNullJSONSig implements IFNULL(expr1, expr2) for JSON results.
type builtinIfNullJSONSig struct {
	baseBuiltinFunc
}

// evalJSON returns arg0 unless it is NULL, in which case arg1 is returned.
func (b *builtinIfNullJSONSig) evalJSON(row types.Row) (json.JSON, bool, error) {
	sc := b.ctx.GetSessionVars().StmtCtx
	arg0, isNull, err := b.args[0].EvalJSON(row, sc)
	// Bug fix: also bail out on error, as every other builtinIfNull*Sig
	// does.  Previously an error accompanied by isNull==true was silently
	// dropped and the second argument was evaluated instead.
	if !isNull || err != nil {
		return arg0, err != nil, errors.Trace(err)
	}
	arg1, isNull, err := b.args[1].EvalJSON(row, sc)
	return arg1, isNull || err != nil, errors.Trace(err)
}
| expression/builtin_control.go | 0 | https://github.com/pingcap/tidb/commit/b35e024ace8a525b61fd53eae22e9723d06454ca | [
0.0004249826306477189,
0.0001902193616842851,
0.0001611226034583524,
0.00017221813322976232,
0.000048593388783046976
] |
{
"id": 0,
"code_window": [
"\t\tfor i := range v {\n",
"\t\t\ta[i] = v[i];\n",
"\t\t}\n",
"\t\tv = a;\n",
"\t}\n",
"\t*vp = v[0:n+1];\n",
"\tv[n] = p;\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\tv = v[0:n+1];\n"
],
"file_path": "src/cmd/gobuild/gobuild.go",
"type": "replace",
"edit_start_line_idx": 73
} | // Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gobuild
import (
"bufio";
"exec";
"fmt";
"io";
"go/ast";
"go/parser";
"os";
"path";
"sort";
"strconv";
"strings";
)
const (
ShowErrors = 1<<iota;
ForceDisplay;
)
var (
theChar string;
goarch string;
goos string;
bin = make(map[string] string);
)
var theChars = map[string] string {
"amd64": "6",
"386": "8",
"arm": "5"
}
const ObjDir = "_obj"
// fatal prints a formatted error message (prefixed "gobuild:") to
// standard error and exits with a non-zero status.
func fatal(format string, args ...) {
	fmt.Fprintf(os.Stderr, "gobuild: %s\n", fmt.Sprintf(format, args));
	os.Exit(1);
}
// init reads $GOARCH and $GOOS, maps the architecture to its toolchain
// character (5/6/8), and resolves the full paths of the compiler, C
// compiler, assembler, and gopack binaries into bin, dying on failure.
func init() {
	var err os.Error;
	goarch, err = os.Getenv("GOARCH");
	goos, err = os.Getenv("GOOS");
	var ok bool;
	theChar, ok = theChars[goarch];
	if !ok {
		fatal("unknown $GOARCH: %s", goarch);
	}
	var binaries = []string{
		theChar + "g",
		theChar + "c",
		theChar + "a",
		"gopack",
	};
	// Resolve each tool via $PATH once, up front.
	for i, v := range binaries {
		var s string;
		if s, err = exec.LookPath(v); err != nil {
			fatal("cannot find binary %s", v);
		}
		bin[v] = s;
	}
}
// PushString appends p to the vector *vp, growing the underlying
// array (with room to spare) when it is full.
func PushString(vp *[]string, p string) {
	v := *vp;
	n := len(v);
	if n >= cap(v) {
		// Grow to roughly double the current size before appending.
		m := 2*n + 10;
		a := make([]string, n, m);
		for i := range v {
			a[i] = v[i];
		}
		v = a;
	}
	// Extend v itself before storing element n.  The previous code
	// resliced *vp but then indexed the still-short v, which is out of
	// range and panics.
	v = v[0:n+1];
	v[n] = p;
	*vp = v;
}
func run(argv []string, flag int) (ok bool) {
argv0 := bin[argv[0]];
null, err := os.Open("/dev/null", os.O_RDWR, 0);
if err != nil {
fatal("open /dev/null: %s", err);
}
defer null.Close();
r, w, err := os.Pipe();
if err != nil {
fatal("pipe: %s", err);
}
pid, err := os.ForkExec(argv0, argv, os.Environ(), "", []*os.File{null, w, w});
defer r.Close();
w.Close();
if err != nil {
return false;
}
// Read the first line of output, if any. Discard the rest.
// If there is output and ShowErrors is set, show it,
// preceded by a shell command line.
// If ForceDisplay is set, we show the command even
// if there's no output; this gets set if we're just trying
// to keep the user informed.
b := bufio.NewReader(r);
line, err := b.ReadLineString('\n', true);
if flag & ShowErrors != 0 && line != "" || flag & ForceDisplay != 0 {
fmt.Fprint(os.Stderr, "$ ");
for i, s := range argv {
fmt.Fprint(os.Stderr, s, " ");
}
fmt.Fprint(os.Stderr, "\n");
fmt.Fprint(os.Stderr, " ", line);
io.Copy(r, null); // don't let process block on pipe
}
waitmsg, err := os.Wait(pid, 0);
if err != nil {
return false;
}
return waitmsg.Exited() && waitmsg.ExitStatus() == 0;
}
// Build runs the given compiler command line with file appended as the
// final argument, reporting whether the compilation succeeded.
func Build(cmd []string, file string, flag int) (ok bool) {
	var argv []string;
	for i, c := range cmd {
		PushString(&argv, c);
	}
	PushString(&argv, file);
	return run(argv, flag);
}
// Archive adds files to the package archive pkg using "gopack grc",
// dying if the archiver fails.
func Archive(pkg string, files []string) {
	argv := []string{ "gopack", "grc", pkg };
	for i, file := range files {
		PushString(&argv, file);
	}
	if !run(argv, ShowErrors) {
		fatal("archive failed");
	}
}
// Compiler returns the toolchain command line prefix for compiling file,
// chosen by its extension: .go -> Go compiler (with -I on the object
// directory), .c -> C compiler, .s -> assembler.  Dies on any other
// extension.
func Compiler(file string) []string {
	switch {
	case strings.HasSuffix(file, ".go"):
		return []string{ theChar + "g", "-I", ObjDir };
	case strings.HasSuffix(file, ".c"):
		return []string{ theChar + "c", "-FVw" };
	case strings.HasSuffix(file, ".s"):
		return []string{ theChar + "a" };
	}
	fatal("don't know how to compile %s", file);
	return nil;
}
// Object returns file with its extension replaced by suffix
// (e.g. "x.go" with suffix "6" -> "x.6").
func Object(file, suffix string) string {
	ext := path.Ext(file);
	return file[0:len(file)-len(ext)] + "." + suffix;
}
// dollarString returns s with literal goarch/goos values
// replaced by $lGOARCHr where l and r are the specified delimiters.
func dollarString(s, l, r string) string {
	out := "";
	j := 0;	// index of last byte in s copied to out.
	for i := 0; i < len(s); {
		switch {
		case i+len(goarch) <= len(s) && s[i:i+len(goarch)] == goarch:
			out += s[j:i];
			out += "$" + l + "GOARCH" + r;
			i += len(goarch);
			j = i;
		case i+len(goos) <= len(s) && s[i:i+len(goos)] == goos:
			out += s[j:i];
			out += "$" + l + "GOOS" + r;
			i += len(goos);
			j = i;
		default:
			i++;
		}
	}
	// Copy the remaining tail after the last substitution.
	out += s[j:len(s)];
	return out;
}
// dollarString wrappers.
// Print ShellString(s) or MakeString(s) depending on
// the context in which the result will be interpreted.

// ShellString formats with shell-style ${VAR} substitutions.
type ShellString string;
func (s ShellString) String() string {
	return dollarString(string(s), "{", "}");
}

// MakeString formats with make-style $(VAR) substitutions.
type MakeString string;
func (s MakeString) String() string {
	return dollarString(string(s), "(", ")");
}
// TODO(rsc): Should this be in the AST library?

// LitString concatenates the unquoted values of a run of string
// literals (as produced by the parser for adjacent strings), returning
// an error if any literal fails to unquote.
func LitString(p []*ast.StringLit) (string, os.Error) {
	s := "";
	for i, lit := range p {
		t, err := strconv.Unquote(string(lit.Value));
		if err != nil {
			return "", err;
		}
		s += t;
	}
	return s, nil;
}
// PackageImports parses the Go source file and returns its package name
// and the list of import paths it declares.  Only the import section is
// parsed (parser.ImportsOnly).
func PackageImports(file string) (pkg string, imports []string, err1 os.Error) {
	f, err := os.Open(file, os.O_RDONLY, 0);
	if err != nil {
		return "", nil, err
	}
	prog, err := parser.Parse(f, parser.ImportsOnly);
	if err != nil {
		return "", nil, err;
	}
	// Normally one must consult the types of decl and spec,
	// but we told the parser to return imports only,
	// so assume it did.
	var imp []string;
	for _, decl := range prog.Decls {
		for _, spec := range decl.(*ast.GenDecl).Specs {
			str, err := LitString(spec.(*ast.ImportSpec).Path);
			if err != nil {
				return "", nil, os.NewError("invalid import specifier");	// better than os.EINVAL
			}
			PushString(&imp, str);
		}
	}
	// TODO(rsc): should be prog.Package.Value
	return prog.Name.Value, imp, nil;
}
// SourceFiles returns the sorted names of the .go, .c, and .s files in
// dir.
func SourceFiles(dir string) ([]string, os.Error) {
	f, err := os.Open(dir, os.O_RDONLY, 0);
	if err != nil {
		return nil, err;
	}
	names, err1 := f.Readdirnames(-1);
	f.Close();
	out := make([]string, 0, len(names));
	for i, name := range names {
		if strings.HasSuffix(name, ".go")
		|| strings.HasSuffix(name, ".c")
		|| strings.HasSuffix(name, ".s") {
			n := len(out);
			out = out[0:n+1];
			out[n] = name;
		}
	}
	sort.SortStrings(out);
	return out, nil;
}
// TODO(rsc): Implement these for real as
// os.MkdirAll and os.RemoveAll and then
// make these wrappers that call fatal on error.

// MkdirAll creates name and any missing parents by shelling out to
// "mkdir -p", dying on any failure.
func MkdirAll(name string) {
	p, err := exec.Run("/bin/mkdir", []string{"mkdir", "-p", name}, os.Environ(), exec.DevNull, exec.PassThrough, exec.PassThrough);
	if err != nil {
		fatal("run /bin/mkdir: %v", err);
	}
	w, err1 := p.Wait(0);
	if err1 != nil {
		fatal("wait /bin/mkdir: %v", err);
	}
	if !w.Exited() || w.ExitStatus() != 0 {
		fatal("/bin/mkdir: %v", w);
	}
}
// RemoveAll deletes name and everything under it by shelling out to
// "rm -rf", dying on any failure.
func RemoveAll(name string) {
	p, err := exec.Run("/bin/rm", []string{"rm", "-rf", name}, os.Environ(), exec.DevNull, exec.PassThrough, exec.PassThrough);
	if err != nil {
		fatal("run /bin/rm: %v", err);
	}
	w, err1 := p.Wait(0);
	if err1 != nil {
		fatal("wait /bin/rm: %v", err);
	}
	if !w.Exited() || w.ExitStatus() != 0 {
		fatal("/bin/rm: %v", w);
	}
}
| src/cmd/gobuild/util.go | 1 | https://github.com/golang/go/commit/a45c54d1a5370f8138e7988f2a64562196566eaa | [
0.9978470802307129,
0.03379103168845177,
0.000167158868862316,
0.00019050466653425246,
0.17604634165763855
] |
{
"id": 0,
"code_window": [
"\t\tfor i := range v {\n",
"\t\t\ta[i] = v[i];\n",
"\t\t}\n",
"\t\tv = a;\n",
"\t}\n",
"\t*vp = v[0:n+1];\n",
"\tv[n] = p;\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\tv = v[0:n+1];\n"
],
"file_path": "src/cmd/gobuild/gobuild.go",
"type": "replace",
"edit_start_line_idx": 73
} | {.section Dirs}
<h2>Subdirectories</h2>
{.repeated section @}
<a href="{Name|html}/">{Name|html}</a><br />
{.end}
<hr />
{.end}
{.section PDoc}
<h1>package {PackageName|html}</h1>
<p><code>import "{ImportPath|html}"</code></p>
{Doc|html-comment}
{.section Consts}
<h2>Constants</h2>
{.repeated section @}
{Doc|html-comment}
<pre>{Decl|html}</pre>
{.end}
{.end}
{.section Vars}
<hr />
<h2>Variables</h2>
{.repeated section @}
{Doc|html-comment}
<pre>{Decl|html}</pre>
{.end}
{.end}
{.section Funcs}
<hr />
{.repeated section @}
<h2>func {Name|html}</h2>
<p><code>{Decl|html}</code></p>
{Doc|html-comment}
{.end}
{.end}
{.section Types}
{.repeated section @}
<hr />
<h2>type {.section Type}{Name|html}{.end}</h2>
{Doc|html-comment}
<p><pre>{Decl|html}</pre></p>
{.repeated section Factories}
<h3>func {Name|html}</h3>
<p><code>{Decl|html}</code></p>
{Doc|html-comment}
{.end}
{.repeated section Methods}
<h3>func ({Recv|html}) {Name|html}</h3>
<p><code>{Decl|html}</code></p>
{Doc|html-comment}
{.end}
{.end}
{.end}
{.end}
| lib/godoc/package.html | 0 | https://github.com/golang/go/commit/a45c54d1a5370f8138e7988f2a64562196566eaa | [
0.00017429902800358832,
0.00017182207375299186,
0.0001697059633443132,
0.00017198771820403636,
0.000001688070256022911
] |
{
"id": 0,
"code_window": [
"\t\tfor i := range v {\n",
"\t\t\ta[i] = v[i];\n",
"\t\t}\n",
"\t\tv = a;\n",
"\t}\n",
"\t*vp = v[0:n+1];\n",
"\tv[n] = p;\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\tv = v[0:n+1];\n"
],
"file_path": "src/cmd/gobuild/gobuild.go",
"type": "replace",
"edit_start_line_idx": 73
} | // Inferno libmach/6.c
// http://code.google.com/p/inferno-os/source/browse/utils/libmach/6.c
//
// Copyright © 1994-1999 Lucent Technologies Inc.
// Power PC support Copyright © 1995-2004 C H Forsyth ([email protected]).
// Portions Copyright © 1997-1999 Vita Nuova Limited.
// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com).
// Revisions Copyright © 2000-2004 Lucent Technologies Inc. and others.
// Portions Copyright © 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
/*
* amd64 definition
*/
#include <u.h>
#include <libc.h>
#include <bio.h>
#include "ureg_amd64.h"
#include <mach_amd64.h>
#define REGOFF(x) offsetof(struct Ureg, x)
#define REGSIZE sizeof(struct Ureg)
#define FP_CTLS(x) (REGSIZE+2*(x))
#define FP_CTL(x) (REGSIZE+4*(x))
#define FP_REG(x) (FP_CTL(8)+16*(x))
#define XM_REG(x) (FP_CTL(8)+8*16+16*(x))
#define FPREGSIZE 512 /* TO DO? currently only 0x1A0 used */
Reglist amd64reglist[] = {
{"AX", REGOFF(ax), RINT, 'Y'},
{"BX", REGOFF(bx), RINT, 'Y'},
{"CX", REGOFF(cx), RINT, 'Y'},
{"DX", REGOFF(dx), RINT, 'Y'},
{"SI", REGOFF(si), RINT, 'Y'},
{"DI", REGOFF(di), RINT, 'Y'},
{"BP", REGOFF(bp), RINT, 'Y'},
{"R8", REGOFF(r8), RINT, 'Y'},
{"R9", REGOFF(r9), RINT, 'Y'},
{"R10", REGOFF(r10), RINT, 'Y'},
{"R11", REGOFF(r11), RINT, 'Y'},
{"R12", REGOFF(r12), RINT, 'Y'},
{"R13", REGOFF(r13), RINT, 'Y'},
{"R14", REGOFF(r14), RINT, 'Y'},
{"R15", REGOFF(r15), RINT, 'Y'},
{"DS", REGOFF(ds), RINT, 'x'},
{"ES", REGOFF(es), RINT, 'x'},
{"FS", REGOFF(fs), RINT, 'x'},
{"GS", REGOFF(gs), RINT, 'x'},
{"TYPE", REGOFF(type), RINT, 'Y'},
{"TRAP", REGOFF(type), RINT, 'Y'}, /* alias for acid */
{"ERROR", REGOFF(error), RINT, 'Y'},
{"IP", REGOFF(ip), RINT, 'Y'},
{"PC", REGOFF(ip), RINT, 'Y'}, /* alias for acid */
{"CS", REGOFF(cs), RINT, 'Y'},
{"FLAGS", REGOFF(flags), RINT, 'Y'},
{"SP", REGOFF(sp), RINT, 'Y'},
{"SS", REGOFF(ss), RINT, 'Y'},
{"FCW", FP_CTLS(0), RFLT, 'x'},
{"FSW", FP_CTLS(1), RFLT, 'x'},
{"FTW", FP_CTLS(2), RFLT, 'b'},
{"FOP", FP_CTLS(3), RFLT, 'x'},
{"RIP", FP_CTL(2), RFLT, 'Y'},
{"RDP", FP_CTL(4), RFLT, 'Y'},
{"MXCSR", FP_CTL(6), RFLT, 'X'},
{"MXCSRMASK", FP_CTL(7), RFLT, 'X'},
{"M0", FP_REG(0), RFLT, 'F'}, /* assumes double */
{"M1", FP_REG(1), RFLT, 'F'},
{"M2", FP_REG(2), RFLT, 'F'},
{"M3", FP_REG(3), RFLT, 'F'},
{"M4", FP_REG(4), RFLT, 'F'},
{"M5", FP_REG(5), RFLT, 'F'},
{"M6", FP_REG(6), RFLT, 'F'},
{"M7", FP_REG(7), RFLT, 'F'},
{"X0", XM_REG(0), RFLT, 'F'}, /* assumes double */
{"X1", XM_REG(1), RFLT, 'F'},
{"X2", XM_REG(2), RFLT, 'F'},
{"X3", XM_REG(3), RFLT, 'F'},
{"X4", XM_REG(4), RFLT, 'F'},
{"X5", XM_REG(5), RFLT, 'F'},
{"X6", XM_REG(6), RFLT, 'F'},
{"X7", XM_REG(7), RFLT, 'F'},
{"X8", XM_REG(8), RFLT, 'F'},
{"X9", XM_REG(9), RFLT, 'F'},
{"X10", XM_REG(10), RFLT, 'F'},
{"X11", XM_REG(11), RFLT, 'F'},
{"X12", XM_REG(12), RFLT, 'F'},
{"X13", XM_REG(13), RFLT, 'F'},
{"X14", XM_REG(14), RFLT, 'F'},
{"X15", XM_REG(15), RFLT, 'F'},
{"X16", XM_REG(16), RFLT, 'F'},
/*
{"F0", FP_REG(7), RFLT, '3'},
{"F1", FP_REG(6), RFLT, '3'},
{"F2", FP_REG(5), RFLT, '3'},
{"F3", FP_REG(4), RFLT, '3'},
{"F4", FP_REG(3), RFLT, '3'},
{"F5", FP_REG(2), RFLT, '3'},
{"F6", FP_REG(1), RFLT, '3'},
{"F7", FP_REG(0), RFLT, '3'},
*/
{ 0 }
};
Mach mamd64=
{
"amd64",
MAMD64, /* machine type */
amd64reglist, /* register list */
REGSIZE, /* size of registers in bytes */
FPREGSIZE, /* size of fp registers in bytes */
"PC", /* name of PC */
"SP", /* name of SP */
0, /* link register */
"setSB", /* static base register name (bogus anyways) */
0, /* static base register value */
0x1000, /* page size */
0xFFFFFFFF80110000ULL, /* kernel base */
0xFFFF800000000000ULL, /* kernel text mask */
0x00007FFFFFFFF000ULL, /* user stack top */
1, /* quantization of pc */
8, /* szaddr */
4, /* szreg */
4, /* szfloat */
8, /* szdouble */
};
| src/libmach_amd64/6.c | 0 | https://github.com/golang/go/commit/a45c54d1a5370f8138e7988f2a64562196566eaa | [
0.00017476749781053513,
0.0001700948632787913,
0.00016469662659801543,
0.00017081672558560967,
0.0000030170281206665095
] |
{
"id": 0,
"code_window": [
"\t\tfor i := range v {\n",
"\t\t\ta[i] = v[i];\n",
"\t\t}\n",
"\t\tv = a;\n",
"\t}\n",
"\t*vp = v[0:n+1];\n",
"\tv[n] = p;\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\tv = v[0:n+1];\n"
],
"file_path": "src/cmd/gobuild/gobuild.go",
"type": "replace",
"edit_start_line_idx": 73
} | // $G $F.go && $L $F.$A && ./$A.out
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import "utf8"
func main() {
var chars [6] int;
chars[0] = 'a';
chars[1] = 'b';
chars[2] = 'c';
chars[3] = '\u65e5';
chars[4] = '\u672c';
chars[5] = '\u8a9e';
s := "";
for i := 0; i < 6; i++ {
s += string(chars[i]);
}
var l = len(s);
for w, i, j := 0,0,0; i < l; i += w {
var r int;
r, w = utf8.DecodeRuneInString(s[i:len(s)]);
if w == 0 { panic("zero width in string") }
if r != chars[j] { panic("wrong value from string") }
j++;
}
// encoded as bytes: 'a' 'b' 'c' e6 97 a5 e6 9c ac e8 aa 9e
const L = 12;
if L != l { panic("wrong length constructing array") }
a := make([]byte, L);
a[0] = 'a';
a[1] = 'b';
a[2] = 'c';
a[3] = 0xe6;
a[4] = 0x97;
a[5] = 0xa5;
a[6] = 0xe6;
a[7] = 0x9c;
a[8] = 0xac;
a[9] = 0xe8;
a[10] = 0xaa;
a[11] = 0x9e;
for w, i, j := 0,0,0; i < L; i += w {
var r int;
r, w = utf8.DecodeRune(a[i:L]);
if w == 0 { panic("zero width in bytes") }
if r != chars[j] { panic("wrong value from bytes") }
j++;
}
}
| test/utf.go | 0 | https://github.com/golang/go/commit/a45c54d1a5370f8138e7988f2a64562196566eaa | [
0.006104449741542339,
0.0018500954611226916,
0.00017600652063265443,
0.0012875202810391784,
0.002022086177021265
] |
{
"id": 1,
"code_window": [
"\tv[n] = p;\n",
"}\n",
"\n",
"func PushFile(vp *[]*File, p *File) {\n"
],
"labels": [
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t*vp = v;\n"
],
"file_path": "src/cmd/gobuild/gobuild.go",
"type": "add",
"edit_start_line_idx": 75
} | // Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gobuild
import (
"flag";
"fmt";
"gobuild";
"io";
"os";
"path";
"sort";
"strings";
"template";
"unicode";
"utf8";
)
type Pkg struct
type File struct {
Name string;
Pkg *Pkg;
Imports []string;
Deps []*Pkg;
Phase int;
}
type Pkg struct {
Name string;
Path string;
Files []*File;
}
type ArCmd struct {
Pkg *Pkg;
Files []*File;
}
type Phase struct {
Phase int;
ArCmds []*ArCmd;
}
type Info struct {
Args []string;
Char string;
Dir string;
ObjDir string;
Pkgmap map[string] *Pkg;
Packages []*Pkg;
Files map[string] *File;
Imports map[string] bool;
Phases []*Phase;
MaxPhase int;
}
var verbose = flag.Bool("v", false, "verbose mode")
var writeMakefile = flag.Bool("m", false, "write Makefile to standard output")
// PushPkg appends p to the vector *vp, growing the underlying array
// (with room to spare) when it is full.
func PushPkg(vp *[]*Pkg, p *Pkg) {
	v := *vp;
	n := len(v);
	if n >= cap(v) {
		// Grow to roughly double the current size before appending.
		m := 2*n + 10;
		a := make([]*Pkg, n, m);
		for i := range v {
			a[i] = v[i];
		}
		v = a;
	}
	// Extend v itself before storing element n.  The previous code
	// resliced *vp but then indexed the still-short v, which is out of
	// range and panics.
	v = v[0:n+1];
	v[n] = p;
	*vp = v;
}
// PushFile appends p to the vector *vp, growing the underlying array
// (with room to spare) when it is full.
func PushFile(vp *[]*File, p *File) {
	v := *vp;
	n := len(v);
	if n >= cap(v) {
		// Grow to roughly double the current size before appending.
		m := 2*n + 10;
		a := make([]*File, n, m);
		for i := range v {
			a[i] = v[i];
		}
		v = a;
	}
	// Extend v itself before storing element n.  The previous code
	// resliced *vp but then indexed the still-short v, which is out of
	// range and panics.
	v = v[0:n+1];
	v[n] = p;
	*vp = v;
}
// For sorting Files

// FileArray implements sort.Interface, ordering files by name.
type FileArray []*File

func (a FileArray) Len() int {
	return len(a)
}

func (a FileArray) Less(i, j int) bool {
	return a[i].Name < a[j].Name
}

func (a FileArray) Swap(i, j int) {
	a[i], a[j] = a[j], a[i]
}
// If current directory is under $GOROOT/src/pkg, return the
// path relative to there.  Otherwise return "".
func PkgDir() string {
	goroot, err := os.Getenv("GOROOT");
	if err != nil || goroot == "" {
		return ""
	}
	srcroot := path.Clean(goroot + "/src/pkg/");
	pwd, err1 := os.Getenv("PWD");	// TODO(rsc): real pwd
	if err1 != nil || pwd == "" {
		return ""
	}
	if pwd == srcroot {
		return ""
	}
	// Require pwd to be strictly inside srcroot ("srcroot/...").
	n := len(srcroot);
	if len(pwd) < n || pwd[n] != '/' || pwd[0:n] != srcroot {
		return ""
	}
	dir := pwd[n+1:len(pwd)];
	return dir;
}
// ScanFiles reads the given source files and assembles an Info
// describing the packages they belong to, their imports, and their
// inter-package dependencies within this directory.  Test files
// (*_test.go) and package main are excluded.
func ScanFiles(filenames []string) *Info {
	// Build list of imports, local packages, and files.
	// Exclude *_test.go and anything in package main.
	// TODO(rsc): Build a binary from package main?
	z := new(Info);
	z.Args = os.Args;
	z.Dir = PkgDir();
	z.Char = theChar;	// for template
	z.ObjDir = ObjDir;	// for template
	z.Pkgmap = make(map[string] *Pkg);
	z.Files = make(map[string] *File);
	z.Imports = make(map[string] bool);
	// Read Go files to find out packages and imports.
	var pkg *Pkg;
	for _, filename := range filenames {
		if strings.Index(filename, "_test.") >= 0 {
			continue;
		}
		f := new(File);
		f.Name = filename;
		if path.Ext(filename) == ".go" {
			rune, _ := utf8.DecodeRuneInString(filename);
			if rune != '_' && !unicode.IsLetter(rune) && !unicode.IsDecimalDigit(rune) {
				// Ignore files with funny leading letters,
				// to avoid editor files like .foo.go and ~foo.go.
				continue;
			}
			pkgname, imp, err := PackageImports(filename);
			if err != nil {
				fatal("parsing %s: %s", filename, err);
			}
			if pkgname == "main" {
				continue;
			}
			path := pkgname;
			var ok bool;
			pkg, ok = z.Pkgmap[path];
			if !ok {
				// First file seen for this package: create its record.
				pkg = new(Pkg);
				pkg.Name = pkgname;
				pkg.Path = path;
				z.Pkgmap[path] = pkg;
				PushPkg(&z.Packages, pkg);
			}
			f.Pkg = pkg;
			f.Imports = imp;
			for _, name := range imp {
				z.Imports[name] = true;
			}
			PushFile(&pkg.Files, f);
		}
		z.Files[filename] = f;
	}
	// Loop through files again, filling in more info.
	for _, f := range z.Files {
		if f.Pkg == nil {
			// non-Go file: fill in package name.
			// Must only be a single package in this directory.
			if len(z.Pkgmap) != 1 {
				fatal("cannot determine package for %s", f.Name);
			}
			f.Pkg = pkg;
		}
		// Go file: record dependencies on other packages in this directory.
		for _, imp := range f.Imports {
			pkg, ok := z.Pkgmap[imp];
			if ok && pkg != f.Pkg {
				PushPkg(&f.Deps, pkg);
			}
		}
	}
	// Update destination directory.
	// If destination directory has same
	// name as package name, cut it off.
	dir, name := path.Split(z.Dir);
	if len(z.Packages) == 1 && z.Packages[0].Name == name {
		z.Dir = dir;
	}
	return z;
}
// PackageObj returns the archive file name for the named package.
func PackageObj(pkg string) string {
	return pkg + ".a"
}
// Build compiles all scanned files into a fresh object directory.
// Because in-directory dependencies are compiled against each other's
// archives, it works in passes: each phase compiles whatever currently
// compiles, archives the results, and retries the rest, dying if a pass
// makes no progress.
func (z *Info) Build() {
	// Create empty object directory tree.
	RemoveAll(ObjDir);
	obj := path.Join(ObjDir, z.Dir) + "/";
	MkdirAll(obj);
	// Create empty archives.
	for pkgname := range z.Pkgmap {
		ar := obj + PackageObj(pkgname);
		os.Remove(ar);
		Archive(ar, nil);
	}
	// Compile by repeated passes: build as many .6 as possible,
	// put them in their archives, and repeat.
	var pending, fail, success []*File;
	for _, file := range z.Files {
		PushFile(&pending, file);
	}
	sort.Sort(FileArray(pending));
	var arfiles []string;
	z.Phases = make([]*Phase, 0, len(z.Files));
	for phase := 1; len(pending) > 0; phase++ {
		// Run what we can.
		fail = fail[0:0];
		success = success[0:0];
		for _, f := range pending {
			if !Build(Compiler(f.Name), f.Name, 0) {
				PushFile(&fail, f);
			} else {
				if *verbose {
					fmt.Fprint(os.Stderr, f.Name, " ");
				}
				PushFile(&success, f);
			}
		}
		if len(success) == 0 {
			// Nothing ran; give up.
			for _, f := range fail {
				Build(Compiler(f.Name), f.Name, ShowErrors | ForceDisplay);
			}
			fatal("stalemate");
		}
		if *verbose {
			fmt.Fprint(os.Stderr, "\n");
		}
		// Record phase data.
		p := new(Phase);
		p.ArCmds = make([]*ArCmd, 0, len(z.Pkgmap));
		p.Phase = phase;
		n := len(z.Phases);
		z.Phases = z.Phases[0:n+1];
		z.Phases[n] = p;
		// Update archives.
		for _, pkg := range z.Pkgmap {
			arfiles = arfiles[0:0];
			var files []*File;
			for _, f := range success {
				if f.Pkg == pkg {
					PushString(&arfiles, Object(f.Name, theChar));
					PushFile(&files, f);
				}
				f.Phase = phase;
			}
			if len(arfiles) > 0 {
				Archive(obj + pkg.Name + ".a", arfiles);
				n := len(p.ArCmds);
				p.ArCmds = p.ArCmds[0:n+1];
				p.ArCmds[n] = &ArCmd{pkg, files};
			}
			// Object files are consumed by the archive; remove them.
			for _, filename := range arfiles {
				os.Remove(filename);
			}
		}
		pending, fail = fail, pending;
	}
}
// Clean removes everything Build produced: the entire object
// directory tree and the per-package archive files in the
// current directory.
func (z *Info) Clean() {
	RemoveAll(ObjDir);
	for name := range z.Pkgmap {
		os.Remove(PackageObj(name));
	}
}
// Main is the entry point for gobuild.  It scans the Go source
// files named on the command line (or, with no arguments, every
// source file in the current directory), builds them into
// per-package archives, and — when the -m flag is set — writes a
// Makefile describing the build to standard output.
func Main() {
	flag.Parse();
	filenames := flag.Args();
	if len(filenames) == 0 {
		// No arguments: build everything in the current directory.
		var err os.Error;
		filenames, err = SourceFiles(".");
		if err != nil {
			fatal("reading .: %s", err.String());
		}
	}
	state := ScanFiles(filenames);
	state.Build();
	if *writeMakefile {
		t, err := template.Parse(makefileTemplate, makefileMap);
		if err != nil {
			fatal("template.Parse: %s", err.String());
		}
		err = t.Execute(state, os.Stdout);
		if err != nil {
			// Name the operation that actually failed (Execute,
			// not the old Expand API).
			fatal("template.Execute: %s", err.String());
		}
	}
}
| src/cmd/gobuild/gobuild.go | 1 | https://github.com/golang/go/commit/a45c54d1a5370f8138e7988f2a64562196566eaa | [
0.9990390539169312,
0.3662669360637665,
0.0001724042958812788,
0.034771718084812164,
0.42412278056144714
] |
{
"id": 1,
"code_window": [
"\tv[n] = p;\n",
"}\n",
"\n",
"func PushFile(vp *[]*File, p *File) {\n"
],
"labels": [
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t*vp = v;\n"
],
"file_path": "src/cmd/gobuild/gobuild.go",
"type": "add",
"edit_start_line_idx": 75
} | // godefs -f -m32 -f -I/home/rsc/pub/linux-2.6/arch/x86/include -f -I/home/rsc/pub/linux-2.6/include defs2.c
// MACHINE GENERATED - DO NOT EDIT.
// Constants
enum {
PROT_NONE = 0,
PROT_READ = 0x1,
PROT_WRITE = 0x2,
PROT_EXEC = 0x4,
MAP_ANON = 0x20,
MAP_PRIVATE = 0x2,
SA_RESTART = 0x10000000,
SA_ONSTACK = 0x8000000,
SA_RESTORER = 0x4000000,
SA_SIGINFO = 0x4,
};
// Types
#pragma pack on
typedef struct Fpreg Fpreg;
struct Fpreg {
uint16 significand[4];
uint16 exponent;
};
typedef struct Fpxreg Fpxreg;
struct Fpxreg {
uint16 significand[4];
uint16 exponent;
uint16 padding[3];
};
typedef struct Xmmreg Xmmreg;
struct Xmmreg {
uint32 element[4];
};
typedef struct Fpstate Fpstate;
struct Fpstate {
uint32 cw;
uint32 sw;
uint32 tag;
uint32 ipoff;
uint32 cssel;
uint32 dataoff;
uint32 datasel;
Fpreg _st[8];
uint16 status;
uint16 magic;
uint32 _fxsr_env[6];
uint32 mxcsr;
uint32 reserved;
Fpxreg _fxsr_st[8];
Xmmreg _xmm[8];
uint32 padding1[44];
byte _anon_[48];
};
typedef struct Timespec Timespec;
struct Timespec {
int32 tv_sec;
int32 tv_nsec;
};
typedef struct Timeval Timeval;
struct Timeval {
int32 tv_sec;
int32 tv_usec;
};
typedef struct Sigaction Sigaction;
struct Sigaction {
byte _u[4];
uint32 sa_mask;
uint32 sa_flags;
void *sa_restorer;
};
typedef struct Siginfo Siginfo;
struct Siginfo {
int32 si_signo;
int32 si_errno;
int32 si_code;
byte _sifields[116];
};
typedef struct Sigaltstack Sigaltstack;
struct Sigaltstack {
void *ss_sp;
int32 ss_flags;
uint32 ss_size;
};
typedef struct Sigcontext Sigcontext;
struct Sigcontext {
uint16 gs;
uint16 __gsh;
uint16 fs;
uint16 __fsh;
uint16 es;
uint16 __esh;
uint16 ds;
uint16 __dsh;
uint32 edi;
uint32 esi;
uint32 ebp;
uint32 esp;
uint32 ebx;
uint32 edx;
uint32 ecx;
uint32 eax;
uint32 trapno;
uint32 err;
uint32 eip;
uint16 cs;
uint16 __csh;
uint32 eflags;
uint32 esp_at_signal;
uint16 ss;
uint16 __ssh;
Fpstate *fpstate;
uint32 oldmask;
uint32 cr2;
};
typedef struct Ucontext Ucontext;
struct Ucontext {
uint32 uc_flags;
Ucontext *uc_link;
Sigaltstack uc_stack;
Sigcontext uc_mcontext;
uint32 uc_sigmask;
};
#pragma pack off
| src/pkg/runtime/linux/386/defs.h | 0 | https://github.com/golang/go/commit/a45c54d1a5370f8138e7988f2a64562196566eaa | [
0.00036218480090610683,
0.00021751494205091149,
0.00017653341637924314,
0.00020485292770899832,
0.000050392747652949765
] |
{
"id": 1,
"code_window": [
"\tv[n] = p;\n",
"}\n",
"\n",
"func PushFile(vp *[]*File, p *File) {\n"
],
"labels": [
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t*vp = v;\n"
],
"file_path": "src/cmd/gobuild/gobuild.go",
"type": "add",
"edit_start_line_idx": 75
} | // Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
// System call support for 386, Darwin
//
// func Syscall(trap int32, a1, a2, a3 int32) (r1, r2, err int32);
// func Syscall6(trap int32, a1, a2, a3, a4, a5, a6 int32) (r1, r2, err int32);
// Trap # in AX, args on stack above caller pc.
TEXT syscall·Syscall(SB),7,$0
CALL sys·entersyscall(SB)
MOVL 4(SP), AX // syscall entry
// slide args down on top of system call number
LEAL 8(SP), SI
LEAL 4(SP), DI
CLD
MOVSL
MOVSL
MOVSL
INT $0x80
JAE ok
MOVL $-1, 20(SP) // r1
MOVL $-1, 24(SP) // r2
MOVL AX, 28(SP) // errno
CALL sys·exitsyscall(SB)
RET
ok:
MOVL AX, 20(SP) // r1
MOVL DX, 24(SP) // r2
MOVL $0, 28(SP) // errno
CALL sys·exitsyscall(SB)
RET
TEXT syscall·Syscall6(SB),7,$0
CALL sys·entersyscall(SB)
MOVL 4(SP), AX // syscall entry
// slide args down on top of system call number
LEAL 8(SP), SI
LEAL 4(SP), DI
CLD
MOVSL
MOVSL
MOVSL
MOVSL
MOVSL
MOVSL
INT $0x80
JAE ok6
MOVL $-1, 32(SP) // r1
MOVL $-1, 36(SP) // r2
MOVL AX, 40(SP) // errno
CALL sys·exitsyscall(SB)
RET
ok6:
MOVL AX, 32(SP) // r1
MOVL DX, 36(SP) // r2
MOVL $0, 40(SP) // errno
CALL sys·exitsyscall(SB)
RET
TEXT syscall·RawSyscall(SB),7,$0
MOVL 4(SP), AX // syscall entry
// slide args down on top of system call number
LEAL 8(SP), SI
LEAL 4(SP), DI
CLD
MOVSL
MOVSL
MOVSL
INT $0x80
JAE ok1
MOVL $-1, 20(SP) // r1
MOVL $-1, 24(SP) // r2
MOVL AX, 28(SP) // errno
RET
ok1:
MOVL AX, 20(SP) // r1
MOVL DX, 24(SP) // r2
MOVL $0, 28(SP) // errno
RET
| src/pkg/syscall/asm_darwin_386.s | 0 | https://github.com/golang/go/commit/a45c54d1a5370f8138e7988f2a64562196566eaa | [
0.00017451537132728845,
0.0001723636087263003,
0.00017045439744833857,
0.00017249045777134597,
0.0000013812965562465251
] |
{
"id": 1,
"code_window": [
"\tv[n] = p;\n",
"}\n",
"\n",
"func PushFile(vp *[]*File, p *File) {\n"
],
"labels": [
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t*vp = v;\n"
],
"file_path": "src/cmd/gobuild/gobuild.go",
"type": "add",
"edit_start_line_idx": 75
} | /*
Inferno libkern/getfields.c
http://code.google.com/p/inferno-os/source/browse/libkern/getfields.c
Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
Revisions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com). All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#include <lib9.h>
int
getfields(char *str, char **args, int max, int mflag, char *set)
{
Rune r;
int nr, intok, narg;
if(max <= 0)
return 0;
narg = 0;
args[narg] = str;
if(!mflag)
narg++;
intok = 0;
for(;; str += nr) {
nr = chartorune(&r, str);
if(r == 0)
break;
if(utfrune(set, r)) {
if(narg >= max)
break;
*str = 0;
intok = 0;
args[narg] = str + nr;
if(!mflag)
narg++;
} else {
if(!intok && mflag)
narg++;
intok = 1;
}
}
return narg;
}
| src/lib9/getfields.c | 0 | https://github.com/golang/go/commit/a45c54d1a5370f8138e7988f2a64562196566eaa | [
0.004711754620075226,
0.0015584488864988089,
0.00017269534873776138,
0.0003979592293035239,
0.0017286954680457711
] |
{
"id": 2,
"code_window": [
"\t\t}\n",
"\t\tv = a;\n",
"\t}\n",
"\t*vp = v[0:n+1];\n",
"\tv[n] = p;\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\tv = v[0:n+1];\n"
],
"file_path": "src/cmd/gobuild/gobuild.go",
"type": "replace",
"edit_start_line_idx": 88
} | // Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gobuild
import (
"flag";
"fmt";
"gobuild";
"io";
"os";
"path";
"sort";
"strings";
"template";
"unicode";
"utf8";
)
type Pkg struct
type File struct {
Name string;
Pkg *Pkg;
Imports []string;
Deps []*Pkg;
Phase int;
}
type Pkg struct {
Name string;
Path string;
Files []*File;
}
type ArCmd struct {
Pkg *Pkg;
Files []*File;
}
type Phase struct {
Phase int;
ArCmds []*ArCmd;
}
type Info struct {
Args []string;
Char string;
Dir string;
ObjDir string;
Pkgmap map[string] *Pkg;
Packages []*Pkg;
Files map[string] *File;
Imports map[string] bool;
Phases []*Phase;
MaxPhase int;
}
var verbose = flag.Bool("v", false, "verbose mode")
var writeMakefile = flag.Bool("m", false, "write Makefile to standard output")
func PushPkg(vp *[]*Pkg, p *Pkg) {
v := *vp;
n := len(v);
if n >= cap(v) {
m := 2*n + 10;
a := make([]*Pkg, n, m);
for i := range v {
a[i] = v[i];
}
v = a;
}
*vp = v[0:n+1];
v[n] = p;
}
func PushFile(vp *[]*File, p *File) {
v := *vp;
n := len(v);
if n >= cap(v) {
m := 2*n + 10;
a := make([]*File, n, m);
for i := range v {
a[i] = v[i];
}
v = a;
}
*vp = v[0:n+1];
v[n] = p;
}
// For sorting Files
type FileArray []*File
func (a FileArray) Len() int {
return len(a)
}
func (a FileArray) Less(i, j int) bool {
return a[i].Name < a[j].Name
}
func (a FileArray) Swap(i, j int) {
a[i], a[j] = a[j], a[i]
}
// If current directory is under $GOROOT/src/pkg, return the
// path relative to there. Otherwise return "".
func PkgDir() string {
goroot, err := os.Getenv("GOROOT");
if err != nil || goroot == "" {
return ""
}
srcroot := path.Clean(goroot + "/src/pkg/");
pwd, err1 := os.Getenv("PWD"); // TODO(rsc): real pwd
if err1 != nil || pwd == "" {
return ""
}
if pwd == srcroot {
return ""
}
n := len(srcroot);
if len(pwd) < n || pwd[n] != '/' || pwd[0:n] != srcroot {
return ""
}
dir := pwd[n+1:len(pwd)];
return dir;
}
func ScanFiles(filenames []string) *Info {
// Build list of imports, local packages, and files.
// Exclude *_test.go and anything in package main.
// TODO(rsc): Build a binary from package main?
z := new(Info);
z.Args = os.Args;
z.Dir = PkgDir();
z.Char = theChar; // for template
z.ObjDir = ObjDir; // for template
z.Pkgmap = make(map[string] *Pkg);
z.Files = make(map[string] *File);
z.Imports = make(map[string] bool);
// Read Go files to find out packages and imports.
var pkg *Pkg;
for _, filename := range filenames {
if strings.Index(filename, "_test.") >= 0 {
continue;
}
f := new(File);
f.Name = filename;
if path.Ext(filename) == ".go" {
rune, _ := utf8.DecodeRuneInString(filename);
if rune != '_' && !unicode.IsLetter(rune) && !unicode.IsDecimalDigit(rune) {
// Ignore files with funny leading letters,
// to avoid editor files like .foo.go and ~foo.go.
continue;
}
pkgname, imp, err := PackageImports(filename);
if err != nil {
fatal("parsing %s: %s", filename, err);
}
if pkgname == "main" {
continue;
}
path := pkgname;
var ok bool;
pkg, ok = z.Pkgmap[path];
if !ok {
pkg = new(Pkg);
pkg.Name = pkgname;
pkg.Path = path;
z.Pkgmap[path] = pkg;
PushPkg(&z.Packages, pkg);
}
f.Pkg = pkg;
f.Imports = imp;
for _, name := range imp {
z.Imports[name] = true;
}
PushFile(&pkg.Files, f);
}
z.Files[filename] = f;
}
// Loop through files again, filling in more info.
for _, f := range z.Files {
if f.Pkg == nil {
// non-Go file: fill in package name.
// Must only be a single package in this directory.
if len(z.Pkgmap) != 1 {
fatal("cannot determine package for %s", f.Name);
}
f.Pkg = pkg;
}
// Go file: record dependencies on other packages in this directory.
for _, imp := range f.Imports {
pkg, ok := z.Pkgmap[imp];
if ok && pkg != f.Pkg {
PushPkg(&f.Deps, pkg);
}
}
}
// Update destination directory.
// If destination directory has same
// name as package name, cut it off.
dir, name := path.Split(z.Dir);
if len(z.Packages) == 1 && z.Packages[0].Name == name {
z.Dir = dir;
}
return z;
}
func PackageObj(pkg string) string {
return pkg + ".a"
}
func (z *Info) Build() {
// Create empty object directory tree.
RemoveAll(ObjDir);
obj := path.Join(ObjDir, z.Dir) + "/";
MkdirAll(obj);
// Create empty archives.
for pkgname := range z.Pkgmap {
ar := obj + PackageObj(pkgname);
os.Remove(ar);
Archive(ar, nil);
}
// Compile by repeated passes: build as many .6 as possible,
// put them in their archives, and repeat.
var pending, fail, success []*File;
for _, file := range z.Files {
PushFile(&pending, file);
}
sort.Sort(FileArray(pending));
var arfiles []string;
z.Phases = make([]*Phase, 0, len(z.Files));
for phase := 1; len(pending) > 0; phase++ {
// Run what we can.
fail = fail[0:0];
success = success[0:0];
for _, f := range pending {
if !Build(Compiler(f.Name), f.Name, 0) {
PushFile(&fail, f);
} else {
if *verbose {
fmt.Fprint(os.Stderr, f.Name, " ");
}
PushFile(&success, f);
}
}
if len(success) == 0 {
// Nothing ran; give up.
for _, f := range fail {
Build(Compiler(f.Name), f.Name, ShowErrors | ForceDisplay);
}
fatal("stalemate");
}
if *verbose {
fmt.Fprint(os.Stderr, "\n");
}
// Record phase data.
p := new(Phase);
p.ArCmds = make([]*ArCmd, 0, len(z.Pkgmap));
p.Phase = phase;
n := len(z.Phases);
z.Phases = z.Phases[0:n+1];
z.Phases[n] = p;
// Update archives.
for _, pkg := range z.Pkgmap {
arfiles = arfiles[0:0];
var files []*File;
for _, f := range success {
if f.Pkg == pkg {
PushString(&arfiles, Object(f.Name, theChar));
PushFile(&files, f);
}
f.Phase = phase;
}
if len(arfiles) > 0 {
Archive(obj + pkg.Name + ".a", arfiles);
n := len(p.ArCmds);
p.ArCmds = p.ArCmds[0:n+1];
p.ArCmds[n] = &ArCmd{pkg, files};
}
for _, filename := range arfiles {
os.Remove(filename);
}
}
pending, fail = fail, pending;
}
}
func (z *Info) Clean() {
RemoveAll(ObjDir);
for pkgname := range z.Pkgmap {
os.Remove(PackageObj(pkgname));
}
}
func Main() {
flag.Parse();
filenames := flag.Args();
if len(filenames) == 0 {
var err os.Error;
filenames, err= SourceFiles(".");
if err != nil {
fatal("reading .: %s", err.String());
}
}
state := ScanFiles(filenames);
state.Build();
if *writeMakefile {
t, err := template.Parse(makefileTemplate, makefileMap);
if err != nil {
fatal("template.Parse: %s", err.String());
}
err = t.Execute(state, os.Stdout);
if err != nil {
fatal("template.Expand: %s", err.String());
}
}
}
| src/cmd/gobuild/gobuild.go | 1 | https://github.com/golang/go/commit/a45c54d1a5370f8138e7988f2a64562196566eaa | [
0.9978073239326477,
0.07438571006059647,
0.00016588781727477908,
0.00017355753516312689,
0.2472405582666397
] |
{
"id": 2,
"code_window": [
"\t\t}\n",
"\t\tv = a;\n",
"\t}\n",
"\t*vp = v[0:n+1];\n",
"\tv[n] = p;\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\tv = v[0:n+1];\n"
],
"file_path": "src/cmd/gobuild/gobuild.go",
"type": "replace",
"edit_start_line_idx": 88
} | // $G $D/$F.go && $L $F.$A && ./$A.out
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
var a = []int { 1, 2, 3 }
func main() {
if len(a) != 3 { panic("array len") }
// print(a[0], " ", a[1], " ", a[2], "\n")
if a[0] != 1 || a[1] != 2 || a[2] != 3 { panic("array contents") }
}
| test/fixedbugs/bug101.go | 0 | https://github.com/golang/go/commit/a45c54d1a5370f8138e7988f2a64562196566eaa | [
0.0014433469623327255,
0.0012641421053558588,
0.001084937248378992,
0.0012641421053558588,
0.00017920485697686672
] |
{
"id": 2,
"code_window": [
"\t\t}\n",
"\t\tv = a;\n",
"\t}\n",
"\t*vp = v[0:n+1];\n",
"\tv[n] = p;\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\tv = v[0:n+1];\n"
],
"file_path": "src/cmd/gobuild/gobuild.go",
"type": "replace",
"edit_start_line_idx": 88
} | // Inferno utils/5c/sgen.c
// http://code.google.com/p/inferno-os/source/browse/utils/5c/sgen.c
//
// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
// Portions Copyright © 1995-1997 C H Forsyth ([email protected])
// Portions Copyright © 1997-1999 Vita Nuova Limited
// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
// Portions Copyright © 2004,2006 Bruce Ellis
// Portions Copyright © 2005-2007 C H Forsyth ([email protected])
// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
// Portions Copyright © 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#include "gc.h"
int32
argsize(void)
{
Type *t;
int32 s;
//print("t=%T\n", thisfn);
s = 0;
for(t=thisfn->down; t!=T; t=t->down) {
switch(t->etype) {
case TVOID:
break;
case TDOT:
s += 64;
break;
default:
s = align(s, t, Aarg1);
s = align(s, t, Aarg2);
break;
}
//print(" %d %T\n", s, t);
}
return (s+7) & ~7;
}
void
codgen(Node *n, Node *nn)
{
Prog *sp;
Node *n1, nod, nod1;
cursafe = 0;
curarg = 0;
maxargsafe = 0;
/*
* isolate name
*/
for(n1 = nn;; n1 = n1->left) {
if(n1 == Z) {
diag(nn, "cant find function name");
return;
}
if(n1->op == ONAME)
break;
}
nearln = nn->lineno;
gpseudo(ATEXT, n1->sym, nodconst(stkoff));
p->to.type = D_CONST2;
p->to.offset2 = argsize();
sp = p;
/*
* isolate first argument
*/
if(REGARG >= 0) {
if(typesuv[thisfn->link->etype]) {
nod1 = *nodret->left;
nodreg(&nod, &nod1, REGARG);
gopcode(OAS, &nod, Z, &nod1);
} else
if(firstarg && typechlp[firstargtype->etype]) {
nod1 = *nodret->left;
nod1.sym = firstarg;
nod1.type = firstargtype;
nod1.xoffset = align(0, firstargtype, Aarg1);
nod1.etype = firstargtype->etype;
nodreg(&nod, &nod1, REGARG);
gopcode(OAS, &nod, Z, &nod1);
}
}
retok = 0;
gen(n);
if(!retok)
if(thisfn->link->etype != TVOID)
warn(Z, "no return at end of function: %s", n1->sym->name);
noretval(3);
gbranch(ORETURN);
if(!debug['N'] || debug['R'] || debug['P'])
regopt(sp);
sp->to.offset += maxargsafe;
}
void
supgen(Node *n)
{
int32 spc;
Prog *sp;
if(n == Z)
return;
suppress++;
spc = pc;
sp = lastp;
gen(n);
lastp = sp;
pc = spc;
sp->link = nil;
suppress--;
}
void
gen(Node *n)
{
Node *l, nod;
Prog *sp, *spc, *spb;
Case *cn;
int32 sbc, scc;
int o, f;
loop:
if(n == Z)
return;
nearln = n->lineno;
o = n->op;
if(debug['G'])
if(o != OLIST)
print("%L %O\n", nearln, o);
retok = 0;
switch(o) {
default:
complex(n);
cgen(n, Z, 0);
break;
case OLIST:
gen(n->left);
rloop:
n = n->right;
goto loop;
case ORETURN:
retok = 1;
complex(n);
if(n->type == T)
break;
l = n->left;
if(l == Z) {
noretval(3);
gbranch(ORETURN);
break;
}
if(typesuv[n->type->etype]) {
sugen(l, nodret, n->type->width);
noretval(3);
gbranch(ORETURN);
break;
}
regret(&nod, n);
cgen(l, &nod, 0);
regfree(&nod);
if(typefd[n->type->etype])
noretval(1);
else
noretval(2);
gbranch(ORETURN);
break;
case OLABEL:
l = n->left;
if(l) {
l->pc = pc;
if(l->label)
patch(l->label, pc);
}
gbranch(OGOTO); /* prevent self reference in reg */
patch(p, pc);
goto rloop;
case OGOTO:
retok = 1;
n = n->left;
if(n == Z)
return;
if(n->complex == 0) {
diag(Z, "label undefined: %s", n->sym->name);
return;
}
if(suppress)
return;
gbranch(OGOTO);
if(n->pc) {
patch(p, n->pc);
return;
}
if(n->label)
patch(n->label, pc-1);
n->label = p;
return;
case OCASE:
l = n->left;
if(cases == C)
diag(n, "case/default outside a switch");
if(l == Z) {
cas();
cases->val = 0;
cases->def = 1;
cases->label = pc;
goto rloop;
}
complex(l);
if(l->type == T)
goto rloop;
if(l->op == OCONST)
if(typechl[l->type->etype]) {
cas();
cases->val = l->vconst;
cases->def = 0;
cases->label = pc;
goto rloop;
}
diag(n, "case expression must be integer constant");
goto rloop;
case OSWITCH:
l = n->left;
complex(l);
if(l->type == T)
break;
if(!typechl[l->type->etype]) {
diag(n, "switch expression must be integer");
break;
}
gbranch(OGOTO); /* entry */
sp = p;
cn = cases;
cases = C;
cas();
sbc = breakpc;
breakpc = pc;
gbranch(OGOTO);
spb = p;
gen(n->right);
gbranch(OGOTO);
patch(p, breakpc);
patch(sp, pc);
regalloc(&nod, l, Z);
nod.type = types[TLONG];
cgen(l, &nod, 0);
doswit(&nod);
regfree(&nod);
patch(spb, pc);
cases = cn;
breakpc = sbc;
break;
case OWHILE:
case ODWHILE:
l = n->left;
gbranch(OGOTO); /* entry */
sp = p;
scc = continpc;
continpc = pc;
gbranch(OGOTO);
spc = p;
sbc = breakpc;
breakpc = pc;
gbranch(OGOTO);
spb = p;
patch(spc, pc);
if(n->op == OWHILE)
patch(sp, pc);
bcomplex(l, Z); /* test */
patch(p, breakpc);
if(n->op == ODWHILE)
patch(sp, pc);
gen(n->right); /* body */
gbranch(OGOTO);
patch(p, continpc);
patch(spb, pc);
continpc = scc;
breakpc = sbc;
break;
case OFOR:
l = n->left;
gen(l->right->left); /* init */
gbranch(OGOTO); /* entry */
sp = p;
scc = continpc;
continpc = pc;
gbranch(OGOTO);
spc = p;
sbc = breakpc;
breakpc = pc;
gbranch(OGOTO);
spb = p;
patch(spc, pc);
gen(l->right->right); /* inc */
patch(sp, pc);
if(l->left != Z) { /* test */
bcomplex(l->left, Z);
patch(p, breakpc);
}
gen(n->right); /* body */
gbranch(OGOTO);
patch(p, continpc);
patch(spb, pc);
continpc = scc;
breakpc = sbc;
break;
case OCONTINUE:
if(continpc < 0) {
diag(n, "continue not in a loop");
break;
}
gbranch(OGOTO);
patch(p, continpc);
break;
case OBREAK:
if(breakpc < 0) {
diag(n, "break not in a loop");
break;
}
gbranch(OGOTO);
patch(p, breakpc);
break;
case OIF:
l = n->left;
if(bcomplex(l, n->right)) {
if(typefd[l->type->etype])
f = !l->fconst;
else
f = !l->vconst;
if(debug['c'])
print("%L const if %s\n", nearln, f ? "false" : "true");
if(f) {
supgen(n->right->left);
gen(n->right->right);
}
else {
gen(n->right->left);
supgen(n->right->right);
}
}
else {
sp = p;
if(n->right->left != Z)
gen(n->right->left);
if(n->right->right != Z) {
gbranch(OGOTO);
patch(sp, pc);
sp = p;
gen(n->right->right);
}
patch(sp, pc);
}
break;
case OSET:
case OUSED:
usedset(n->left, o);
break;
}
}
void
usedset(Node *n, int o)
{
if(n->op == OLIST) {
usedset(n->left, o);
usedset(n->right, o);
return;
}
complex(n);
switch(n->op) {
case OADDR: /* volatile */
gins(ANOP, n, Z);
break;
case ONAME:
if(o == OSET)
gins(ANOP, Z, n);
else
gins(ANOP, n, Z);
break;
}
}
void
noretval(int n)
{
if(n & 1) {
gins(ANOP, Z, Z);
p->to.type = D_REG;
p->to.reg = REGRET;
}
if(n & 2) {
gins(ANOP, Z, Z);
p->to.type = D_FREG;
p->to.reg = FREGRET;
}
}
/*
* calculate addressability as follows
* CONST ==> 20 $value
* NAME ==> 10 name
* REGISTER ==> 11 register
* INDREG ==> 12 *[(reg)+offset]
* &10 ==> 2 $name
* ADD(2, 20) ==> 2 $name+offset
* ADD(3, 20) ==> 3 $(reg)+offset
* &12 ==> 3 $(reg)+offset
* *11 ==> 11 ??
* *2 ==> 10 name
* *3 ==> 12 *(reg)+offset
* calculate complexity (number of registers)
*/
void
xcom(Node *n)
{
Node *l, *r;
int t;
if(n == Z)
return;
l = n->left;
r = n->right;
n->addable = 0;
n->complex = 0;
switch(n->op) {
case OCONST:
n->addable = 20;
return;
case OREGISTER:
n->addable = 11;
return;
case OINDREG:
n->addable = 12;
return;
case ONAME:
n->addable = 10;
return;
case OADDR:
xcom(l);
if(l->addable == 10)
n->addable = 2;
if(l->addable == 12)
n->addable = 3;
break;
case OIND:
xcom(l);
if(l->addable == 11)
n->addable = 12;
if(l->addable == 3)
n->addable = 12;
if(l->addable == 2)
n->addable = 10;
break;
case OADD:
xcom(l);
xcom(r);
if(l->addable == 20) {
if(r->addable == 2)
n->addable = 2;
if(r->addable == 3)
n->addable = 3;
}
if(r->addable == 20) {
if(l->addable == 2)
n->addable = 2;
if(l->addable == 3)
n->addable = 3;
}
break;
case OASLMUL:
case OASMUL:
xcom(l);
xcom(r);
t = vlog(r);
if(t >= 0) {
n->op = OASASHL;
r->vconst = t;
r->type = types[TINT];
}
break;
case OMUL:
case OLMUL:
xcom(l);
xcom(r);
t = vlog(r);
if(t >= 0) {
n->op = OASHL;
r->vconst = t;
r->type = types[TINT];
}
t = vlog(l);
if(t >= 0) {
n->op = OASHL;
n->left = r;
n->right = l;
r = l;
l = n->left;
r->vconst = t;
r->type = types[TINT];
}
break;
case OASLDIV:
xcom(l);
xcom(r);
t = vlog(r);
if(t >= 0) {
n->op = OASLSHR;
r->vconst = t;
r->type = types[TINT];
}
break;
case OLDIV:
xcom(l);
xcom(r);
t = vlog(r);
if(t >= 0) {
n->op = OLSHR;
r->vconst = t;
r->type = types[TINT];
}
break;
case OASLMOD:
xcom(l);
xcom(r);
t = vlog(r);
if(t >= 0) {
n->op = OASAND;
r->vconst--;
}
break;
case OLMOD:
xcom(l);
xcom(r);
t = vlog(r);
if(t >= 0) {
n->op = OAND;
r->vconst--;
}
break;
default:
if(l != Z)
xcom(l);
if(r != Z)
xcom(r);
break;
}
if(n->addable >= 10)
return;
if(l != Z)
n->complex = l->complex;
if(r != Z) {
if(r->complex == n->complex)
n->complex = r->complex+1;
else
if(r->complex > n->complex)
n->complex = r->complex;
}
if(n->complex == 0)
n->complex++;
if(com64(n))
return;
switch(n->op) {
case OFUNC:
n->complex = FNX;
break;
case OADD:
case OXOR:
case OAND:
case OOR:
case OEQ:
case ONE:
/*
* immediate operators, make const on right
*/
if(l->op == OCONST) {
n->left = r;
n->right = l;
}
break;
}
}
int
bcomplex(Node *n, Node *c)
{
complex(n);
if(n->type != T)
if(tcompat(n, T, n->type, tnot))
n->type = T;
if(n->type != T) {
if(c != Z && n->op == OCONST && deadheads(c))
return 1;
bool64(n);
boolgen(n, 1, Z);
} else
gbranch(OGOTO);
return 0;
}
| src/cmd/5c/sgen.c | 0 | https://github.com/golang/go/commit/a45c54d1a5370f8138e7988f2a64562196566eaa | [
0.9752296805381775,
0.20830896496772766,
0.00016590204904787242,
0.004678419791162014,
0.35020774602890015
] |
{
"id": 2,
"code_window": [
"\t\t}\n",
"\t\tv = a;\n",
"\t}\n",
"\t*vp = v[0:n+1];\n",
"\tv[n] = p;\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\tv = v[0:n+1];\n"
],
"file_path": "src/cmd/gobuild/gobuild.go",
"type": "replace",
"edit_start_line_idx": 88
} | // Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package io
import (
"io";
"rand";
"testing";
)
const N = 10000; // make this bigger for a larger (and slower) test
var data []byte; // test data for write tests
func init() {
data = make([]byte, N);
for i := 0; i < len(data); i++ {
data[i] = 'a' + byte(i % 26)
}
}
// Verify that contents of buf match the string s.
func check(t *testing.T, testname string, buf *ByteBuffer, s string) {
if buf.Len() != len(buf.Data()) {
t.Errorf("%s: buf.Len() == %d, len(buf.Data()) == %d\n", testname, buf.Len(), len(buf.Data()))
}
if buf.Len() != len(s) {
t.Errorf("%s: buf.Len() == %d, len(s) == %d\n", testname, buf.Len(), len(s))
}
if string(buf.Data()) != s {
t.Errorf("%s: string(buf.Data()) == %q, s == %q\n", testname, string(buf.Data()), s)
}
}
// Fill buf through n writes of fub.
// The initial contents of buf corresponds to the string s;
// the result is the final contents of buf returned as a string.
func fill(t *testing.T, testname string, buf *ByteBuffer, s string, n int, fub []byte) string {
check(t, testname + " (fill 1)", buf, s);
for ; n > 0; n-- {
m, err := buf.Write(fub);
if m != len(fub) {
t.Errorf(testname + " (fill 2): m == %d, expected %d\n", m, len(fub));
}
if err != nil {
t.Errorf(testname + " (fill 3): err should always be nil, found err == %s\n", err);
}
s += string(fub);
check(t, testname + " (fill 4)", buf, s);
}
return s;
}
// Empty buf through repeated reads into fub.
// The initial contents of buf corresponds to the string s.
func empty(t *testing.T, testname string, buf *ByteBuffer, s string, fub []byte) {
check(t, testname + " (empty 1)", buf, s);
for {
n, err := buf.Read(fub);
if n == 0 {
break;
}
if err != nil {
t.Errorf(testname + " (empty 2): err should always be nil, found err == %s\n", err);
}
s = s[n : len(s)];
check(t, testname + " (empty 3)", buf, s);
}
check(t, testname + " (empty 4)", buf, "");
}
func TestBasicOperations(t *testing.T) {
var buf ByteBuffer;
for i := 0; i < 5; i++ {
check(t, "TestBasicOperations (1)", &buf, "");
buf.Reset();
check(t, "TestBasicOperations (2)", &buf, "");
buf.Truncate(0);
check(t, "TestBasicOperations (3)", &buf, "");
n, err := buf.Write(data[0 : 1]);
if n != 1 {
t.Errorf("wrote 1 byte, but n == %d\n", n);
}
if err != nil {
t.Errorf("err should always be nil, but err == %s\n", err);
}
check(t, "TestBasicOperations (4)", &buf, "a");
buf.WriteByte(data[1]);
check(t, "TestBasicOperations (5)", &buf, "ab");
n, err = buf.Write(data[2 : 26]);
if n != 24 {
t.Errorf("wrote 25 bytes, but n == %d\n", n);
}
check(t, "TestBasicOperations (6)", &buf, string(data[0 : 26]));
buf.Truncate(26);
check(t, "TestBasicOperations (7)", &buf, string(data[0 : 26]));
buf.Truncate(20);
check(t, "TestBasicOperations (8)", &buf, string(data[0 : 20]));
empty(t, "TestBasicOperations (9)", &buf, string(data[0 : 20]), make([]byte, 5));
empty(t, "TestBasicOperations (10)", &buf, "", make([]byte, 100));
buf.WriteByte(data[1]);
c, err := buf.ReadByte();
if err != nil {
t.Errorf("ReadByte unexpected eof\n");
}
if c != data[1] {
t.Errorf("ReadByte wrong value c=%v\n", c);
}
c, err = buf.ReadByte();
if err == nil {
t.Errorf("ReadByte unexpected not eof\n");
}
}
}
func TestLargeWrites(t *testing.T) {
var buf ByteBuffer;
for i := 3; i < 30; i += 3 {
s := fill(t, "TestLargeWrites (1)", &buf, "", 5, data);
empty(t, "TestLargeWrites (2)", &buf, s, make([]byte, len(data)/i));
}
check(t, "TestLargeWrites (3)", &buf, "");
}
func TestLargeReads(t *testing.T) {
var buf ByteBuffer;
for i := 3; i < 30; i += 3 {
s := fill(t, "TestLargeReads (1)", &buf, "", 5, data[0 : len(data)/i]);
empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(data)));
}
check(t, "TestLargeReads (3)", &buf, "");
}
func TestMixedReadsAndWrites(t *testing.T) {
var buf ByteBuffer;
s := "";
for i := 0; i < 50; i++ {
wlen := rand.Intn(len(data));
s = fill(t, "TestMixedReadsAndWrites (1)", &buf, s, 1, data[0 : wlen]);
rlen := rand.Intn(len(data));
fub := make([]byte, rlen);
n, err := buf.Read(fub);
s = s[n : len(s)];
}
empty(t, "TestMixedReadsAndWrites (2)", &buf, s, make([]byte, buf.Len()));
}
| src/pkg/io/bytebuffer_test.go | 0 | https://github.com/golang/go/commit/a45c54d1a5370f8138e7988f2a64562196566eaa | [
0.002786598401144147,
0.0007914960151538253,
0.00016628166486043483,
0.00017704220954328775,
0.001020593917928636
] |
{
"id": 3,
"code_window": [
"\tv[n] = p;\n",
"}\n",
"\n",
"// For sorting Files\n"
],
"labels": [
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t*vp = v;\n"
],
"file_path": "src/cmd/gobuild/gobuild.go",
"type": "add",
"edit_start_line_idx": 90
} | // Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gobuild
import (
"flag";
"fmt";
"gobuild";
"io";
"os";
"path";
"sort";
"strings";
"template";
"unicode";
"utf8";
)
type Pkg struct
type File struct {
Name string;
Pkg *Pkg;
Imports []string;
Deps []*Pkg;
Phase int;
}
type Pkg struct {
Name string;
Path string;
Files []*File;
}
type ArCmd struct {
Pkg *Pkg;
Files []*File;
}
type Phase struct {
Phase int;
ArCmds []*ArCmd;
}
type Info struct {
Args []string;
Char string;
Dir string;
ObjDir string;
Pkgmap map[string] *Pkg;
Packages []*Pkg;
Files map[string] *File;
Imports map[string] bool;
Phases []*Phase;
MaxPhase int;
}
var verbose = flag.Bool("v", false, "verbose mode")
var writeMakefile = flag.Bool("m", false, "write Makefile to standard output")
func PushPkg(vp *[]*Pkg, p *Pkg) {
v := *vp;
n := len(v);
if n >= cap(v) {
m := 2*n + 10;
a := make([]*Pkg, n, m);
for i := range v {
a[i] = v[i];
}
v = a;
}
*vp = v[0:n+1];
v[n] = p;
}
func PushFile(vp *[]*File, p *File) {
v := *vp;
n := len(v);
if n >= cap(v) {
m := 2*n + 10;
a := make([]*File, n, m);
for i := range v {
a[i] = v[i];
}
v = a;
}
*vp = v[0:n+1];
v[n] = p;
}
// For sorting Files
type FileArray []*File
func (a FileArray) Len() int {
return len(a)
}
func (a FileArray) Less(i, j int) bool {
return a[i].Name < a[j].Name
}
func (a FileArray) Swap(i, j int) {
a[i], a[j] = a[j], a[i]
}
// If current directory is under $GOROOT/src/pkg, return the
// path relative to there. Otherwise return "".
func PkgDir() string {
goroot, err := os.Getenv("GOROOT");
if err != nil || goroot == "" {
return ""
}
srcroot := path.Clean(goroot + "/src/pkg/");
pwd, err1 := os.Getenv("PWD"); // TODO(rsc): real pwd
if err1 != nil || pwd == "" {
return ""
}
if pwd == srcroot {
return ""
}
n := len(srcroot);
if len(pwd) < n || pwd[n] != '/' || pwd[0:n] != srcroot {
return ""
}
dir := pwd[n+1:len(pwd)];
return dir;
}
func ScanFiles(filenames []string) *Info {
// Build list of imports, local packages, and files.
// Exclude *_test.go and anything in package main.
// TODO(rsc): Build a binary from package main?
z := new(Info);
z.Args = os.Args;
z.Dir = PkgDir();
z.Char = theChar; // for template
z.ObjDir = ObjDir; // for template
z.Pkgmap = make(map[string] *Pkg);
z.Files = make(map[string] *File);
z.Imports = make(map[string] bool);
// Read Go files to find out packages and imports.
var pkg *Pkg;
for _, filename := range filenames {
if strings.Index(filename, "_test.") >= 0 {
continue;
}
f := new(File);
f.Name = filename;
if path.Ext(filename) == ".go" {
rune, _ := utf8.DecodeRuneInString(filename);
if rune != '_' && !unicode.IsLetter(rune) && !unicode.IsDecimalDigit(rune) {
// Ignore files with funny leading letters,
// to avoid editor files like .foo.go and ~foo.go.
continue;
}
pkgname, imp, err := PackageImports(filename);
if err != nil {
fatal("parsing %s: %s", filename, err);
}
if pkgname == "main" {
continue;
}
path := pkgname;
var ok bool;
pkg, ok = z.Pkgmap[path];
if !ok {
pkg = new(Pkg);
pkg.Name = pkgname;
pkg.Path = path;
z.Pkgmap[path] = pkg;
PushPkg(&z.Packages, pkg);
}
f.Pkg = pkg;
f.Imports = imp;
for _, name := range imp {
z.Imports[name] = true;
}
PushFile(&pkg.Files, f);
}
z.Files[filename] = f;
}
// Loop through files again, filling in more info.
for _, f := range z.Files {
if f.Pkg == nil {
// non-Go file: fill in package name.
// Must only be a single package in this directory.
if len(z.Pkgmap) != 1 {
fatal("cannot determine package for %s", f.Name);
}
f.Pkg = pkg;
}
// Go file: record dependencies on other packages in this directory.
for _, imp := range f.Imports {
pkg, ok := z.Pkgmap[imp];
if ok && pkg != f.Pkg {
PushPkg(&f.Deps, pkg);
}
}
}
// Update destination directory.
// If destination directory has same
// name as package name, cut it off.
dir, name := path.Split(z.Dir);
if len(z.Packages) == 1 && z.Packages[0].Name == name {
z.Dir = dir;
}
return z;
}
func PackageObj(pkg string) string {
return pkg + ".a"
}
func (z *Info) Build() {
// Create empty object directory tree.
RemoveAll(ObjDir);
obj := path.Join(ObjDir, z.Dir) + "/";
MkdirAll(obj);
// Create empty archives.
for pkgname := range z.Pkgmap {
ar := obj + PackageObj(pkgname);
os.Remove(ar);
Archive(ar, nil);
}
// Compile by repeated passes: build as many .6 as possible,
// put them in their archives, and repeat.
var pending, fail, success []*File;
for _, file := range z.Files {
PushFile(&pending, file);
}
sort.Sort(FileArray(pending));
var arfiles []string;
z.Phases = make([]*Phase, 0, len(z.Files));
for phase := 1; len(pending) > 0; phase++ {
// Run what we can.
fail = fail[0:0];
success = success[0:0];
for _, f := range pending {
if !Build(Compiler(f.Name), f.Name, 0) {
PushFile(&fail, f);
} else {
if *verbose {
fmt.Fprint(os.Stderr, f.Name, " ");
}
PushFile(&success, f);
}
}
if len(success) == 0 {
// Nothing ran; give up.
for _, f := range fail {
Build(Compiler(f.Name), f.Name, ShowErrors | ForceDisplay);
}
fatal("stalemate");
}
if *verbose {
fmt.Fprint(os.Stderr, "\n");
}
// Record phase data.
p := new(Phase);
p.ArCmds = make([]*ArCmd, 0, len(z.Pkgmap));
p.Phase = phase;
n := len(z.Phases);
z.Phases = z.Phases[0:n+1];
z.Phases[n] = p;
// Update archives.
for _, pkg := range z.Pkgmap {
arfiles = arfiles[0:0];
var files []*File;
for _, f := range success {
if f.Pkg == pkg {
PushString(&arfiles, Object(f.Name, theChar));
PushFile(&files, f);
}
f.Phase = phase;
}
if len(arfiles) > 0 {
Archive(obj + pkg.Name + ".a", arfiles);
n := len(p.ArCmds);
p.ArCmds = p.ArCmds[0:n+1];
p.ArCmds[n] = &ArCmd{pkg, files};
}
for _, filename := range arfiles {
os.Remove(filename);
}
}
pending, fail = fail, pending;
}
}
func (z *Info) Clean() {
RemoveAll(ObjDir);
for pkgname := range z.Pkgmap {
os.Remove(PackageObj(pkgname));
}
}
func Main() {
flag.Parse();
filenames := flag.Args();
if len(filenames) == 0 {
var err os.Error;
filenames, err= SourceFiles(".");
if err != nil {
fatal("reading .: %s", err.String());
}
}
state := ScanFiles(filenames);
state.Build();
if *writeMakefile {
t, err := template.Parse(makefileTemplate, makefileMap);
if err != nil {
fatal("template.Parse: %s", err.String());
}
err = t.Execute(state, os.Stdout);
if err != nil {
fatal("template.Expand: %s", err.String());
}
}
}
| src/cmd/gobuild/gobuild.go | 1 | https://github.com/golang/go/commit/a45c54d1a5370f8138e7988f2a64562196566eaa | [
0.996109664440155,
0.04642590135335922,
0.0001671735371928662,
0.0002615459088701755,
0.17546942830085754
] |
{
"id": 3,
"code_window": [
"\tv[n] = p;\n",
"}\n",
"\n",
"// For sorting Files\n"
],
"labels": [
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t*vp = v;\n"
],
"file_path": "src/cmd/gobuild/gobuild.go",
"type": "add",
"edit_start_line_idx": 90
} | // $G $F.go && $L $F.$A && ./$A.out
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import "fmt"
var result string
func addInt(i int) {
result += fmt.Sprint(i)
}
func test1helper() {
for i := 0; i < 10; i++ {
defer addInt(i)
}
}
func test1() {
result = "";
test1helper();
if result != "9876543210" {
fmt.Printf("test1: bad defer result (should be 9876543210): %q\n", result);
}
}
func addDotDotDot(v ...) {
result += fmt.Sprint(v)
}
func test2helper() {
for i := 0; i < 10; i++ {
defer addDotDotDot(i)
}
}
func test2() {
result = "";
test2helper();
if result != "9876543210" {
fmt.Printf("test2: bad defer result (should be 9876543210): %q\n", result);
}
}
func main() {
test1();
test2();
}
| test/defer.go | 0 | https://github.com/golang/go/commit/a45c54d1a5370f8138e7988f2a64562196566eaa | [
0.002308156108483672,
0.0005283578648231924,
0.00016579499060753733,
0.0001741576852509752,
0.0007959582726471126
] |
{
"id": 3,
"code_window": [
"\tv[n] = p;\n",
"}\n",
"\n",
"// For sorting Files\n"
],
"labels": [
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t*vp = v;\n"
],
"file_path": "src/cmd/gobuild/gobuild.go",
"type": "add",
"edit_start_line_idx": 90
} | // Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
TEXT _rt0_arm_linux(SB),7,$0
B _rt0_arm(SB)
| src/pkg/runtime/linux/arm/rt0.s | 0 | https://github.com/golang/go/commit/a45c54d1a5370f8138e7988f2a64562196566eaa | [
0.00017662333266343921,
0.00017662333266343921,
0.00017662333266343921,
0.00017662333266343921,
0
] |
{
"id": 3,
"code_window": [
"\tv[n] = p;\n",
"}\n",
"\n",
"// For sorting Files\n"
],
"labels": [
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t*vp = v;\n"
],
"file_path": "src/cmd/gobuild/gobuild.go",
"type": "add",
"edit_start_line_idx": 90
} | // Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package os
import "syscall"
// An operating-system independent representation of Unix data structures.
// OS-specific routines in this directory convert the OS-local versions to these.
// Getpagesize returns the underlying system's memory page size.
func Getpagesize() int{
return syscall.Getpagesize()
}
// A Dir describes a file and is returned by Stat, Fstat, and Lstat
type Dir struct {
Dev uint64; // device number of file system holding file.
Ino uint64; // inode number.
Nlink uint64; // number of hard links.
Mode uint32; // permission and mode bits.
Uid uint32; // user id of owner.
Gid uint32; // group id of owner.
Rdev uint64; // device type for special file.
Size uint64; // length in bytes.
Blksize uint64; // size of blocks, in bytes.
Blocks uint64; // number of blocks allocated for file.
Atime_ns uint64; // access time; nanoseconds since epoch.
Mtime_ns uint64; // modified time; nanoseconds since epoch.
Ctime_ns uint64; // status change time; nanoseconds since epoch.
Name string; // name of file as presented to Open.
FollowedSymlink bool; // followed a symlink to get this information
}
// IsFifo reports whether the Dir describes a FIFO file.
func (dir *Dir) IsFifo() bool {
return (dir.Mode & syscall.S_IFMT) == syscall.S_IFIFO
}
// IsChar reports whether the Dir describes a character special file.
func (dir *Dir) IsChar() bool {
return (dir.Mode & syscall.S_IFMT) == syscall.S_IFCHR
}
// IsDirectory reports whether the Dir describes a directory.
func (dir *Dir) IsDirectory() bool {
return (dir.Mode & syscall.S_IFMT) == syscall.S_IFDIR
}
// IsBlock reports whether the Dir describes a block special file.
func (dir *Dir) IsBlock() bool {
return (dir.Mode & syscall.S_IFMT) == syscall.S_IFBLK
}
// IsRegular reports whether the Dir describes a regular file.
func (dir *Dir) IsRegular() bool {
return (dir.Mode & syscall.S_IFMT) == syscall.S_IFREG
}
// IsSymlink reports whether the Dir describes a symbolic link.
func (dir *Dir) IsSymlink() bool {
return (dir.Mode & syscall.S_IFMT) == syscall.S_IFLNK
}
// IsSocket reports whether the Dir describes a socket.
func (dir *Dir) IsSocket() bool {
return (dir.Mode & syscall.S_IFMT) == syscall.S_IFSOCK
}
// Permission returns the file permission bits.
func (dir *Dir) Permission() int {
return int(dir.Mode & 0777)
}
| src/pkg/os/types.go | 0 | https://github.com/golang/go/commit/a45c54d1a5370f8138e7988f2a64562196566eaa | [
0.0003134807338938117,
0.00020269639207981527,
0.00017118555842898786,
0.00017935049254447222,
0.000045472130295820534
] |
{
"id": 4,
"code_window": [
"\t\tfor i := range v {\n",
"\t\t\ta[i] = v[i];\n",
"\t\t}\n",
"\t\tv = a;\n",
"\t}\n",
"\t*vp = v[0:n+1];\n",
"\tv[n] = p;\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\tv = v[0:n+1];\n"
],
"file_path": "src/cmd/gobuild/util.go",
"type": "replace",
"edit_start_line_idx": 84
} | // Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gobuild
import (
"flag";
"fmt";
"gobuild";
"io";
"os";
"path";
"sort";
"strings";
"template";
"unicode";
"utf8";
)
type Pkg struct
type File struct {
Name string;
Pkg *Pkg;
Imports []string;
Deps []*Pkg;
Phase int;
}
type Pkg struct {
Name string;
Path string;
Files []*File;
}
type ArCmd struct {
Pkg *Pkg;
Files []*File;
}
type Phase struct {
Phase int;
ArCmds []*ArCmd;
}
type Info struct {
Args []string;
Char string;
Dir string;
ObjDir string;
Pkgmap map[string] *Pkg;
Packages []*Pkg;
Files map[string] *File;
Imports map[string] bool;
Phases []*Phase;
MaxPhase int;
}
var verbose = flag.Bool("v", false, "verbose mode")
var writeMakefile = flag.Bool("m", false, "write Makefile to standard output")
func PushPkg(vp *[]*Pkg, p *Pkg) {
v := *vp;
n := len(v);
if n >= cap(v) {
m := 2*n + 10;
a := make([]*Pkg, n, m);
for i := range v {
a[i] = v[i];
}
v = a;
}
*vp = v[0:n+1];
v[n] = p;
}
func PushFile(vp *[]*File, p *File) {
v := *vp;
n := len(v);
if n >= cap(v) {
m := 2*n + 10;
a := make([]*File, n, m);
for i := range v {
a[i] = v[i];
}
v = a;
}
*vp = v[0:n+1];
v[n] = p;
}
// For sorting Files
type FileArray []*File
func (a FileArray) Len() int {
return len(a)
}
func (a FileArray) Less(i, j int) bool {
return a[i].Name < a[j].Name
}
func (a FileArray) Swap(i, j int) {
a[i], a[j] = a[j], a[i]
}
// If current directory is under $GOROOT/src/pkg, return the
// path relative to there. Otherwise return "".
func PkgDir() string {
goroot, err := os.Getenv("GOROOT");
if err != nil || goroot == "" {
return ""
}
srcroot := path.Clean(goroot + "/src/pkg/");
pwd, err1 := os.Getenv("PWD"); // TODO(rsc): real pwd
if err1 != nil || pwd == "" {
return ""
}
if pwd == srcroot {
return ""
}
n := len(srcroot);
if len(pwd) < n || pwd[n] != '/' || pwd[0:n] != srcroot {
return ""
}
dir := pwd[n+1:len(pwd)];
return dir;
}
func ScanFiles(filenames []string) *Info {
// Build list of imports, local packages, and files.
// Exclude *_test.go and anything in package main.
// TODO(rsc): Build a binary from package main?
z := new(Info);
z.Args = os.Args;
z.Dir = PkgDir();
z.Char = theChar; // for template
z.ObjDir = ObjDir; // for template
z.Pkgmap = make(map[string] *Pkg);
z.Files = make(map[string] *File);
z.Imports = make(map[string] bool);
// Read Go files to find out packages and imports.
var pkg *Pkg;
for _, filename := range filenames {
if strings.Index(filename, "_test.") >= 0 {
continue;
}
f := new(File);
f.Name = filename;
if path.Ext(filename) == ".go" {
rune, _ := utf8.DecodeRuneInString(filename);
if rune != '_' && !unicode.IsLetter(rune) && !unicode.IsDecimalDigit(rune) {
// Ignore files with funny leading letters,
// to avoid editor files like .foo.go and ~foo.go.
continue;
}
pkgname, imp, err := PackageImports(filename);
if err != nil {
fatal("parsing %s: %s", filename, err);
}
if pkgname == "main" {
continue;
}
path := pkgname;
var ok bool;
pkg, ok = z.Pkgmap[path];
if !ok {
pkg = new(Pkg);
pkg.Name = pkgname;
pkg.Path = path;
z.Pkgmap[path] = pkg;
PushPkg(&z.Packages, pkg);
}
f.Pkg = pkg;
f.Imports = imp;
for _, name := range imp {
z.Imports[name] = true;
}
PushFile(&pkg.Files, f);
}
z.Files[filename] = f;
}
// Loop through files again, filling in more info.
for _, f := range z.Files {
if f.Pkg == nil {
// non-Go file: fill in package name.
// Must only be a single package in this directory.
if len(z.Pkgmap) != 1 {
fatal("cannot determine package for %s", f.Name);
}
f.Pkg = pkg;
}
// Go file: record dependencies on other packages in this directory.
for _, imp := range f.Imports {
pkg, ok := z.Pkgmap[imp];
if ok && pkg != f.Pkg {
PushPkg(&f.Deps, pkg);
}
}
}
// Update destination directory.
// If destination directory has same
// name as package name, cut it off.
dir, name := path.Split(z.Dir);
if len(z.Packages) == 1 && z.Packages[0].Name == name {
z.Dir = dir;
}
return z;
}
func PackageObj(pkg string) string {
return pkg + ".a"
}
func (z *Info) Build() {
// Create empty object directory tree.
RemoveAll(ObjDir);
obj := path.Join(ObjDir, z.Dir) + "/";
MkdirAll(obj);
// Create empty archives.
for pkgname := range z.Pkgmap {
ar := obj + PackageObj(pkgname);
os.Remove(ar);
Archive(ar, nil);
}
// Compile by repeated passes: build as many .6 as possible,
// put them in their archives, and repeat.
var pending, fail, success []*File;
for _, file := range z.Files {
PushFile(&pending, file);
}
sort.Sort(FileArray(pending));
var arfiles []string;
z.Phases = make([]*Phase, 0, len(z.Files));
for phase := 1; len(pending) > 0; phase++ {
// Run what we can.
fail = fail[0:0];
success = success[0:0];
for _, f := range pending {
if !Build(Compiler(f.Name), f.Name, 0) {
PushFile(&fail, f);
} else {
if *verbose {
fmt.Fprint(os.Stderr, f.Name, " ");
}
PushFile(&success, f);
}
}
if len(success) == 0 {
// Nothing ran; give up.
for _, f := range fail {
Build(Compiler(f.Name), f.Name, ShowErrors | ForceDisplay);
}
fatal("stalemate");
}
if *verbose {
fmt.Fprint(os.Stderr, "\n");
}
// Record phase data.
p := new(Phase);
p.ArCmds = make([]*ArCmd, 0, len(z.Pkgmap));
p.Phase = phase;
n := len(z.Phases);
z.Phases = z.Phases[0:n+1];
z.Phases[n] = p;
// Update archives.
for _, pkg := range z.Pkgmap {
arfiles = arfiles[0:0];
var files []*File;
for _, f := range success {
if f.Pkg == pkg {
PushString(&arfiles, Object(f.Name, theChar));
PushFile(&files, f);
}
f.Phase = phase;
}
if len(arfiles) > 0 {
Archive(obj + pkg.Name + ".a", arfiles);
n := len(p.ArCmds);
p.ArCmds = p.ArCmds[0:n+1];
p.ArCmds[n] = &ArCmd{pkg, files};
}
for _, filename := range arfiles {
os.Remove(filename);
}
}
pending, fail = fail, pending;
}
}
func (z *Info) Clean() {
RemoveAll(ObjDir);
for pkgname := range z.Pkgmap {
os.Remove(PackageObj(pkgname));
}
}
func Main() {
flag.Parse();
filenames := flag.Args();
if len(filenames) == 0 {
var err os.Error;
filenames, err= SourceFiles(".");
if err != nil {
fatal("reading .: %s", err.String());
}
}
state := ScanFiles(filenames);
state.Build();
if *writeMakefile {
t, err := template.Parse(makefileTemplate, makefileMap);
if err != nil {
fatal("template.Parse: %s", err.String());
}
err = t.Execute(state, os.Stdout);
if err != nil {
fatal("template.Expand: %s", err.String());
}
}
}
| src/cmd/gobuild/gobuild.go | 1 | https://github.com/golang/go/commit/a45c54d1a5370f8138e7988f2a64562196566eaa | [
0.9978982210159302,
0.08623958379030228,
0.00016409640375059098,
0.0001741392188705504,
0.27839845418930054
] |
{
"id": 4,
"code_window": [
"\t\tfor i := range v {\n",
"\t\t\ta[i] = v[i];\n",
"\t\t}\n",
"\t\tv = a;\n",
"\t}\n",
"\t*vp = v[0:n+1];\n",
"\tv[n] = p;\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\tv = v[0:n+1];\n"
],
"file_path": "src/cmd/gobuild/util.go",
"type": "replace",
"edit_start_line_idx": 84
} | /*
* The authors of this software are Rob Pike and Ken Thompson,
* with contributions from Mike Burrows and Sean Dorward.
*
* Copyright (c) 2002-2006 by Lucent Technologies.
* Portions Copyright (c) 2004 Google Inc.
*
* Permission to use, copy, modify, and distribute this software for any
* purpose without fee is hereby granted, provided that this entire notice
* is included in all copies of any software which is or includes a copy
* or modification of this software and in all copies of the supporting
* documentation for such software.
* THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED
* WARRANTY. IN PARTICULAR, NEITHER THE AUTHORS NOR LUCENT TECHNOLOGIES
* NOR GOOGLE INC MAKE ANY REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING
* THE MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE.
*/
#include <u.h>
#include <libc.h>
#include "fmtdef.h"
int
snprint(char *buf, int len, char *fmt, ...)
{
int n;
va_list args;
va_start(args, fmt);
n = vsnprint(buf, len, fmt, args);
va_end(args);
return n;
}
| src/lib9/fmt/snprint.c | 0 | https://github.com/golang/go/commit/a45c54d1a5370f8138e7988f2a64562196566eaa | [
0.002050933660939336,
0.000901943480130285,
0.00017290690448135138,
0.0006919666193425655,
0.0007860591867938638
] |
{
"id": 4,
"code_window": [
"\t\tfor i := range v {\n",
"\t\t\ta[i] = v[i];\n",
"\t\t}\n",
"\t\tv = a;\n",
"\t}\n",
"\t*vp = v[0:n+1];\n",
"\tv[n] = p;\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\tv = v[0:n+1];\n"
],
"file_path": "src/cmd/gobuild/util.go",
"type": "replace",
"edit_start_line_idx": 84
} | // Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
The flag package implements command-line flag parsing.
Usage:
1) Define flags using flag.String(), Bool(), Int(), etc. Example:
import flag "flag"
var ip *int = flag.Int("flagname", 1234, "help message for flagname")
If you like, you can bind the flag to a variable using the Var() functions.
var flagvar int
func init() {
flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
}
2) After all flags are defined, call
flag.Parse()
to parse the command line into the defined flags.
3) Flags may then be used directly. If you're using the flags themselves,
they are all pointers; if you bind to variables, they're values.
print("ip has value ", *ip, "\n");
print("flagvar has value ", flagvar, "\n");
4) After parsing, flag.Arg(i) is the i'th argument after the flags.
Args are indexed from 0 up to flag.NArg().
Command line flag syntax:
-flag
-flag=x
-flag x
One or two minus signs may be used; they are equivalent.
Flag parsing stops just before the first non-flag argument
("-" is a non-flag argument) or after the terminator "--".
Integer flags accept 1234, 0664, 0x1234 and may be negative.
Boolean flags may be 1, 0, t, f, true, false, TRUE, FALSE, True, False.
*/
package flag
import (
"fmt";
"os";
"strconv"
)
// BUG: atob belongs elsewhere
func atob(str string) (value bool, ok bool) {
switch str {
case "1", "t", "T", "true", "TRUE", "True":
return true, true;
case "0", "f", "F", "false", "FALSE", "False":
return false, true
}
return false, false
}
type (
boolValue struct;
intValue struct;
int64Value struct;
uintValue struct;
uint64Value struct;
stringValue struct;
)
// -- Bool Value
type boolValue struct {
p *bool;
}
func newBoolValue(val bool, p *bool) *boolValue {
*p = val;
return &boolValue{p}
}
func (b *boolValue) set(s string) bool {
v, ok := atob(s);
*b.p = v;
return ok
}
func (b *boolValue) String() string {
return fmt.Sprintf("%v", *b.p)
}
// -- Int Value
type intValue struct {
p *int;
}
func newIntValue(val int, p *int) *intValue {
*p = val;
return &intValue{p}
}
func (i *intValue) set(s string) bool {
v, err := strconv.Atoi(s);
*i.p = int(v);
return err == nil
}
func (i *intValue) String() string {
return fmt.Sprintf("%v", *i.p)
}
// -- Int64 Value
type int64Value struct {
p *int64;
}
func newInt64Value(val int64, p *int64) *int64Value {
*p = val;
return &int64Value{p}
}
func (i *int64Value) set(s string) bool {
v, err := strconv.Atoi64(s);
*i.p = v;
return err == nil;
}
func (i *int64Value) String() string {
return fmt.Sprintf("%v", *i.p)
}
// -- Uint Value
type uintValue struct {
p *uint;
}
func newUintValue(val uint, p *uint) *uintValue {
*p = val;
return &uintValue{p}
}
func (i *uintValue) set(s string) bool {
v, err := strconv.Atoui(s);
*i.p = uint(v);
return err == nil;
}
func (i *uintValue) String() string {
return fmt.Sprintf("%v", *i.p)
}
// -- uint64 Value
type uint64Value struct {
p *uint64;
}
func newUint64Value(val uint64, p *uint64) *uint64Value {
*p = val;
return &uint64Value{p}
}
func (i *uint64Value) set(s string) bool {
v, err := strconv.Atoui64(s);
*i.p = uint64(v);
return err == nil;
}
func (i *uint64Value) String() string {
return fmt.Sprintf("%v", *i.p)
}
// -- string Value
type stringValue struct {
p *string;
}
func newStringValue(val string, p *string) *stringValue {
*p = val;
return &stringValue{p}
}
func (s *stringValue) set(val string) bool {
*s.p = val;
return true;
}
func (s *stringValue) String() string {
return fmt.Sprintf("%s", *s.p)
}
// FlagValue is the interface to the dynamic value stored in a flag.
// (The default value is represented as a string.)
type FlagValue interface {
String() string;
set(string) bool;
}
// A Flag represents the state of a flag.
type Flag struct {
Name string; // name as it appears on command line
Usage string; // help message
Value FlagValue; // value as set
DefValue string; // default value (as text); for usage message
}
type allFlags struct {
actual map[string] *Flag;
formal map[string] *Flag;
first_arg int; // 0 is the program name, 1 is first arg
}
var flags *allFlags = &allFlags{make(map[string] *Flag), make(map[string] *Flag), 1}
// VisitAll visits the flags, calling fn for each. It visits all flags, even those not set.
func VisitAll(fn func(*Flag)) {
for k, f := range flags.formal {
fn(f)
}
}
// Visit visits the flags, calling fn for each. It visits only those flags that have been set.
func Visit(fn func(*Flag)) {
for k, f := range flags.actual {
fn(f)
}
}
// Lookup returns the Flag structure of the named flag, returning nil if none exists.
func Lookup(name string) *Flag {
f, ok := flags.formal[name];
if !ok {
return nil
}
return f
}
// Set sets the value of the named flag. It returns true if the set succeeded; false if
// there is no such flag defined.
func Set(name, value string) bool {
f, ok := flags.formal[name];
if !ok {
return false
}
ok = f.Value.set(value);
if !ok {
return false
}
flags.actual[name] = f;
return true;
}
// PrintDefaults prints to standard error the default values of all defined flags.
func PrintDefaults() {
VisitAll(func(f *Flag) {
format := " -%s=%s: %s\n";
if s, ok := f.Value.(*stringValue); ok {
// put quotes on the value
format = " -%s=%q: %s\n";
}
fmt.Fprintf(os.Stderr, format, f.Name, f.DefValue, f.Usage);
})
}
// Usage prints to standard error a default usage message documenting all defined flags and
// then calls os.Exit(1).
func Usage() {
if len(os.Args) > 0 {
fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]);
} else {
fmt.Fprintln(os.Stderr, "Usage:");
}
PrintDefaults();
os.Exit(1);
}
func NFlag() int {
return len(flags.actual)
}
// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument
// after flags have been processed.
func Arg(i int) string {
i += flags.first_arg;
if i < 0 || i >= len(os.Args) {
return "";
}
return os.Args[i]
}
// NArg is the number of arguments remaining after flags have been processed.
func NArg() int {
return len(os.Args) - flags.first_arg
}
// Args returns the non-flag command-line arguments.
func Args() []string {
return os.Args[flags.first_arg:len(os.Args)];
}
func add(name string, value FlagValue, usage string) {
// Remember the default value as a string; it won't change.
f := &Flag{name, usage, value, value.String()};
dummy, alreadythere := flags.formal[name];
if alreadythere {
print("flag redefined: ", name, "\n");
panic("flag redefinition"); // Happens only if flags are declared with identical names
}
flags.formal[name] = f;
}
// BoolVar defines a bool flag with specified name, default value, and usage string.
// The argument p points to a bool variable in which to store the value of the flag.
func BoolVar(p *bool, name string, value bool, usage string) {
add(name, newBoolValue(value, p), usage);
}
// Bool defines a bool flag with specified name, default value, and usage string.
// The return value is the address of a bool variable that stores the value of the flag.
func Bool(name string, value bool, usage string) *bool {
p := new(bool);
BoolVar(p, name, value, usage);
return p;
}
// IntVar defines an int flag with specified name, default value, and usage string.
// The argument p points to an int variable in which to store the value of the flag.
func IntVar(p *int, name string, value int, usage string) {
add(name, newIntValue(value, p), usage);
}
// Int defines an int flag with specified name, default value, and usage string.
// The return value is the address of an int variable that stores the value of the flag.
func Int(name string, value int, usage string) *int {
p := new(int);
IntVar(p, name, value, usage);
return p;
}
// Int64Var defines an int64 flag with specified name, default value, and usage string.
// The argument p points to an int64 variable in which to store the value of the flag.
func Int64Var(p *int64, name string, value int64, usage string) {
add(name, newInt64Value(value, p), usage);
}
// Int64 defines an int64 flag with specified name, default value, and usage string.
// The return value is the address of an int64 variable that stores the value of the flag.
func Int64(name string, value int64, usage string) *int64 {
p := new(int64);
Int64Var(p, name, value, usage);
return p;
}
// UintVar defines a uint flag with specified name, default value, and usage string.
// The argument p points to a uint variable in which to store the value of the flag.
func UintVar(p *uint, name string, value uint, usage string) {
add(name, newUintValue(value, p), usage);
}
// Uint defines a uint flag with specified name, default value, and usage string.
// The return value is the address of a uint variable that stores the value of the flag.
func Uint(name string, value uint, usage string) *uint {
p := new(uint);
UintVar(p, name, value, usage);
return p;
}
// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
// The argument p points to a uint64 variable in which to store the value of the flag.
func Uint64Var(p *uint64, name string, value uint64, usage string) {
add(name, newUint64Value(value, p), usage);
}
// Uint64 defines a uint64 flag with specified name, default value, and usage string.
// The return value is the address of a uint64 variable that stores the value of the flag.
func Uint64(name string, value uint64, usage string) *uint64 {
p := new(uint64);
Uint64Var(p, name, value, usage);
return p;
}
// StringVar defines a string flag with specified name, default value, and usage string.
// The argument p points to a string variable in which to store the value of the flag.
func StringVar(p *string, name, value string, usage string) {
add(name, newStringValue(value, p), usage);
}
// String defines a string flag with specified name, default value, and usage string.
// The return value is the address of a string variable that stores the value of the flag.
func String(name, value string, usage string) *string {
p := new(string);
StringVar(p, name, value, usage);
return p;
}
func (f *allFlags) parseOne(index int) (ok bool, next int)
{
s := os.Args[index];
f.first_arg = index; // until proven otherwise
if len(s) == 0 {
return false, -1
}
if s[0] != '-' {
return false, -1
}
num_minuses := 1;
if len(s) == 1 {
return false, index
}
if s[1] == '-' {
num_minuses++;
if len(s) == 2 { // "--" terminates the flags
return false, index + 1
}
}
name := s[num_minuses : len(s)];
if len(name) == 0 || name[0] == '-' || name[0] == '=' {
print("bad flag syntax: ", s, "\n");
Usage();
}
// it's a flag. does it have an argument?
has_value := false;
value := "";
for i := 1; i < len(name); i++ { // equals cannot be first
if name[i] == '=' {
value = name[i+1 : len(name)];
has_value = true;
name = name[0 : i];
break;
}
}
flag, alreadythere := flags.actual[name];
if alreadythere {
print("flag specified twice: -", name, "\n");
Usage();
}
m := flags.formal;
flag, alreadythere = m[name]; // BUG
if !alreadythere {
print("flag provided but not defined: -", name, "\n");
Usage();
}
if f, ok := flag.Value.(*boolValue); ok { // special case: doesn't need an arg
if has_value {
if !f.set(value) {
print("invalid boolean value ", value, " for flag: -", name, "\n");
Usage();
}
} else {
f.set("true")
}
} else {
// It must have a value, which might be the next argument.
if !has_value && index < len(os.Args)-1 {
// value is the next arg
has_value = true;
index++;
value = os.Args[index];
}
if !has_value {
print("flag needs an argument: -", name, "\n");
Usage();
}
ok = flag.Value.set(value);
if !ok {
print("invalid value ", value, " for flag: -", name, "\n");
Usage();
}
}
flags.actual[name] = flag;
return true, index + 1
}
// Parse parses the command-line flags. Must be called after all flags are defined
// and before any are accessed by the program.
func Parse() {
for i := 1; i < len(os.Args); {
ok, next := flags.parseOne(i);
if next > 0 {
flags.first_arg = next;
i = next;
}
if !ok {
break
}
}
}
| src/pkg/flag/flag.go | 0 | https://github.com/golang/go/commit/a45c54d1a5370f8138e7988f2a64562196566eaa | [
0.031918711960315704,
0.002775732660666108,
0.00016407699149567634,
0.00019816512940451503,
0.005457877181470394
] |
{
"id": 4,
"code_window": [
"\t\tfor i := range v {\n",
"\t\t\ta[i] = v[i];\n",
"\t\t}\n",
"\t\tv = a;\n",
"\t}\n",
"\t*vp = v[0:n+1];\n",
"\tv[n] = p;\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\tv = v[0:n+1];\n"
],
"file_path": "src/cmd/gobuild/util.go",
"type": "replace",
"edit_start_line_idx": 84
} | // Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"gobuild";
)
func main() {
gobuild.Main();
}
| src/cmd/gobuild/main.go | 0 | https://github.com/golang/go/commit/a45c54d1a5370f8138e7988f2a64562196566eaa | [
0.00017964783182833344,
0.00017672928515821695,
0.0001738107530400157,
0.00017672928515821695,
0.00000291853939415887
] |
{
"id": 5,
"code_window": [
"\tv[n] = p;\n",
"}\n",
"\n",
"func run(argv []string, flag int) (ok bool) {\n"
],
"labels": [
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t*vp = v;\n"
],
"file_path": "src/cmd/gobuild/util.go",
"type": "add",
"edit_start_line_idx": 86
} | // Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gobuild
import (
"flag";
"fmt";
"gobuild";
"io";
"os";
"path";
"sort";
"strings";
"template";
"unicode";
"utf8";
)
type Pkg struct
type File struct {
Name string;
Pkg *Pkg;
Imports []string;
Deps []*Pkg;
Phase int;
}
type Pkg struct {
Name string;
Path string;
Files []*File;
}
type ArCmd struct {
Pkg *Pkg;
Files []*File;
}
type Phase struct {
Phase int;
ArCmds []*ArCmd;
}
type Info struct {
Args []string;
Char string;
Dir string;
ObjDir string;
Pkgmap map[string] *Pkg;
Packages []*Pkg;
Files map[string] *File;
Imports map[string] bool;
Phases []*Phase;
MaxPhase int;
}
var verbose = flag.Bool("v", false, "verbose mode")
var writeMakefile = flag.Bool("m", false, "write Makefile to standard output")
func PushPkg(vp *[]*Pkg, p *Pkg) {
v := *vp;
n := len(v);
if n >= cap(v) {
m := 2*n + 10;
a := make([]*Pkg, n, m);
for i := range v {
a[i] = v[i];
}
v = a;
}
*vp = v[0:n+1];
v[n] = p;
}
func PushFile(vp *[]*File, p *File) {
v := *vp;
n := len(v);
if n >= cap(v) {
m := 2*n + 10;
a := make([]*File, n, m);
for i := range v {
a[i] = v[i];
}
v = a;
}
*vp = v[0:n+1];
v[n] = p;
}
// For sorting Files
type FileArray []*File
func (a FileArray) Len() int {
return len(a)
}
func (a FileArray) Less(i, j int) bool {
return a[i].Name < a[j].Name
}
func (a FileArray) Swap(i, j int) {
a[i], a[j] = a[j], a[i]
}
// If current directory is under $GOROOT/src/pkg, return the
// path relative to there. Otherwise return "".
func PkgDir() string {
goroot, err := os.Getenv("GOROOT");
if err != nil || goroot == "" {
return ""
}
srcroot := path.Clean(goroot + "/src/pkg/");
pwd, err1 := os.Getenv("PWD"); // TODO(rsc): real pwd
if err1 != nil || pwd == "" {
return ""
}
if pwd == srcroot {
return ""
}
n := len(srcroot);
if len(pwd) < n || pwd[n] != '/' || pwd[0:n] != srcroot {
return ""
}
dir := pwd[n+1:len(pwd)];
return dir;
}
func ScanFiles(filenames []string) *Info {
// Build list of imports, local packages, and files.
// Exclude *_test.go and anything in package main.
// TODO(rsc): Build a binary from package main?
z := new(Info);
z.Args = os.Args;
z.Dir = PkgDir();
z.Char = theChar; // for template
z.ObjDir = ObjDir; // for template
z.Pkgmap = make(map[string] *Pkg);
z.Files = make(map[string] *File);
z.Imports = make(map[string] bool);
// Read Go files to find out packages and imports.
var pkg *Pkg;
for _, filename := range filenames {
if strings.Index(filename, "_test.") >= 0 {
continue;
}
f := new(File);
f.Name = filename;
if path.Ext(filename) == ".go" {
rune, _ := utf8.DecodeRuneInString(filename);
if rune != '_' && !unicode.IsLetter(rune) && !unicode.IsDecimalDigit(rune) {
// Ignore files with funny leading letters,
// to avoid editor files like .foo.go and ~foo.go.
continue;
}
pkgname, imp, err := PackageImports(filename);
if err != nil {
fatal("parsing %s: %s", filename, err);
}
if pkgname == "main" {
continue;
}
path := pkgname;
var ok bool;
pkg, ok = z.Pkgmap[path];
if !ok {
pkg = new(Pkg);
pkg.Name = pkgname;
pkg.Path = path;
z.Pkgmap[path] = pkg;
PushPkg(&z.Packages, pkg);
}
f.Pkg = pkg;
f.Imports = imp;
for _, name := range imp {
z.Imports[name] = true;
}
PushFile(&pkg.Files, f);
}
z.Files[filename] = f;
}
// Loop through files again, filling in more info.
for _, f := range z.Files {
if f.Pkg == nil {
// non-Go file: fill in package name.
// Must only be a single package in this directory.
if len(z.Pkgmap) != 1 {
fatal("cannot determine package for %s", f.Name);
}
f.Pkg = pkg;
}
// Go file: record dependencies on other packages in this directory.
for _, imp := range f.Imports {
pkg, ok := z.Pkgmap[imp];
if ok && pkg != f.Pkg {
PushPkg(&f.Deps, pkg);
}
}
}
// Update destination directory.
// If destination directory has same
// name as package name, cut it off.
dir, name := path.Split(z.Dir);
if len(z.Packages) == 1 && z.Packages[0].Name == name {
z.Dir = dir;
}
return z;
}
func PackageObj(pkg string) string {
return pkg + ".a"
}
func (z *Info) Build() {
// Create empty object directory tree.
RemoveAll(ObjDir);
obj := path.Join(ObjDir, z.Dir) + "/";
MkdirAll(obj);
// Create empty archives.
for pkgname := range z.Pkgmap {
ar := obj + PackageObj(pkgname);
os.Remove(ar);
Archive(ar, nil);
}
// Compile by repeated passes: build as many .6 as possible,
// put them in their archives, and repeat.
var pending, fail, success []*File;
for _, file := range z.Files {
PushFile(&pending, file);
}
sort.Sort(FileArray(pending));
var arfiles []string;
z.Phases = make([]*Phase, 0, len(z.Files));
for phase := 1; len(pending) > 0; phase++ {
// Run what we can.
fail = fail[0:0];
success = success[0:0];
for _, f := range pending {
if !Build(Compiler(f.Name), f.Name, 0) {
PushFile(&fail, f);
} else {
if *verbose {
fmt.Fprint(os.Stderr, f.Name, " ");
}
PushFile(&success, f);
}
}
if len(success) == 0 {
// Nothing ran; give up.
for _, f := range fail {
Build(Compiler(f.Name), f.Name, ShowErrors | ForceDisplay);
}
fatal("stalemate");
}
if *verbose {
fmt.Fprint(os.Stderr, "\n");
}
// Record phase data.
p := new(Phase);
p.ArCmds = make([]*ArCmd, 0, len(z.Pkgmap));
p.Phase = phase;
n := len(z.Phases);
z.Phases = z.Phases[0:n+1];
z.Phases[n] = p;
// Update archives.
for _, pkg := range z.Pkgmap {
arfiles = arfiles[0:0];
var files []*File;
for _, f := range success {
if f.Pkg == pkg {
PushString(&arfiles, Object(f.Name, theChar));
PushFile(&files, f);
}
f.Phase = phase;
}
if len(arfiles) > 0 {
Archive(obj + pkg.Name + ".a", arfiles);
n := len(p.ArCmds);
p.ArCmds = p.ArCmds[0:n+1];
p.ArCmds[n] = &ArCmd{pkg, files};
}
for _, filename := range arfiles {
os.Remove(filename);
}
}
pending, fail = fail, pending;
}
}
func (z *Info) Clean() {
RemoveAll(ObjDir);
for pkgname := range z.Pkgmap {
os.Remove(PackageObj(pkgname));
}
}
func Main() {
flag.Parse();
filenames := flag.Args();
if len(filenames) == 0 {
var err os.Error;
filenames, err= SourceFiles(".");
if err != nil {
fatal("reading .: %s", err.String());
}
}
state := ScanFiles(filenames);
state.Build();
if *writeMakefile {
t, err := template.Parse(makefileTemplate, makefileMap);
if err != nil {
fatal("template.Parse: %s", err.String());
}
err = t.Execute(state, os.Stdout);
if err != nil {
fatal("template.Expand: %s", err.String());
}
}
}
| src/cmd/gobuild/gobuild.go | 1 | https://github.com/golang/go/commit/a45c54d1a5370f8138e7988f2a64562196566eaa | [
0.987241268157959,
0.08187345415353775,
0.00016991273150779307,
0.0002224165655206889,
0.23940670490264893
] |
{
"id": 5,
"code_window": [
"\tv[n] = p;\n",
"}\n",
"\n",
"func run(argv []string, flag int) (ok bool) {\n"
],
"labels": [
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t*vp = v;\n"
],
"file_path": "src/cmd/gobuild/util.go",
"type": "add",
"edit_start_line_idx": 86
} | // $G $F.go && $L $F.$A && ./$A.out
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import "os"
func main() {
s :=
0 +
123 +
0123 +
0000 +
0x0 +
0x123 +
0X0 +
0X123;
if s != 788 {
print("s is ", s, "; should be 788\n");
os.Exit(1);
}
}
| test/int_lit.go | 0 | https://github.com/golang/go/commit/a45c54d1a5370f8138e7988f2a64562196566eaa | [
0.001321494928561151,
0.0005571599467657506,
0.0001729824289213866,
0.00017700233729556203,
0.0005404689582064748
] |
{
"id": 5,
"code_window": [
"\tv[n] = p;\n",
"}\n",
"\n",
"func run(argv []string, flag int) (ok bool) {\n"
],
"labels": [
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t*vp = v;\n"
],
"file_path": "src/cmd/gobuild/util.go",
"type": "add",
"edit_start_line_idx": 86
} | // Derived from Inferno utils/8c/list.c
// http://code.google.com/p/inferno-os/source/browse/utils/8c/list.c
//
// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
// Portions Copyright © 1995-1997 C H Forsyth ([email protected])
// Portions Copyright © 1997-1999 Vita Nuova Limited
// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
// Portions Copyright © 2004,2006 Bruce Ellis
// Portions Copyright © 2005-2007 C H Forsyth ([email protected])
// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
// Portions Copyright © 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#include "gg.h"
static int sconsize;
void
listinit(void)
{
fmtinstall('A', Aconv); // as
fmtinstall('P', Pconv); // Prog*
fmtinstall('D', Dconv); // Addr*
fmtinstall('R', Rconv); // reg
fmtinstall('Y', Yconv); // sconst
}
int
Pconv(Fmt *fp)
{
char str[STRINGSZ];
Prog *p;
p = va_arg(fp->args, Prog*);
sconsize = 8;
switch(p->as) {
default:
snprint(str, sizeof(str), "%.4ld (%4ld) %-7A %D,%D",
p->loc, p->lineno, p->as, &p->from, &p->to);
break;
case ADATA:
sconsize = p->from.scale;
snprint(str, sizeof(str), "%.4ld (%4ld) %-7A %D/%d,%D",
p->loc, p->lineno, p->as, &p->from, sconsize, &p->to);
break;
case ATEXT:
snprint(str, sizeof(str), "%.4ld (%4ld) %-7A %D,%lD",
p->loc, p->lineno, p->as, &p->from, &p->to);
break;
}
return fmtstrcpy(fp, str);
}
int
Dconv(Fmt *fp)
{
char str[100], s[100];
Addr *a;
int i;
uint32 d1, d2;
a = va_arg(fp->args, Addr*);
i = a->type;
if(i >= D_INDIR) {
if(a->offset)
snprint(str, sizeof(str), "%d(%R)", a->offset, i-D_INDIR);
else
snprint(str, sizeof(str), "(%R)", i-D_INDIR);
goto brk;
}
switch(i) {
default:
if(a->offset)
snprint(str, sizeof(str), "$%d,%R", a->offset, i);
else
snprint(str, sizeof(str), "%R", i);
break;
case D_NONE:
str[0] = 0;
break;
case D_BRANCH:
snprint(str, sizeof(str), "%d", a->branch->loc);
break;
case D_EXTERN:
snprint(str, sizeof(str), "%S+%d(SB)", a->sym, a->offset);
break;
case D_STATIC:
snprint(str, sizeof(str), "%S<>+%d(SB)", a->sym, a->offset);
break;
case D_AUTO:
snprint(str, sizeof(str), "%S+%d(SP)", a->sym, a->offset);
break;
case D_PARAM:
snprint(str, sizeof(str), "%S+%d(FP)", a->sym, a->offset);
break;
case D_CONST:
if(fp->flags & FmtLong) {
d1 = a->offset;
d2 = a->offset2;
snprint(str, sizeof(str), "$%ud-%ud", (ulong)d1, (ulong)d2);
break;
}
snprint(str, sizeof(str), "$%d", a->offset);
break;
case D_FCONST:
snprint(str, sizeof(str), "$(%.17e)", a->dval);
break;
case D_SCONST:
snprint(str, sizeof(str), "$\"%Y\"", a->sval);
break;
case D_ADDR:
a->type = a->index;
a->index = D_NONE;
snprint(str, sizeof(str), "$%D", a);
a->index = a->type;
a->type = D_ADDR;
goto conv;
}
brk:
if(a->index != D_NONE) {
snprint(s, sizeof(s), "(%R*%d)", (int)a->index, (int)a->scale);
strcat(str, s);
}
conv:
return fmtstrcpy(fp, str);
}
static char* regstr[] =
{
"AL", /* [D_AL] */
"CL",
"DL",
"BL",
"AH", /* [D_AH] */
"CH",
"DH",
"BH",
"AX", /* [D_AX] */
"CX",
"DX",
"BX",
"SP",
"BP",
"SI",
"DI",
"F0", /* [D_F0] */
"F1",
"F2",
"F3",
"F4",
"F5",
"F6",
"F7",
"CS", /* [D_CS] */
"SS",
"DS",
"ES",
"FS",
"GS",
"GDTR", /* [D_GDTR] */
"IDTR", /* [D_IDTR] */
"LDTR", /* [D_LDTR] */
"MSW", /* [D_MSW] */
"TASK", /* [D_TASK] */
"CR0", /* [D_CR] */
"CR1",
"CR2",
"CR3",
"CR4",
"CR5",
"CR6",
"CR7",
"DR0", /* [D_DR] */
"DR1",
"DR2",
"DR3",
"DR4",
"DR5",
"DR6",
"DR7",
"TR0", /* [D_TR] */
"TR1",
"TR2",
"TR3",
"TR4",
"TR5",
"TR6",
"TR7",
"NONE", /* [D_NONE] */
};
int
Rconv(Fmt *fp)
{
char str[STRINGSZ];
int r;
r = va_arg(fp->args, int);
if(r < 0 || r >= nelem(regstr) || regstr[r] == nil) {
snprint(str, sizeof(str), "BAD_R(%d)", r);
return fmtstrcpy(fp, str);
}
return fmtstrcpy(fp, regstr[r]);
}
int
Aconv(Fmt *fp)
{
int i;
i = va_arg(fp->args, int);
return fmtstrcpy(fp, anames[i]);
}
int
Yconv(Fmt *fp)
{
int i, c;
char str[30], *p, *a;
a = va_arg(fp->args, char*);
p = str;
for(i=0; i<sconsize; i++) {
c = a[i] & 0xff;
if((c >= 'a' && c <= 'z') ||
(c >= 'A' && c <= 'Z') ||
(c >= '0' && c <= '9')) {
*p++ = c;
continue;
}
*p++ = '\\';
switch(c) {
default:
if(c < 040 || c >= 0177)
break; /* not portable */
p[-1] = c;
continue;
case 0:
*p++ = 'z';
continue;
case '\\':
case '"':
*p++ = c;
continue;
case '\n':
*p++ = 'n';
continue;
case '\t':
*p++ = 't';
continue;
}
*p++ = (c>>6) + '0';
*p++ = ((c>>3) & 7) + '0';
*p++ = (c & 7) + '0';
}
*p = 0;
return fmtstrcpy(fp, str);
}
| src/cmd/8g/list.c | 0 | https://github.com/golang/go/commit/a45c54d1a5370f8138e7988f2a64562196566eaa | [
0.03359673544764519,
0.003226321656256914,
0.00016689894255250692,
0.00017513323109596968,
0.008340147323906422
] |
{
"id": 5,
"code_window": [
"\tv[n] = p;\n",
"}\n",
"\n",
"func run(argv []string, flag int) (ok bool) {\n"
],
"labels": [
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t*vp = v;\n"
],
"file_path": "src/cmd/gobuild/util.go",
"type": "add",
"edit_start_line_idx": 86
} | // $G $D/$F.go && $L $F.$A && ./$A.out
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
func
main()
{
print("hello world\n");
}
| test/ken/simpprint.go | 0 | https://github.com/golang/go/commit/a45c54d1a5370f8138e7988f2a64562196566eaa | [
0.0001803339837351814,
0.00017565814778208733,
0.00017098229727707803,
0.00017565814778208733,
0.000004675843229051679
] |
{
"id": 1,
"code_window": [
"\treturn strings.HasSuffix(user, \",\"+l.UserDNSearchBaseDN)\n",
"}\n",
"\n",
"// GetNonExistentUserDistNames - find user accounts (DNs) that are no longer\n",
"// present in the LDAP server.\n",
"func (l *Config) GetNonExistentUserDistNames(userDistNames []string) ([]string, error) {\n",
"\tif !l.isUsingLookupBind {\n",
"\t\treturn nil, errors.New(\"current LDAP configuration does not permit looking for expired user accounts\")\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// GetNonEligibleUserDistNames - find user accounts (DNs) that are no longer\n",
"// present in the LDAP server or do not meet filter criteria anymore\n",
"func (l *Config) GetNonEligibleUserDistNames(userDistNames []string) ([]string, error) {\n"
],
"file_path": "internal/config/identity/ldap/config.go",
"type": "replace",
"edit_start_line_idx": 480
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ldap
import (
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"math/rand"
"net"
"strconv"
"strings"
"time"
ldap "github.com/go-ldap/ldap/v3"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/config"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/env"
)
const (
defaultLDAPExpiry = time.Hour * 1
dnDelimiter = ";"
minLDAPExpiry time.Duration = 15 * time.Minute
maxLDAPExpiry time.Duration = 365 * 24 * time.Hour
)
// Config contains AD/LDAP server connectivity information.
type Config struct {
Enabled bool `json:"enabled"`
// E.g. "ldap.minio.io:636"
ServerAddr string `json:"serverAddr"`
// STS credentials expiry duration
STSExpiryDuration string `json:"stsExpiryDuration"`
// Format string for usernames
UsernameFormat string `json:"usernameFormat"`
UsernameFormats []string `json:"-"`
// User DN search parameters
UserDNSearchBaseDN string `json:"userDNSearchBaseDN"`
UserDNSearchFilter string `json:"userDNSearchFilter"`
// Group search parameters
GroupSearchBaseDistName string `json:"groupSearchBaseDN"`
GroupSearchBaseDistNames []string `json:"-"`
GroupSearchFilter string `json:"groupSearchFilter"`
// Lookup bind LDAP service account
LookupBindDN string `json:"lookupBindDN"`
LookupBindPassword string `json:"lookupBindPassword"`
stsExpiryDuration time.Duration // contains converted value
tlsSkipVerify bool // allows skipping TLS verification
serverInsecure bool // allows plain text connection to LDAP server
serverStartTLS bool // allows using StartTLS connection to LDAP server
isUsingLookupBind bool
rootCAs *x509.CertPool
}
// LDAP keys and envs.
const (
ServerAddr = "server_addr"
STSExpiry = "sts_expiry"
LookupBindDN = "lookup_bind_dn"
LookupBindPassword = "lookup_bind_password"
UserDNSearchBaseDN = "user_dn_search_base_dn"
UserDNSearchFilter = "user_dn_search_filter"
UsernameFormat = "username_format"
GroupSearchFilter = "group_search_filter"
GroupSearchBaseDN = "group_search_base_dn"
TLSSkipVerify = "tls_skip_verify"
ServerInsecure = "server_insecure"
ServerStartTLS = "server_starttls"
EnvServerAddr = "MINIO_IDENTITY_LDAP_SERVER_ADDR"
EnvSTSExpiry = "MINIO_IDENTITY_LDAP_STS_EXPIRY"
EnvTLSSkipVerify = "MINIO_IDENTITY_LDAP_TLS_SKIP_VERIFY"
EnvServerInsecure = "MINIO_IDENTITY_LDAP_SERVER_INSECURE"
EnvServerStartTLS = "MINIO_IDENTITY_LDAP_SERVER_STARTTLS"
EnvUsernameFormat = "MINIO_IDENTITY_LDAP_USERNAME_FORMAT"
EnvUserDNSearchBaseDN = "MINIO_IDENTITY_LDAP_USER_DN_SEARCH_BASE_DN"
EnvUserDNSearchFilter = "MINIO_IDENTITY_LDAP_USER_DN_SEARCH_FILTER"
EnvGroupSearchFilter = "MINIO_IDENTITY_LDAP_GROUP_SEARCH_FILTER"
EnvGroupSearchBaseDN = "MINIO_IDENTITY_LDAP_GROUP_SEARCH_BASE_DN"
EnvLookupBindDN = "MINIO_IDENTITY_LDAP_LOOKUP_BIND_DN"
EnvLookupBindPassword = "MINIO_IDENTITY_LDAP_LOOKUP_BIND_PASSWORD"
)
var removedKeys = []string{
"username_search_filter",
"username_search_base_dn",
"group_name_attribute",
}
// DefaultKVS - default config for LDAP config
var (
DefaultKVS = config.KVS{
config.KV{
Key: ServerAddr,
Value: "",
},
config.KV{
Key: UsernameFormat,
Value: "",
},
config.KV{
Key: UserDNSearchBaseDN,
Value: "",
},
config.KV{
Key: UserDNSearchFilter,
Value: "",
},
config.KV{
Key: GroupSearchFilter,
Value: "",
},
config.KV{
Key: GroupSearchBaseDN,
Value: "",
},
config.KV{
Key: STSExpiry,
Value: "1h",
},
config.KV{
Key: TLSSkipVerify,
Value: config.EnableOff,
},
config.KV{
Key: ServerInsecure,
Value: config.EnableOff,
},
config.KV{
Key: ServerStartTLS,
Value: config.EnableOff,
},
config.KV{
Key: LookupBindDN,
Value: "",
},
config.KV{
Key: LookupBindPassword,
Value: "",
},
}
)
func getGroups(conn *ldap.Conn, sreq *ldap.SearchRequest) ([]string, error) {
var groups []string
sres, err := conn.Search(sreq)
if err != nil {
// Check if there is no matching result and return empty slice.
// Ref: https://ldap.com/ldap-result-code-reference/
if ldap.IsErrorWithCode(err, 32) {
return nil, nil
}
return nil, err
}
for _, entry := range sres.Entries {
// We only queried one attribute,
// so we only look up the first one.
groups = append(groups, entry.DN)
}
return groups, nil
}
func (l *Config) lookupBind(conn *ldap.Conn) error {
var err error
if l.LookupBindPassword == "" {
err = conn.UnauthenticatedBind(l.LookupBindDN)
} else {
err = conn.Bind(l.LookupBindDN, l.LookupBindPassword)
}
if ldap.IsErrorWithCode(err, 49) {
return fmt.Errorf("LDAP Lookup Bind user invalid credentials error: %w", err)
}
return err
}
// usernameFormatsBind - Iterates over all given username formats and expects
// that only one will succeed if the credentials are valid. The succeeding
// bindDN is returned or an error.
//
// In the rare case that multiple username formats succeed, implying that two
// (or more) distinct users in the LDAP directory have the same username and
// password, we return an error as we cannot identify the account intended by
// the user.
func (l *Config) usernameFormatsBind(conn *ldap.Conn, username, password string) (string, error) {
var bindDistNames []string
var errs = make([]error, len(l.UsernameFormats))
var successCount = 0
for i, usernameFormat := range l.UsernameFormats {
bindDN := fmt.Sprintf(usernameFormat, username)
// Bind with user credentials to validate the password
errs[i] = conn.Bind(bindDN, password)
if errs[i] == nil {
bindDistNames = append(bindDistNames, bindDN)
successCount++
} else if !ldap.IsErrorWithCode(errs[i], 49) {
return "", fmt.Errorf("LDAP Bind request failed with unexpected error: %w", errs[i])
}
}
if successCount == 0 {
var errStrings []string
for _, err := range errs {
if err != nil {
errStrings = append(errStrings, err.Error())
}
}
outErr := fmt.Sprintf("All username formats failed due to invalid credentials: %s", strings.Join(errStrings, "; "))
return "", errors.New(outErr)
}
if successCount > 1 {
successDistNames := strings.Join(bindDistNames, ", ")
errMsg := fmt.Sprintf("Multiple username formats succeeded - ambiguous user login (succeeded for: %s)", successDistNames)
return "", errors.New(errMsg)
}
return bindDistNames[0], nil
}
// lookupUserDN searches for the DN of the user given their username. conn is
// assumed to be using the lookup bind service account. It is required that the
// search result in at most one result.
func (l *Config) lookupUserDN(conn *ldap.Conn, username string) (string, error) {
filter := strings.Replace(l.UserDNSearchFilter, "%s", ldap.EscapeFilter(username), -1)
searchRequest := ldap.NewSearchRequest(
l.UserDNSearchBaseDN,
ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
filter,
[]string{}, // only need DN, so no pass no attributes here
nil,
)
searchResult, err := conn.Search(searchRequest)
if err != nil {
return "", err
}
if len(searchResult.Entries) == 0 {
return "", fmt.Errorf("User DN for %s not found", username)
}
if len(searchResult.Entries) != 1 {
return "", fmt.Errorf("Multiple DNs for %s found - please fix the search filter", username)
}
return searchResult.Entries[0].DN, nil
}
func (l *Config) searchForUserGroups(conn *ldap.Conn, username, bindDN string) ([]string, error) {
// User groups lookup.
var groups []string
if l.GroupSearchFilter != "" {
for _, groupSearchBase := range l.GroupSearchBaseDistNames {
filter := strings.Replace(l.GroupSearchFilter, "%s", ldap.EscapeFilter(username), -1)
filter = strings.Replace(filter, "%d", ldap.EscapeFilter(bindDN), -1)
searchRequest := ldap.NewSearchRequest(
groupSearchBase,
ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
filter,
nil,
nil,
)
var newGroups []string
newGroups, err := getGroups(conn, searchRequest)
if err != nil {
errRet := fmt.Errorf("Error finding groups of %s: %w", bindDN, err)
return nil, errRet
}
groups = append(groups, newGroups...)
}
}
return groups, nil
}
// LookupUserDN searches for the full DN and groups of a given username
func (l *Config) LookupUserDN(username string) (string, []string, error) {
if !l.isUsingLookupBind {
return "", nil, errors.New("current lookup mode does not support searching for User DN")
}
conn, err := l.Connect()
if err != nil {
return "", nil, err
}
defer conn.Close()
// Bind to the lookup user account
if err = l.lookupBind(conn); err != nil {
return "", nil, err
}
// Lookup user DN
bindDN, err := l.lookupUserDN(conn, username)
if err != nil {
errRet := fmt.Errorf("Unable to find user DN: %w", err)
return "", nil, errRet
}
groups, err := l.searchForUserGroups(conn, username, bindDN)
if err != nil {
return "", nil, err
}
return bindDN, groups, nil
}
// Bind - binds to ldap, searches LDAP and returns the distinguished name of the
// user and the list of groups.
func (l *Config) Bind(username, password string) (string, []string, error) {
conn, err := l.Connect()
if err != nil {
return "", nil, err
}
defer conn.Close()
var bindDN string
if l.isUsingLookupBind {
// Bind to the lookup user account
if err = l.lookupBind(conn); err != nil {
return "", nil, err
}
// Lookup user DN
bindDN, err = l.lookupUserDN(conn, username)
if err != nil {
errRet := fmt.Errorf("Unable to find user DN: %w", err)
return "", nil, errRet
}
// Authenticate the user credentials.
err = conn.Bind(bindDN, password)
if err != nil {
errRet := fmt.Errorf("LDAP auth failed for DN %s: %w", bindDN, err)
return "", nil, errRet
}
// Bind to the lookup user account again to perform group search.
if err = l.lookupBind(conn); err != nil {
return "", nil, err
}
} else {
// Verify login credentials by checking the username formats.
bindDN, err = l.usernameFormatsBind(conn, username, password)
if err != nil {
return "", nil, err
}
// Bind to the successful bindDN again.
err = conn.Bind(bindDN, password)
if err != nil {
errRet := fmt.Errorf("LDAP conn failed though auth for DN %s succeeded: %w", bindDN, err)
return "", nil, errRet
}
}
// User groups lookup.
groups, err := l.searchForUserGroups(conn, username, bindDN)
if err != nil {
return "", nil, err
}
return bindDN, groups, nil
}
// Connect connect to ldap server.
func (l *Config) Connect() (ldapConn *ldap.Conn, err error) {
if l == nil {
return nil, errors.New("LDAP is not configured")
}
serverHost, _, err := net.SplitHostPort(l.ServerAddr)
if err != nil {
serverHost = l.ServerAddr
// User default LDAP port if none specified "636"
l.ServerAddr = net.JoinHostPort(l.ServerAddr, "636")
}
if l.serverInsecure {
return ldap.Dial("tcp", l.ServerAddr)
}
tlsConfig := &tls.Config{
InsecureSkipVerify: l.tlsSkipVerify,
RootCAs: l.rootCAs,
ServerName: serverHost,
}
if l.serverStartTLS {
conn, err := ldap.Dial("tcp", l.ServerAddr)
if err != nil {
return nil, err
}
err = conn.StartTLS(tlsConfig)
return conn, err
}
return ldap.DialTLS("tcp", l.ServerAddr, tlsConfig)
}
// GetExpiryDuration - return parsed expiry duration.
func (l Config) GetExpiryDuration(dsecs string) (time.Duration, error) {
if dsecs == "" {
return l.stsExpiryDuration, nil
}
d, err := strconv.Atoi(dsecs)
if err != nil {
return 0, auth.ErrInvalidDuration
}
dur := time.Duration(d) * time.Second
if dur < minLDAPExpiry || dur > maxLDAPExpiry {
return 0, auth.ErrInvalidDuration
}
return dur, nil
}
func (l Config) testConnection() error {
conn, err := l.Connect()
if err != nil {
return fmt.Errorf("Error creating connection to LDAP server: %w", err)
}
defer conn.Close()
if l.isUsingLookupBind {
if err = l.lookupBind(conn); err != nil {
return fmt.Errorf("Error connecting as LDAP Lookup Bind user: %w", err)
}
return nil
}
// Generate some random user credentials for username formats mode test.
username := fmt.Sprintf("sometestuser%09d", rand.Int31n(1000000000))
charset := []byte("abcdefghijklmnopqrstuvwxyz" +
"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")
rand.Shuffle(len(charset), func(i, j int) {
charset[i], charset[j] = charset[j], charset[i]
})
password := string(charset[:20])
_, err = l.usernameFormatsBind(conn, username, password)
if err == nil {
// We don't expect to successfully guess a credential in this
// way.
return fmt.Errorf("Unexpected random credentials success for user=%s password=%s", username, password)
} else if strings.HasPrefix(err.Error(), "All username formats failed due to invalid credentials: ") {
return nil
}
return fmt.Errorf("LDAP connection test error: %w", err)
}
// IsLDAPUserDN determines if the given string could be a user DN from LDAP.
func (l Config) IsLDAPUserDN(user string) bool {
return strings.HasSuffix(user, ","+l.UserDNSearchBaseDN)
}
// GetNonExistentUserDistNames - find user accounts (DNs) that are no longer
// present in the LDAP server.
func (l *Config) GetNonExistentUserDistNames(userDistNames []string) ([]string, error) {
if !l.isUsingLookupBind {
return nil, errors.New("current LDAP configuration does not permit looking for expired user accounts")
}
conn, err := l.Connect()
if err != nil {
return nil, err
}
defer conn.Close()
// Bind to the lookup user account
if err = l.lookupBind(conn); err != nil {
return nil, err
}
nonExistentUsers := []string{}
for _, dn := range userDistNames {
searchRequest := ldap.NewSearchRequest(
dn,
ldap.ScopeBaseObject, ldap.NeverDerefAliases, 0, 0, false,
"(objectclass=*)",
[]string{}, // only need DN, so no pass no attributes here
nil,
)
searchResult, err := conn.Search(searchRequest)
if err != nil {
// Object does not exist error?
if ldap.IsErrorWithCode(err, 32) {
nonExistentUsers = append(nonExistentUsers, dn)
continue
}
return nil, err
}
if len(searchResult.Entries) == 0 {
// DN was not found - this means this user account is
// expired.
nonExistentUsers = append(nonExistentUsers, dn)
}
}
return nonExistentUsers, nil
}
// LookupGroupMemberships - for each DN finds the set of LDAP groups they are a
// member of. Requires lookup-bind mode.
func (l *Config) LookupGroupMemberships(userDistNames []string, userDNToUsernameMap map[string]string) (map[string]set.StringSet, error) {
	if !l.isUsingLookupBind {
		return nil, errors.New("current LDAP configuration does not permit this lookup")
	}

	conn, err := l.Connect()
	if err != nil {
		return nil, err
	}
	defer conn.Close()

	// Bind to the lookup user account
	if err = l.lookupBind(conn); err != nil {
		return nil, err
	}

	memberships := make(map[string]set.StringSet, len(userDistNames))
	for _, distName := range userDistNames {
		groups, gerr := l.searchForUserGroups(conn, userDNToUsernameMap[distName], distName)
		if gerr != nil {
			return nil, gerr
		}
		memberships[distName] = set.CreateStringSet(groups...)
	}
	return memberships, nil
}
// EnabledWithLookupBind - checks if the LDAP IDP is enabled AND configured in
// lookup-bind mode.
func (l Config) EnabledWithLookupBind() bool {
	if !l.Enabled {
		return false
	}
	return l.isUsingLookupBind
}
// Enabled reports whether the LDAP identity configuration is enabled, i.e. a
// server address has been provided. (The previous comment mentioned "jwks",
// which was a copy-paste mistake - this is the LDAP subsystem.)
func Enabled(kvs config.KVS) bool {
	return len(kvs.Get(ServerAddr)) > 0
}
// Lookup - initializes LDAP config, overrides config, if any ENV values are set.
// Settings are read from `kvs` with environment variables taking precedence.
// Returns a disabled Config (Enabled=false, nil error) when no server address
// is configured; returns an error when the configuration is invalid or the
// connection test against the LDAP server fails.
func Lookup(kvs config.KVS, rootCAs *x509.CertPool) (l Config, err error) {
	l = Config{}
	// Purge all removed keys first
	for _, k := range removedKeys {
		kvs.Delete(k)
	}
	if err = config.CheckValidKeys(config.IdentityLDAPSubSys, kvs, DefaultKVS); err != nil {
		return l, err
	}
	ldapServer := env.Get(EnvServerAddr, kvs.Get(ServerAddr))
	if ldapServer == "" {
		// No server address configured - LDAP stays disabled.
		return l, nil
	}
	l.Enabled = true
	l.rootCAs = rootCAs
	l.ServerAddr = ldapServer
	l.stsExpiryDuration = defaultLDAPExpiry
	// Deprecated STS expiry override; validated against [minLDAPExpiry, maxLDAPExpiry].
	if v := env.Get(EnvSTSExpiry, kvs.Get(STSExpiry)); v != "" {
		logger.Info("DEPRECATION WARNING: Support for configuring the default LDAP credentials expiry duration will be removed by October 2021. Please use the `DurationSeconds` parameter in the LDAP STS API instead.")
		expDur, err := time.ParseDuration(v)
		if err != nil {
			return l, errors.New("LDAP expiry time err:" + err.Error())
		}
		if expDur < minLDAPExpiry {
			return l, fmt.Errorf("LDAP expiry time must be at least %s", minLDAPExpiry)
		}
		if expDur > maxLDAPExpiry {
			return l, fmt.Errorf("LDAP expiry time may not exceed %s", maxLDAPExpiry)
		}
		l.STSExpiryDuration = v
		l.stsExpiryDuration = expDur
	}
	// LDAP connection configuration
	if v := env.Get(EnvServerInsecure, kvs.Get(ServerInsecure)); v != "" {
		l.serverInsecure, err = config.ParseBool(v)
		if err != nil {
			return l, err
		}
	}
	if v := env.Get(EnvServerStartTLS, kvs.Get(ServerStartTLS)); v != "" {
		l.serverStartTLS, err = config.ParseBool(v)
		if err != nil {
			return l, err
		}
	}
	if v := env.Get(EnvTLSSkipVerify, kvs.Get(TLSSkipVerify)); v != "" {
		l.tlsSkipVerify, err = config.ParseBool(v)
		if err != nil {
			return l, err
		}
	}
	// Lookup bind user configuration
	lookupBindDN := env.Get(EnvLookupBindDN, kvs.Get(LookupBindDN))
	lookupBindPassword := env.Get(EnvLookupBindPassword, kvs.Get(LookupBindPassword))
	if lookupBindDN != "" {
		l.LookupBindDN = lookupBindDN
		l.LookupBindPassword = lookupBindPassword
		l.isUsingLookupBind = true
		// User DN search configuration - both base DN and filter are
		// mandatory in lookup-bind mode.
		userDNSearchBaseDN := env.Get(EnvUserDNSearchBaseDN, kvs.Get(UserDNSearchBaseDN))
		userDNSearchFilter := env.Get(EnvUserDNSearchFilter, kvs.Get(UserDNSearchFilter))
		if userDNSearchFilter == "" || userDNSearchBaseDN == "" {
			return l, errors.New("In lookup bind mode, userDN search base DN and userDN search filter are both required")
		}
		l.UserDNSearchBaseDN = userDNSearchBaseDN
		l.UserDNSearchFilter = userDNSearchFilter
	}
	// Username format configuration.
	if v := env.Get(EnvUsernameFormat, kvs.Get(UsernameFormat)); v != "" {
		if !strings.Contains(v, "%s") {
			return l, errors.New("LDAP username format does not support '%s' substitution")
		}
		l.UsernameFormats = strings.Split(v, dnDelimiter)
	}
	if len(l.UsernameFormats) > 0 {
		logger.Info("DEPRECATION WARNING: Support for %s will be removed by October 2021, please migrate your LDAP settings to lookup bind mode", UsernameFormat)
	}
	// Either lookup bind mode or username format is supported, but not both.
	if l.isUsingLookupBind && len(l.UsernameFormats) > 0 {
		return l, errors.New("Lookup Bind mode and Username Format mode are not supported at the same time")
	}
	// At least one of bind mode or username format must be used.
	if !l.isUsingLookupBind && len(l.UsernameFormats) == 0 {
		return l, errors.New("Either Lookup Bind mode or Username Format mode is required")
	}
	// Test connection to LDAP server.
	if err := l.testConnection(); err != nil {
		return l, fmt.Errorf("Connection test for LDAP server failed: %w", err)
	}
	// Group search params configuration
	grpSearchFilter := env.Get(EnvGroupSearchFilter, kvs.Get(GroupSearchFilter))
	grpSearchBaseDN := env.Get(EnvGroupSearchBaseDN, kvs.Get(GroupSearchBaseDN))
	// Either all group params must be set or none must be set.
	if (grpSearchFilter != "" && grpSearchBaseDN == "") || (grpSearchFilter == "" && grpSearchBaseDN != "") {
		return l, errors.New("All group related parameters must be set")
	}
	if grpSearchFilter != "" {
		l.GroupSearchFilter = grpSearchFilter
		l.GroupSearchBaseDistName = grpSearchBaseDN
		l.GroupSearchBaseDistNames = strings.Split(l.GroupSearchBaseDistName, dnDelimiter)
	}
	return l, nil
}
| internal/config/identity/ldap/config.go | 1 | https://github.com/minio/minio/commit/47dfc1b1b09a3ef6bc21d7f39636fbbe81e2c16f | [
0.9963685274124146,
0.24948731064796448,
0.00016592862084507942,
0.024017056450247765,
0.3735116720199585
] |
{
"id": 1,
"code_window": [
"\treturn strings.HasSuffix(user, \",\"+l.UserDNSearchBaseDN)\n",
"}\n",
"\n",
"// GetNonExistentUserDistNames - find user accounts (DNs) that are no longer\n",
"// present in the LDAP server.\n",
"func (l *Config) GetNonExistentUserDistNames(userDistNames []string) ([]string, error) {\n",
"\tif !l.isUsingLookupBind {\n",
"\t\treturn nil, errors.New(\"current LDAP configuration does not permit looking for expired user accounts\")\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// GetNonEligibleUserDistNames - find user accounts (DNs) that are no longer\n",
"// present in the LDAP server or do not meet filter criteria anymore\n",
"func (l *Config) GetNonEligibleUserDistNames(userDistNames []string) ([]string, error) {\n"
],
"file_path": "internal/config/identity/ldap/config.go",
"type": "replace",
"edit_start_line_idx": 480
} | # MinIO S3 Gateway [](https://slack.min.io)
MinIO S3 Gateway adds MinIO features like MinIO Console and disk caching to AWS S3 or any other AWS S3 compatible service.
## Run MinIO Gateway for AWS S3
As a prerequisite to run MinIO S3 gateway, you need valid AWS S3 access key and secret key by default. Optionally you can also set custom access/secret key, when you have rotating AWS IAM credentials or AWS credentials through environment variables (i.e. AWS_ACCESS_KEY_ID)
### Using Docker
```
podman run \
-p 9000:9000 \
-p 9001:9001 \
--name minio-s3 \
-e "MINIO_ROOT_USER=aws_s3_access_key" \
-e "MINIO_ROOT_PASSWORD=aws_s3_secret_key" \
minio/minio gateway s3 --console-address ":9001"
```
### Using Binary
```
export MINIO_ROOT_USER=aws_s3_access_key
export MINIO_ROOT_PASSWORD=aws_s3_secret_key
minio gateway s3
```
### Using Binary in EC2
Using IAM rotating credentials for AWS S3
If you are using an S3-enabled IAM role on an EC2 instance for S3 access, MinIO will still require the env vars MINIO_ROOT_USER and MINIO_ROOT_PASSWORD to be set for its internal use. These may be set to any values that meet the length requirements: the access key must be at least 3 characters long, and the secret key at least 8.
```
export MINIO_ROOT_USER=custom_access_key
export MINIO_ROOT_PASSWORD=custom_secret_key
minio gateway s3
```
MinIO gateway will automatically look for list of credential styles in following order, if your backend URL is AWS S3.
- AWS env vars (i.e. AWS_ACCESS_KEY_ID)
- AWS creds file (i.e. AWS_SHARED_CREDENTIALS_FILE or ~/.aws/credentials)
- IAM profile based credentials. (performs an HTTP call to a pre-defined endpoint, only valid inside configured ec2 instances)
Minimum permissions required if you wish to provide restricted access with your AWS credentials, please make sure you have following IAM policies attached for your AWS user or roles.
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "readonly",
"Effect": "Allow",
"Action": [
"s3:GetObject"
],
"Resource": "arn:aws:s3:::testbucket/*"
},
{
"Sid": "readonly",
"Effect": "Allow",
"Action": [
"s3:GetBucketPolicy",
"s3:HeadBucket",
"s3:ListBucket"
],
"Resource": "arn:aws:s3:::testbucket"
}
]
}
```
## Run MinIO Gateway for AWS S3 compatible services
As a prerequisite to run MinIO S3 gateway on an AWS S3 compatible service, you need valid access key, secret key and service endpoint.
## Run MinIO Gateway with double-encryption
MinIO gateway to S3 supports encryption of data at rest. Three types of encryption modes are supported
- encryption can be set to ``pass-through`` to backend only for SSE-S3, SSE-C is not allowed passthrough.
- ``single encryption`` (at the gateway)
- ``double encryption`` (single encryption at gateway and pass through to backend)
This can be specified by setting the MINIO_GATEWAY_SSE environment variable. If MINIO_GATEWAY_SSE and KMS are not set up, all encryption headers are passed through to the backend. If KMS environment variables are set up, ``single encryption`` is automatically performed at the gateway and the encrypted object is saved at the backend.
To specify ``double encryption``, MINIO_GATEWAY_SSE environment variable needs to be set to "s3" for sse-s3
and "c" for sse-c encryption. More than one encryption option can be set, delimited by ";". Objects are encrypted at the gateway and the gateway also does a pass-through to backend. Note that in the case of SSE-C encryption, gateway derives a unique SSE-C key for pass through from the SSE-C client key using a key derivation function (KDF).
```sh
curl -sSL --tlsv1.2 \
-O 'https://raw.githubusercontent.com/minio/kes/master/root.key' \
-O 'https://raw.githubusercontent.com/minio/kes/master/root.cert'
```
```sh
export MINIO_GATEWAY_SSE="s3;c"
export MINIO_KMS_KES_ENDPOINT=https://play.min.io:7373
export MINIO_KMS_KES_KEY_FILE=root.key
export MINIO_KMS_KES_CERT_FILE=root.cert
export MINIO_KMS_KES_KEY_NAME=my-minio-key
minio gateway s3
```
### Using Docker
```
podman run -p 9000:9000 --name minio-s3 \
-e "MINIO_ROOT_USER=access_key" \
-e "MINIO_ROOT_PASSWORD=secret_key" \
minio/minio gateway s3 https://s3_compatible_service_endpoint:port
```
### Using Binary
```
export MINIO_ROOT_USER=access_key
export MINIO_ROOT_PASSWORD=secret_key
minio gateway s3 https://s3_compatible_service_endpoint:port
```
## MinIO Caching
MinIO edge caching allows storing content closer to the applications. Frequently accessed objects are stored in a local disk based cache. Edge caching with MinIO gateway feature allows
- Dramatic improvements for time to first byte for any object.
- Avoid S3 [data transfer charges](https://aws.amazon.com/s3/pricing/).
Refer [this document](https://docs.min.io/docs/minio-disk-cache-guide.html) to get started with MinIO Caching.
## MinIO Console
MinIO Gateway comes with an embedded web based object browser. Point your web browser to http://127.0.0.1:9000 to ensure that your server has started successfully.
| Dashboard | Creating a bucket |
| ------------- | ------------- |
|  |  |
With MinIO S3 gateway, you can use MinIO Console to explore AWS S3 based objects.
### Known limitations
- Bucket notification APIs are not supported.
## Explore Further
- [`mc` command-line interface](https://docs.min.io/docs/minio-client-quickstart-guide)
- [`aws` command-line interface](https://docs.min.io/docs/aws-cli-with-minio)
- [`minio-go` Go SDK](https://docs.min.io/docs/golang-client-quickstart-guide)
| docs/gateway/s3.md | 0 | https://github.com/minio/minio/commit/47dfc1b1b09a3ef6bc21d7f39636fbbe81e2c16f | [
0.00017040646343957633,
0.0001654619991313666,
0.00016062558279372752,
0.0001649244804866612,
0.000003204797167200013
] |
{
"id": 1,
"code_window": [
"\treturn strings.HasSuffix(user, \",\"+l.UserDNSearchBaseDN)\n",
"}\n",
"\n",
"// GetNonExistentUserDistNames - find user accounts (DNs) that are no longer\n",
"// present in the LDAP server.\n",
"func (l *Config) GetNonExistentUserDistNames(userDistNames []string) ([]string, error) {\n",
"\tif !l.isUsingLookupBind {\n",
"\t\treturn nil, errors.New(\"current LDAP configuration does not permit looking for expired user accounts\")\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// GetNonEligibleUserDistNames - find user accounts (DNs) that are no longer\n",
"// present in the LDAP server or do not meet filter criteria anymore\n",
"func (l *Config) GetNonEligibleUserDistNames(userDistNames []string) ([]string, error) {\n"
],
"file_path": "internal/config/identity/ldap/config.go",
"type": "replace",
"edit_start_line_idx": 480
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
objectlock "github.com/minio/minio/internal/bucket/object/lock"
"github.com/minio/minio/internal/color"
"github.com/minio/minio/internal/config/cache"
"github.com/minio/minio/internal/hash"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/minio/internal/sync/errgroup"
"github.com/minio/pkg/wildcard"
)
// Disk cache tuning constants and reserved metadata header names.
const (
	// cacheBlkSize is the block size (1 MiB) used for cache disk I/O.
	cacheBlkSize = 1 << 20
	// cacheGCInterval is how often cache garbage collection runs.
	cacheGCInterval = time.Minute * 30
	// writeBackStatusHeader records the writeback commit status of a cached object.
	writeBackStatusHeader = ReservedMetadataPrefixLower + "write-back-status"
	// writeBackRetryHeader tracks writeback retry state for upload to the backend.
	writeBackRetryHeader = ReservedMetadataPrefixLower + "write-back-retry"
)
// cacheCommitStatus represents the writeback commit state of a cached object
// with respect to the backend.
type cacheCommitStatus string

const (
	// CommitPending - cache writeback with backend is pending.
	CommitPending cacheCommitStatus = "pending"
	// CommitComplete - cache writeback completed ok.
	CommitComplete cacheCommitStatus = "complete"
	// CommitFailed - cache writeback needs a retry.
	CommitFailed cacheCommitStatus = "failed"
)
// String returns the string representation of the commit status.
func (s cacheCommitStatus) String() string {
	return string(s)
}
// CacheStorageInfo - represents total, free capacity of
// underlying cache storage. Values are aggregated across all configured
// cache drives (see cacheObjects.StorageInfo).
type CacheStorageInfo struct {
	Total uint64 // Total cache disk space.
	Free uint64 // Free cache available space.
}
// CacheObjectLayer implements primitives for cache object API layer.
// Implementations mirror a subset of the object-layer API but consult the
// disk cache before/after delegating to the backend.
type CacheObjectLayer interface {
	// Object operations.
	GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error)
	GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
	DeleteObject(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error)
	DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error)
	PutObject(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error)
	CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error)
	// Storage operations.
	StorageInfo(ctx context.Context) CacheStorageInfo
	CacheStats() *CacheStats
}
// Abstracts disk caching - used by the S3 layer
type cacheObjects struct {
	// slice of cache drives
	cache []*diskCache
	// file path patterns to exclude from cache
	exclude []string
	// number of accesses after which to cache an object
	after int
	// commit objects in async manner
	commitWriteback bool
	// if true migration is in progress from v1 to v2
	migrating bool
	// mutex to protect migration bool
	migMutex sync.Mutex
	// retry queue for writeback cache mode to reattempt upload to backend
	wbRetryCh chan ObjectInfo
	// Cache stats
	cacheStats *CacheStats

	// The Inner*Fn members hold the backend (non-cached) implementations of
	// the corresponding object-layer operations; the cache layer delegates
	// to them on cache miss, exclusion, or error.
	InnerGetObjectNInfoFn func(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error)
	InnerGetObjectInfoFn func(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
	InnerDeleteObjectFn func(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
	InnerPutObjectFn func(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error)
	InnerCopyObjectFn func(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error)
}
// incHitsToMeta bumps the hit counter persisted in the cached object's
// metadata (the final `true` argument to SaveMetadata) without rewriting
// the cached object data.
func (c *cacheObjects) incHitsToMeta(ctx context.Context, dcache *diskCache, bucket, object string, size int64, eTag string, rs *HTTPRangeSpec) error {
	meta := map[string]string{"etag": eTag}
	return dcache.SaveMetadata(ctx, bucket, object, meta, size, rs, "", true)
}
// Backend metadata could have changed through server side copy - reset cache
// metadata if that is the case; otherwise just bump the cache hit counter.
func (c *cacheObjects) updateMetadataIfChanged(ctx context.Context, dcache *diskCache, bucket, object string, bkObjectInfo, cacheObjInfo ObjectInfo, rs *HTTPRangeSpec) error {
	// filterUserMeta canonicalizes user-defined metadata keys and drops
	// internal (reserved-prefix) entries that must not be compared.
	filterUserMeta := func(in map[string]string) map[string]string {
		out := make(map[string]string, len(in))
		for k, v := range in {
			if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
				// Do not need to send any internal metadata
				continue
			}
			out[http.CanonicalHeaderKey(k)] = v
		}
		return out
	}

	bkMeta := filterUserMeta(bkObjectInfo.UserDefined)
	cacheMeta := filterUserMeta(cacheObjInfo.UserDefined)

	changed := !isMetadataSame(bkMeta, cacheMeta) ||
		bkObjectInfo.ETag != cacheObjInfo.ETag ||
		bkObjectInfo.ContentType != cacheObjInfo.ContentType ||
		!bkObjectInfo.Expires.Equal(cacheObjInfo.Expires)
	if changed {
		// Backend metadata diverged - overwrite the cached metadata.
		return dcache.SaveMetadata(ctx, bucket, object, getMetadata(bkObjectInfo), bkObjectInfo.Size, nil, "", false)
	}
	return c.incHitsToMeta(ctx, dcache, bucket, object, cacheObjInfo.Size, cacheObjInfo.ETag, rs)
}
// DeleteObject clears the cache entry after the backend delete succeeds.
// The cache is left untouched when the object is excluded from caching or a
// cache migration is in progress.
func (c *cacheObjects) DeleteObject(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	objInfo, err = c.InnerDeleteObjectFn(ctx, bucket, object, opts)
	if err != nil {
		return
	}
	if c.isCacheExclude(bucket, object) || c.skipCache() {
		return
	}

	dcache, cerr := c.getCacheLoc(bucket, object)
	if cerr != nil {
		return objInfo, cerr
	}
	dcache.Delete(ctx, bucket, object)
	return
}
// DeleteObjects batch deletes objects in slice, clearing any cached entry for
// each successfully deleted object. The returned slices are index-aligned
// with the input; entries that failed keep a zero DeletedObject and carry
// their error in the errs slice.
func (c *cacheObjects) DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error) {
	errs := make([]error, len(objects))
	deletedObjects := make([]DeletedObject, len(objects))
	for idx, object := range objects {
		opts.VersionID = object.VersionID
		objInfo, err := c.DeleteObject(ctx, bucket, object.ObjectName, opts)
		errs[idx] = err
		if err != nil {
			continue
		}
		if objInfo.DeleteMarker {
			deletedObjects[idx] = DeletedObject{
				DeleteMarker:          objInfo.DeleteMarker,
				DeleteMarkerVersionID: objInfo.VersionID,
			}
			continue
		}
		deletedObjects[idx] = DeletedObject{
			ObjectName: objInfo.Name,
			VersionID:  objInfo.VersionID,
		}
	}
	return deletedObjects, errs
}
// getMetadata constructs a flat metadata key-value map from an ObjectInfo,
// merging the standard HTTP-style entries (etag, content-type, encoding,
// expires, last-modified) with the user-defined metadata.
func getMetadata(objInfo ObjectInfo) map[string]string {
	metadata := make(map[string]string, len(objInfo.UserDefined)+4)
	metadata["etag"] = objInfo.ETag
	metadata["content-type"] = objInfo.ContentType
	if enc := objInfo.ContentEncoding; enc != "" {
		metadata["content-encoding"] = enc
	}
	// timeSentinel marks "no expiry set"; only record a real expiry.
	if !objInfo.Expires.Equal(timeSentinel) {
		metadata["expires"] = objInfo.Expires.Format(http.TimeFormat)
	}
	metadata["last-modified"] = objInfo.ModTime.Format(http.TimeFormat)
	for key, value := range objInfo.UserDefined {
		metadata[key] = value
	}
	return metadata
}
// incCacheStats marks a cache hit and accounts the number of bytes served
// from the cache.
func (c *cacheObjects) incCacheStats(size int64) {
	c.cacheStats.incHit()
	c.cacheStats.incBytesServed(size)
}
// GetObjectNInfo reads an object from the disk cache when possible, falling
// back to the backend otherwise. On a qualifying cache miss the object is
// cached while being streamed back to the caller via an io.TeeReader; for
// range requests with range-caching disabled, the full object is fetched in
// the background instead.
//
// FIX: in the Cache-Control "no-store" branch the backend reader was
// dereferenced before its error was checked, causing a nil-pointer panic
// whenever the backend call failed. The error is now checked first.
func (c *cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
	if c.isCacheExclude(bucket, object) || c.skipCache() {
		return c.InnerGetObjectNInfoFn(ctx, bucket, object, rs, h, lockType, opts)
	}
	var cc *cacheControl
	var cacheObjSize int64
	// fetch diskCache if object is currently cached or nearest available cache drive
	dcache, err := c.getCacheToLoc(ctx, bucket, object)
	if err != nil {
		return c.InnerGetObjectNInfoFn(ctx, bucket, object, rs, h, lockType, opts)
	}

	cacheReader, numCacheHits, cacheErr := dcache.Get(ctx, bucket, object, rs, h, opts)
	if cacheErr == nil {
		cacheObjSize = cacheReader.ObjInfo.Size
		if rs != nil {
			if _, len, err := rs.GetOffsetLength(cacheObjSize); err == nil {
				cacheObjSize = len
			}
		}
		cc = cacheControlOpts(cacheReader.ObjInfo)
		if cc != nil && (!cc.isStale(cacheReader.ObjInfo.ModTime) ||
			cc.onlyIfCached) {
			// This is a cache hit, mark it so
			bytesServed := cacheReader.ObjInfo.Size
			if rs != nil {
				if _, len, err := rs.GetOffsetLength(bytesServed); err == nil {
					bytesServed = len
				}
			}
			c.cacheStats.incHit()
			c.cacheStats.incBytesServed(bytesServed)
			c.incHitsToMeta(ctx, dcache, bucket, object, cacheReader.ObjInfo.Size, cacheReader.ObjInfo.ETag, rs)
			return cacheReader, nil
		}
		if cc != nil && cc.noStore {
			cacheReader.Close()
			c.cacheStats.incMiss()
			bReader, err := c.InnerGetObjectNInfoFn(ctx, bucket, object, rs, h, lockType, opts)
			if err != nil {
				// BUGFIX: bReader may be nil on error; do not touch
				// bReader.ObjInfo before this check.
				return nil, err
			}
			bReader.ObjInfo.CacheLookupStatus = CacheHit
			bReader.ObjInfo.CacheStatus = CacheMiss
			return bReader, nil
		}
	}

	objInfo, err := c.InnerGetObjectInfoFn(ctx, bucket, object, opts)
	if backendDownError(err) && cacheErr == nil {
		// Backend unreachable but we hold a cached copy - serve it.
		c.incCacheStats(cacheObjSize)
		return cacheReader, nil
	} else if err != nil {
		if cacheErr == nil {
			cacheReader.Close()
		}
		if _, ok := err.(ObjectNotFound); ok {
			if cacheErr == nil {
				// Delete cached entry if backend object
				// was deleted.
				dcache.Delete(ctx, bucket, object)
			}
		}
		c.cacheStats.incMiss()
		return nil, err
	}

	if !objInfo.IsCacheable() {
		if cacheErr == nil {
			cacheReader.Close()
		}
		c.cacheStats.incMiss()
		return c.InnerGetObjectNInfoFn(ctx, bucket, object, rs, h, lockType, opts)
	}
	// skip cache for objects with locks
	objRetention := objectlock.GetObjectRetentionMeta(objInfo.UserDefined)
	legalHold := objectlock.GetObjectLegalHoldMeta(objInfo.UserDefined)
	if objRetention.Mode.Valid() || legalHold.Status.Valid() {
		if cacheErr == nil {
			cacheReader.Close()
		}
		c.cacheStats.incMiss()
		return c.InnerGetObjectNInfoFn(ctx, bucket, object, rs, h, lockType, opts)
	}
	if cacheErr == nil {
		// if ETag matches for stale cache entry, serve from cache
		if cacheReader.ObjInfo.ETag == objInfo.ETag {
			// Update metadata in case server-side copy might have changed object metadata
			c.updateMetadataIfChanged(ctx, dcache, bucket, object, objInfo, cacheReader.ObjInfo, rs)
			c.incCacheStats(cacheObjSize)
			return cacheReader, nil
		}
		cacheReader.Close()
		// Object is stale, so delete from cache
		dcache.Delete(ctx, bucket, object)
	}

	// Reaching here implies cache miss
	c.cacheStats.incMiss()

	bkReader, bkErr := c.InnerGetObjectNInfoFn(ctx, bucket, object, rs, h, lockType, opts)
	if bkErr != nil {
		return bkReader, bkErr
	}
	// If object has less hits than configured cache after, just increment the hit counter
	// but do not cache it.
	if numCacheHits < c.after {
		c.incHitsToMeta(ctx, dcache, bucket, object, objInfo.Size, objInfo.ETag, rs)
		return bkReader, bkErr
	}

	// Record if cache has a hit that was invalidated by ETag verification
	if cacheErr == nil {
		bkReader.ObjInfo.CacheLookupStatus = CacheHit
	}

	// Check if we can add it without exceeding total cache size.
	if !dcache.diskSpaceAvailable(objInfo.Size) {
		return bkReader, bkErr
	}

	if rs != nil && !dcache.enableRange {
		go func() {
			// if range caching is disabled, download entire object.
			rs = nil
			// fill cache in the background for range GET requests
			bReader, bErr := c.InnerGetObjectNInfoFn(GlobalContext, bucket, object, rs, h, lockType, opts)
			if bErr != nil {
				return
			}
			defer bReader.Close()
			oi, _, _, err := dcache.statRange(GlobalContext, bucket, object, rs)
			// avoid cache overwrite if another background routine filled cache
			if err != nil || oi.ETag != bReader.ObjInfo.ETag {
				// use a new context to avoid locker prematurely timing out operation when the GetObjectNInfo returns.
				dcache.Put(GlobalContext, bucket, object, bReader, bReader.ObjInfo.Size, rs, ObjectOptions{
					UserDefined: getMetadata(bReader.ObjInfo),
				}, false)
				return
			}
		}()
		return bkReader, bkErr
	}

	// Initialize pipe: tee the backend stream into the cache writer while
	// the caller consumes it.
	pr, pw := io.Pipe()
	var wg sync.WaitGroup
	teeReader := io.TeeReader(bkReader, pw)
	userDefined := getMetadata(bkReader.ObjInfo)
	wg.Add(1)
	go func() {
		_, putErr := dcache.Put(ctx, bucket, object,
			io.LimitReader(pr, bkReader.ObjInfo.Size),
			bkReader.ObjInfo.Size, rs, ObjectOptions{
				UserDefined: userDefined,
			}, false)
		// close the read end of the pipe, so the error gets
		// propagated to teeReader
		pr.CloseWithError(putErr)
		wg.Done()
	}()
	cleanupBackend := func() {
		pw.CloseWithError(bkReader.Close())
		wg.Wait()
	}
	return NewGetObjectReaderFromReader(teeReader, bkReader.ObjInfo, opts, cleanupBackend)
}
// GetObjectInfo returns ObjectInfo from cache if available; otherwise it
// queries the backend, serving the cached copy when the backend is down and
// invalidating the cache when the backend object was deleted or replaced.
//
// FIX (idiom): the cache-freshness condition was
// `cc == nil || (cc != nil && !cc.isStale(...))` - the inner `cc != nil`
// is unreachable-false after short-circuit and has been removed.
func (c *cacheObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
	getObjectInfoFn := c.InnerGetObjectInfoFn
	if c.isCacheExclude(bucket, object) || c.skipCache() {
		return getObjectInfoFn(ctx, bucket, object, opts)
	}

	// fetch diskCache if object is currently cached or nearest available cache drive
	dcache, err := c.getCacheToLoc(ctx, bucket, object)
	if err != nil {
		return getObjectInfoFn(ctx, bucket, object, opts)
	}
	var cc *cacheControl
	// if cache control setting is valid, avoid HEAD operation to backend
	cachedObjInfo, _, cerr := dcache.Stat(ctx, bucket, object)
	if cerr == nil {
		cc = cacheControlOpts(cachedObjInfo)
		if cc == nil || !cc.isStale(cachedObjInfo.ModTime) {
			// This is a cache hit, mark it so
			c.cacheStats.incHit()
			return cachedObjInfo, nil
		}
	}

	objInfo, err := getObjectInfoFn(ctx, bucket, object, opts)
	if err != nil {
		if _, ok := err.(ObjectNotFound); ok {
			// Delete the cached entry if backend object was deleted.
			dcache.Delete(ctx, bucket, object)
			c.cacheStats.incMiss()
			return ObjectInfo{}, err
		}
		if !backendDownError(err) {
			c.cacheStats.incMiss()
			return ObjectInfo{}, err
		}
		if cerr == nil {
			// Backend is down but a cached copy exists - serve it.
			c.cacheStats.incHit()
			return cachedObjInfo, nil
		}
		c.cacheStats.incMiss()
		return ObjectInfo{}, BackendDown{}
	}
	// Reaching here implies cache miss
	c.cacheStats.incMiss()
	// when backend is up, do a sanity check on cached object
	if cerr != nil {
		return objInfo, nil
	}
	if cachedObjInfo.ETag != objInfo.ETag {
		// Delete the cached entry if the backend object was replaced.
		dcache.Delete(ctx, bucket, object)
	}
	return objInfo, nil
}
// CopyObject reverts to the backend after evicting any stale cache entries.
// Only an in-place copy (same bucket and object, i.e. a metadata update) can
// invalidate the cache; every other copy goes straight to the backend.
func (c *cacheObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) {
	copyObjectFn := c.InnerCopyObjectFn
	// Bypass the cache entirely for excluded objects, during migration, and
	// for cross-object copies (short-circuit order matches the original).
	if c.isCacheExclude(srcBucket, srcObject) || c.skipCache() ||
		srcBucket != dstBucket || srcObject != dstObject {
		return copyObjectFn(ctx, srcBucket, srcObject, dstBucket, dstObject, srcInfo, srcOpts, dstOpts)
	}
	// fetch diskCache if object is currently cached or nearest available cache drive
	dcache, err := c.getCacheToLoc(ctx, srcBucket, srcObject)
	if err != nil {
		return copyObjectFn(ctx, srcBucket, srcObject, dstBucket, dstObject, srcInfo, srcOpts, dstOpts)
	}
	// if currently cached, evict old entry and revert to backend.
	if cachedObjInfo, _, cerr := dcache.Stat(ctx, srcBucket, srcObject); cerr == nil {
		cc := cacheControlOpts(cachedObjInfo)
		if cc == nil || !cc.isStale(cachedObjInfo.ModTime) {
			dcache.Delete(ctx, srcBucket, srcObject)
		}
	}
	return copyObjectFn(ctx, srcBucket, srcObject, dstBucket, dstObject, srcInfo, srcOpts, dstOpts)
}
// StorageInfo - returns total and free capacity aggregated across all
// configured cache drives. Per-drive stat errors are logged and the drive's
// zero-valued info is still summed.
func (c *cacheObjects) StorageInfo(ctx context.Context) (cInfo CacheStorageInfo) {
	var totalCap, freeCap uint64
	for _, dc := range c.cache {
		if dc == nil {
			continue
		}
		info, err := getDiskInfo(dc.dir)
		logger.GetReqInfo(ctx).AppendTags("cachePath", dc.dir)
		logger.LogIf(ctx, err)
		totalCap += info.Total
		freeCap += info.Free
	}
	return CacheStorageInfo{
		Total: totalCap,
		Free:  freeCap,
	}
}
// CacheStats - returns the underlying cache hit/miss statistics. (The
// previous comment said "storage statistics", copy-pasted from StorageInfo.)
func (c *cacheObjects) CacheStats() (cs *CacheStats) {
	return c.cacheStats
}
// skipCache() returns true if cache migration is in progress, in which case
// all cache operations are bypassed until migration completes.
func (c *cacheObjects) skipCache() bool {
	c.migMutex.Lock()
	defer c.migMutex.Unlock()
	return c.migrating
}
// isCacheExclude returns true if the object should be excluded from the
// cache: directories are always excluded, and "bucket/object" is matched
// against each configured wildcard exclusion pattern.
func (c *cacheObjects) isCacheExclude(bucket, object string) bool {
	// exclude directories from cache
	if strings.HasSuffix(object, SlashSeparator) {
		return true
	}
	// matchStr is loop-invariant; build it once instead of per pattern.
	matchStr := fmt.Sprintf("%s/%s", bucket, object)
	for _, pattern := range c.exclude {
		if ok := wildcard.MatchSimple(pattern, matchStr); ok {
			return true
		}
	}
	return false
}
// choose a cache deterministically based on hash of bucket,object. The hash
// index is treated as a hint: if the drive at that index is offline, the
// drive list is walked as a circular buffer starting at the hint until an
// online drive is found.
func (c *cacheObjects) getCacheLoc(bucket, object string) (*diskCache, error) {
	start := c.hashIndex(bucket, object)
	n := len(c.cache)
	for offset := 0; offset < n; offset++ {
		dc := c.cache[(start+offset)%n]
		if dc != nil && dc.IsOnline() {
			return dc, nil
		}
	}
	return nil, errDiskNotFound
}
// getCacheToLoc returns the cache disk where the object is currently cached
// for a GET operation. The drive list is walked as a circular buffer starting
// at the hash index; if the object is not found on any online drive, the
// first online drive closest to the hash index is returned so the object can
// be (re-)cached there.
func (c *cacheObjects) getCacheToLoc(ctx context.Context, bucket, object string) (*diskCache, error) {
	start := c.hashIndex(bucket, object)
	n := len(c.cache)
	// fallback is the first online cache disk closest to the hint index.
	var fallback *diskCache
	for offset := 0; offset < n; offset++ {
		dc := c.cache[(start+offset)%n]
		if dc == nil || !dc.IsOnline() {
			continue
		}
		if fallback == nil {
			fallback = dc
		}
		if dc.Exists(ctx, bucket, object) {
			return dc, nil
		}
	}
	if fallback != nil {
		return fallback, nil
	}
	return nil, errDiskNotFound
}
// hashIndex computes a deterministic index into the cache-drive slice from a
// CRC hash of "bucket/object", so the same object always maps to the same
// preferred drive.
func (c *cacheObjects) hashIndex(bucket, object string) int {
	return crcHashMod(pathJoin(bucket, object), len(c.cache))
}
// newCache initializes the diskCache objects for the cache "drives"
// specified in config.json or the global env overrides. It returns one
// slice entry per configured drive (nil for drives whose format.json is
// missing) and a flag indicating whether a cache-format migration is
// still pending.
func newCache(config cache.Config) ([]*diskCache, bool, error) {
	var caches []*diskCache
	ctx := logger.SetReqInfo(GlobalContext, &logger.ReqInfo{})
	formats, migrating, err := loadAndValidateCacheFormat(ctx, config.Drives)
	if err != nil {
		return nil, false, err
	}
	for i, dir := range config.Drives {
		// skip diskCache creation for cache drives missing a format.json
		if formats[i] == nil {
			caches = append(caches, nil)
			continue
		}
		if err := checkAtimeSupport(dir); err != nil {
			return nil, false, errors.New("Atime support required for disk caching")
		}
		// Named "dc" (not "cache") to avoid shadowing the imported
		// "cache" package used for the cache.Config parameter type.
		dc, err := newDiskCache(ctx, dir, config)
		if err != nil {
			return nil, false, err
		}
		caches = append(caches, dc)
	}
	return caches, migrating, nil
}
// migrateCacheFromV1toV2 migrates every configured cache drive from the V1
// on-disk format to V2, one goroutine per drive. The in-progress flag
// (c.migrating) is cleared only when all drives migrate cleanly; on any
// error the flag is left set so migration can be retried on a later
// restart.
func (c *cacheObjects) migrateCacheFromV1toV2(ctx context.Context) {
	logStartupMessage(color.Blue("Cache migration initiated ...."))

	g := errgroup.WithNErrs(len(c.cache))
	for index, dc := range c.cache {
		if dc == nil {
			continue
		}
		index := index // capture for the closure below
		g.Go(func() error {
			// start migration from V1 to V2
			return migrateOldCache(ctx, c.cache[index])
		}, index)
	}

	errCnt := 0
	for _, err := range g.Wait() {
		if err != nil {
			// count and log each failure; a redundant trailing
			// "continue" was removed here
			errCnt++
			logger.LogIf(ctx, err)
		}
	}
	if errCnt > 0 {
		// leave c.migrating set so a later restart retries migration
		return
	}

	// update migration status
	c.migMutex.Lock()
	defer c.migMutex.Unlock()
	c.migrating = false
	logStartupMessage(color.Blue("Cache migration completed successfully."))
}
// PutObject - caches the uploaded object for single Put operations.
//
// Eligible objects are written through to the backend and the cache is
// filled in the background. Encrypted objects, objects under retention or
// legal hold, and objects matching the exclude patterns bypass the cache
// (deleting any stale cached copy first). In writeback-commit mode the
// object is stored in the cache first and uploaded to the backend
// asynchronously via uploadObject.
func (c *cacheObjects) PutObject(ctx context.Context, bucket, object string, r *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	putObjectFn := c.InnerPutObjectFn
	dcache, err := c.getCacheToLoc(ctx, bucket, object)
	if err != nil {
		// disk cache could not be located, execute backend call.
		return putObjectFn(ctx, bucket, object, r, opts)
	}
	size := r.Size()
	// caching may be probabilistically skipped (e.g. under load)
	if c.skipCache() {
		return putObjectFn(ctx, bucket, object, r, opts)
	}
	// fetch from backend if there is no space on cache drive
	if !dcache.diskSpaceAvailable(size) {
		return putObjectFn(ctx, bucket, object, r, opts)
	}
	// never cache encrypted objects; drop any stale cached copy
	if opts.ServerSideEncryption != nil {
		dcache.Delete(ctx, bucket, object)
		return putObjectFn(ctx, bucket, object, r, opts)
	}
	// skip cache for objects with locks
	objRetention := objectlock.GetObjectRetentionMeta(opts.UserDefined)
	legalHold := objectlock.GetObjectLegalHoldMeta(opts.UserDefined)
	if objRetention.Mode.Valid() || legalHold.Status.Valid() {
		dcache.Delete(ctx, bucket, object)
		return putObjectFn(ctx, bucket, object, r, opts)
	}
	// fetch from backend if cache exclude pattern or cache-control
	// directive set to exclude
	if c.isCacheExclude(bucket, object) {
		dcache.Delete(ctx, bucket, object)
		return putObjectFn(ctx, bucket, object, r, opts)
	}
	if c.commitWriteback {
		// writeback mode: cache first, upload to backend asynchronously
		oi, err := dcache.Put(ctx, bucket, object, r, r.Size(), nil, opts, false)
		if err != nil {
			return ObjectInfo{}, err
		}
		go c.uploadObject(GlobalContext, oi)
		return oi, nil
	}
	// write-through: backend first, then warm the cache asynchronously
	objInfo, err = putObjectFn(ctx, bucket, object, r, opts)
	if err == nil {
		go func() {
			// fill cache in the background
			bReader, bErr := c.InnerGetObjectNInfoFn(GlobalContext, bucket, object, nil, http.Header{}, readLock, ObjectOptions{})
			if bErr != nil {
				return
			}
			defer bReader.Close()
			oi, _, err := dcache.Stat(GlobalContext, bucket, object)
			// avoid cache overwrite if another background routine filled cache
			if err != nil || oi.ETag != bReader.ObjInfo.ETag {
				dcache.Put(GlobalContext, bucket, object, bReader, bReader.ObjInfo.Size, nil, ObjectOptions{UserDefined: getMetadata(bReader.ObjInfo)}, false)
			}
		}()
	}
	return objInfo, err
}
// uploadObject uploads a cached object to the backend in async commit
// (writeback) mode. The commit status — and, on failure, a retry counter —
// is persisted in the cached object's metadata, and failed uploads are
// re-queued after a delay proportional to the retry count.
func (c *cacheObjects) uploadObject(ctx context.Context, oi ObjectInfo) {
	dcache, err := c.getCacheToLoc(ctx, oi.Bucket, oi.Name)
	if err != nil {
		// disk cache could not be located.
		logger.LogIf(ctx, fmt.Errorf("Could not upload %s/%s to backend: %w", oi.Bucket, oi.Name, err))
		return
	}
	cReader, _, bErr := dcache.Get(ctx, oi.Bucket, oi.Name, nil, http.Header{}, ObjectOptions{})
	if bErr != nil {
		return
	}
	defer cReader.Close()
	// cached copy changed since this upload was queued; skip it
	if cReader.ObjInfo.ETag != oi.ETag {
		return
	}
	// nothing to do if the object was already committed to the backend
	st := cacheCommitStatus(oi.UserDefined[writeBackStatusHeader])
	if st == CommitComplete || st.String() == "" {
		return
	}
	hashReader, err := hash.NewReader(cReader, oi.Size, "", "", oi.Size)
	if err != nil {
		return
	}
	var opts ObjectOptions
	opts.UserDefined = make(map[string]string)
	opts.UserDefined[xhttp.ContentMD5] = oi.UserDefined["content-md5"]
	objInfo, err := c.InnerPutObjectFn(ctx, oi.Bucket, oi.Name, NewPutObjReader(hashReader), opts)
	wbCommitStatus := CommitComplete
	if err != nil {
		wbCommitStatus = CommitFailed
	}
	// record commit status (and retry count on failure) in the cache copy
	meta := cloneMSS(cReader.ObjInfo.UserDefined)
	retryCnt := 0
	if wbCommitStatus == CommitFailed {
		retryCnt, _ = strconv.Atoi(meta[writeBackRetryHeader])
		retryCnt++
		meta[writeBackRetryHeader] = strconv.Itoa(retryCnt)
	} else {
		delete(meta, writeBackRetryHeader)
	}
	meta[writeBackStatusHeader] = wbCommitStatus.String()
	meta["etag"] = oi.ETag
	dcache.SaveMetadata(ctx, oi.Bucket, oi.Name, meta, objInfo.Size, nil, "", false)
	if retryCnt > 0 {
		// slow down retries
		time.Sleep(time.Second * time.Duration(retryCnt%10+1))
		c.queueWritebackRetry(oi)
	}
}
// queueWritebackRetry re-attempts the backend upload of a cached object.
// The send is non-blocking: when wbRetryCh is full — or nil, in which case
// the send can never proceed — the default branch drops the retry.
// NOTE(review): the upload runs inline right after queuing oi on
// wbRetryCh; confirm whether a separate consumer also drains this channel,
// otherwise the queued copy appears unused.
func (c *cacheObjects) queueWritebackRetry(oi ObjectInfo) {
	select {
	case c.wbRetryCh <- oi:
		c.uploadObject(GlobalContext, oi)
	default:
	}
}
// newServerCacheObjects returns the CacheObjectLayer used by the server.
// It builds the per-drive disk caches, wires the Inner*Fn delegates to the
// live object layer, installs per-disk stats collection, starts background
// GC, kicks off V1→V2 format migration when needed, and — in
// writeback-commit mode — sets up the retry channel for pending backend
// uploads.
func newServerCacheObjects(ctx context.Context, config cache.Config) (CacheObjectLayer, error) {
	// list of disk caches for cache "drives" specified in config.json or MINIO_CACHE_DRIVES env var.
	cache, migrateSw, err := newCache(config)
	if err != nil {
		return nil, err
	}
	c := &cacheObjects{
		cache:           cache,
		exclude:         config.Exclude,
		after:           config.After,
		migrating:       migrateSw,
		migMutex:        sync.Mutex{},
		commitWriteback: config.CommitWriteback,
		cacheStats:      newCacheStats(),
		// Each Inner*Fn delegates to the object layer resolved at call
		// time via newObjectLayerFn().
		InnerGetObjectInfoFn: func(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
			return newObjectLayerFn().GetObjectInfo(ctx, bucket, object, opts)
		},
		InnerGetObjectNInfoFn: func(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
			return newObjectLayerFn().GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts)
		},
		InnerDeleteObjectFn: func(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
			return newObjectLayerFn().DeleteObject(ctx, bucket, object, opts)
		},
		InnerPutObjectFn: func(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
			return newObjectLayerFn().PutObject(ctx, bucket, object, data, opts)
		},
		InnerCopyObjectFn: func(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) {
			return newObjectLayerFn().CopyObject(ctx, srcBucket, srcObject, destBucket, destObject, srcInfo, srcOpts, dstOpts)
		},
	}
	// Snapshot usage/capacity for every cache drive on demand (nil drives
	// report a zero-value entry).
	c.cacheStats.GetDiskStats = func() []CacheDiskStats {
		cacheDiskStats := make([]CacheDiskStats, len(c.cache))
		for i := range c.cache {
			dcache := c.cache[i]
			cacheDiskStats[i] = CacheDiskStats{}
			if dcache != nil {
				info, err := getDiskInfo(dcache.dir)
				logger.LogIf(ctx, err)
				cacheDiskStats[i].UsageSize = info.Used
				cacheDiskStats[i].TotalCapacity = info.Total
				cacheDiskStats[i].Dir = dcache.stats.Dir
				atomic.StoreInt32(&cacheDiskStats[i].UsageState, atomic.LoadInt32(&dcache.stats.UsageState))
				atomic.StoreUint64(&cacheDiskStats[i].UsagePercent, atomic.LoadUint64(&dcache.stats.UsagePercent))
			}
		}
		return cacheDiskStats
	}
	if migrateSw {
		go c.migrateCacheFromV1toV2(ctx)
	}
	go c.gc(ctx)
	if c.commitWriteback {
		c.wbRetryCh = make(chan ObjectInfo, 10000)
		// close the retry channel once the server context is done
		go func() {
			<-GlobalContext.Done()
			close(c.wbRetryCh)
		}()
		go c.queuePendingWriteback(ctx)
	}
	return c, nil
}
// gc periodically prompts every cache drive to check its disk usage; a
// drive at its high watermark queues a garbage-collection scan as a side
// effect of the space check. The loop pauses while a cache-format
// migration is in progress and exits when ctx is cancelled.
func (c *cacheObjects) gc(ctx context.Context) {
	tick := time.NewTicker(cacheGCInterval)
	defer tick.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-tick.C:
		}
		if c.migrating {
			// skip GC while cache format migration is running
			continue
		}
		for _, dc := range c.cache {
			if dc == nil {
				continue
			}
			// Check if there is disk.
			// Will queue a GC scan if at high watermark.
			dc.diskSpaceAvailable(0)
		}
	}
}
// queuePendingWriteback queues any pending or failed async commits when
// the server restarts. For each cache drive it drains retryWritebackCh,
// re-queuing every received object for backend upload. While a drive's
// channel is empty but still open, the loop polls with a 1s sleep; it
// moves on to the next drive only once that channel is closed. Cancelling
// ctx stops the drain entirely.
func (c *cacheObjects) queuePendingWriteback(ctx context.Context) {
	for _, dcache := range c.cache {
		if dcache != nil {
			for {
				select {
				case <-ctx.Done():
					return
				case oi, ok := <-dcache.retryWritebackCh:
					if !ok {
						// channel closed: this drive is drained
						goto next
					}
					c.queueWritebackRetry(oi)
				default:
					time.Sleep(time.Second * 1)
				}
			}
		next:
		}
	}
}
| cmd/disk-cache.go | 0 | https://github.com/minio/minio/commit/47dfc1b1b09a3ef6bc21d7f39636fbbe81e2c16f | [
0.0037957250606268644,
0.0002788701676763594,
0.00016276062524411827,
0.00016960113134700805,
0.0004701527359429747
] |
{
"id": 1,
"code_window": [
"\treturn strings.HasSuffix(user, \",\"+l.UserDNSearchBaseDN)\n",
"}\n",
"\n",
"// GetNonExistentUserDistNames - find user accounts (DNs) that are no longer\n",
"// present in the LDAP server.\n",
"func (l *Config) GetNonExistentUserDistNames(userDistNames []string) ([]string, error) {\n",
"\tif !l.isUsingLookupBind {\n",
"\t\treturn nil, errors.New(\"current LDAP configuration does not permit looking for expired user accounts\")\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// GetNonEligibleUserDistNames - find user accounts (DNs) that are no longer\n",
"// present in the LDAP server or do not meet filter criteria anymore\n",
"func (l *Config) GetNonEligibleUserDistNames(userDistNames []string) ([]string, error) {\n"
],
"file_path": "internal/config/identity/ldap/config.go",
"type": "replace",
"edit_start_line_idx": 480
} | package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *BucketReplicationStats) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "PendingSize":
z.PendingSize, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "PendingSize")
return
}
case "ReplicatedSize":
z.ReplicatedSize, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ReplicatedSize")
return
}
case "ReplicaSize":
z.ReplicaSize, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ReplicaSize")
return
}
case "FailedSize":
z.FailedSize, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "FailedSize")
return
}
case "PendingCount":
z.PendingCount, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "PendingCount")
return
}
case "FailedCount":
z.FailedCount, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "FailedCount")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *BucketReplicationStats) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 6
// write "PendingSize"
err = en.Append(0x86, 0xab, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.PendingSize)
if err != nil {
err = msgp.WrapError(err, "PendingSize")
return
}
// write "ReplicatedSize"
err = en.Append(0xae, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.ReplicatedSize)
if err != nil {
err = msgp.WrapError(err, "ReplicatedSize")
return
}
// write "ReplicaSize"
err = en.Append(0xab, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x69, 0x7a, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.ReplicaSize)
if err != nil {
err = msgp.WrapError(err, "ReplicaSize")
return
}
// write "FailedSize"
err = en.Append(0xaa, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.FailedSize)
if err != nil {
err = msgp.WrapError(err, "FailedSize")
return
}
// write "PendingCount"
err = en.Append(0xac, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteUint64(z.PendingCount)
if err != nil {
err = msgp.WrapError(err, "PendingCount")
return
}
// write "FailedCount"
err = en.Append(0xab, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteUint64(z.FailedCount)
if err != nil {
err = msgp.WrapError(err, "FailedCount")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *BucketReplicationStats) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 6
// string "PendingSize"
o = append(o, 0x86, 0xab, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65)
o = msgp.AppendUint64(o, z.PendingSize)
// string "ReplicatedSize"
o = append(o, 0xae, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65)
o = msgp.AppendUint64(o, z.ReplicatedSize)
// string "ReplicaSize"
o = append(o, 0xab, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x69, 0x7a, 0x65)
o = msgp.AppendUint64(o, z.ReplicaSize)
// string "FailedSize"
o = append(o, 0xaa, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65)
o = msgp.AppendUint64(o, z.FailedSize)
// string "PendingCount"
o = append(o, 0xac, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendUint64(o, z.PendingCount)
// string "FailedCount"
o = append(o, 0xab, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendUint64(o, z.FailedCount)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *BucketReplicationStats) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "PendingSize":
z.PendingSize, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "PendingSize")
return
}
case "ReplicatedSize":
z.ReplicatedSize, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ReplicatedSize")
return
}
case "ReplicaSize":
z.ReplicaSize, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ReplicaSize")
return
}
case "FailedSize":
z.FailedSize, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "FailedSize")
return
}
case "PendingCount":
z.PendingCount, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "PendingCount")
return
}
case "FailedCount":
z.FailedCount, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "FailedCount")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BucketReplicationStats) Msgsize() (s int) {
s = 1 + 12 + msgp.Uint64Size + 15 + msgp.Uint64Size + 12 + msgp.Uint64Size + 11 + msgp.Uint64Size + 13 + msgp.Uint64Size + 12 + msgp.Uint64Size
return
}
// DecodeMsg implements msgp.Decodable
func (z *BucketStats) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "ReplicationStats":
err = z.ReplicationStats.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "ReplicationStats")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *BucketStats) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 1
// write "ReplicationStats"
err = en.Append(0x81, 0xb0, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x73)
if err != nil {
return
}
err = z.ReplicationStats.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "ReplicationStats")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *BucketStats) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 1
// string "ReplicationStats"
o = append(o, 0x81, 0xb0, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x73)
o, err = z.ReplicationStats.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "ReplicationStats")
return
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *BucketStats) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "ReplicationStats":
bts, err = z.ReplicationStats.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "ReplicationStats")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BucketStats) Msgsize() (s int) {
s = 1 + 17 + z.ReplicationStats.Msgsize()
return
}
| cmd/bucket-stats_gen.go | 0 | https://github.com/minio/minio/commit/47dfc1b1b09a3ef6bc21d7f39636fbbe81e2c16f | [
0.00018560444004833698,
0.00016896559100132436,
0.00016520638018846512,
0.00016789653454907238,
0.00000384445593226701
] |
{
"id": 2,
"code_window": [
"\t// Bind to the lookup user account\n",
"\tif err = l.lookupBind(conn); err != nil {\n",
"\t\treturn nil, err\n",
"\t}\n",
"\n",
"\tnonExistentUsers := []string{}\n",
"\tfor _, dn := range userDistNames {\n",
"\t\tsearchRequest := ldap.NewSearchRequest(\n",
"\t\t\tdn,\n",
"\t\t\tldap.ScopeBaseObject, ldap.NeverDerefAliases, 0, 0, false,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// Evaluate the filter again with generic wildcard instead of specific values\n",
"\tfilter := strings.Replace(l.UserDNSearchFilter, \"%s\", \"*\", -1)\n",
"\n"
],
"file_path": "internal/config/identity/ldap/config.go",
"type": "add",
"edit_start_line_idx": 498
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ldap
import (
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"math/rand"
"net"
"strconv"
"strings"
"time"
ldap "github.com/go-ldap/ldap/v3"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/config"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/env"
)
const (
defaultLDAPExpiry = time.Hour * 1
dnDelimiter = ";"
minLDAPExpiry time.Duration = 15 * time.Minute
maxLDAPExpiry time.Duration = 365 * 24 * time.Hour
)
// Config contains AD/LDAP server connectivity information.
type Config struct {
Enabled bool `json:"enabled"`
// E.g. "ldap.minio.io:636"
ServerAddr string `json:"serverAddr"`
// STS credentials expiry duration
STSExpiryDuration string `json:"stsExpiryDuration"`
// Format string for usernames
UsernameFormat string `json:"usernameFormat"`
UsernameFormats []string `json:"-"`
// User DN search parameters
UserDNSearchBaseDN string `json:"userDNSearchBaseDN"`
UserDNSearchFilter string `json:"userDNSearchFilter"`
// Group search parameters
GroupSearchBaseDistName string `json:"groupSearchBaseDN"`
GroupSearchBaseDistNames []string `json:"-"`
GroupSearchFilter string `json:"groupSearchFilter"`
// Lookup bind LDAP service account
LookupBindDN string `json:"lookupBindDN"`
LookupBindPassword string `json:"lookupBindPassword"`
stsExpiryDuration time.Duration // contains converted value
tlsSkipVerify bool // allows skipping TLS verification
serverInsecure bool // allows plain text connection to LDAP server
serverStartTLS bool // allows using StartTLS connection to LDAP server
isUsingLookupBind bool
rootCAs *x509.CertPool
}
// LDAP keys and envs.
const (
ServerAddr = "server_addr"
STSExpiry = "sts_expiry"
LookupBindDN = "lookup_bind_dn"
LookupBindPassword = "lookup_bind_password"
UserDNSearchBaseDN = "user_dn_search_base_dn"
UserDNSearchFilter = "user_dn_search_filter"
UsernameFormat = "username_format"
GroupSearchFilter = "group_search_filter"
GroupSearchBaseDN = "group_search_base_dn"
TLSSkipVerify = "tls_skip_verify"
ServerInsecure = "server_insecure"
ServerStartTLS = "server_starttls"
EnvServerAddr = "MINIO_IDENTITY_LDAP_SERVER_ADDR"
EnvSTSExpiry = "MINIO_IDENTITY_LDAP_STS_EXPIRY"
EnvTLSSkipVerify = "MINIO_IDENTITY_LDAP_TLS_SKIP_VERIFY"
EnvServerInsecure = "MINIO_IDENTITY_LDAP_SERVER_INSECURE"
EnvServerStartTLS = "MINIO_IDENTITY_LDAP_SERVER_STARTTLS"
EnvUsernameFormat = "MINIO_IDENTITY_LDAP_USERNAME_FORMAT"
EnvUserDNSearchBaseDN = "MINIO_IDENTITY_LDAP_USER_DN_SEARCH_BASE_DN"
EnvUserDNSearchFilter = "MINIO_IDENTITY_LDAP_USER_DN_SEARCH_FILTER"
EnvGroupSearchFilter = "MINIO_IDENTITY_LDAP_GROUP_SEARCH_FILTER"
EnvGroupSearchBaseDN = "MINIO_IDENTITY_LDAP_GROUP_SEARCH_BASE_DN"
EnvLookupBindDN = "MINIO_IDENTITY_LDAP_LOOKUP_BIND_DN"
EnvLookupBindPassword = "MINIO_IDENTITY_LDAP_LOOKUP_BIND_PASSWORD"
)
var removedKeys = []string{
"username_search_filter",
"username_search_base_dn",
"group_name_attribute",
}
// DefaultKVS - default config for LDAP config
var (
DefaultKVS = config.KVS{
config.KV{
Key: ServerAddr,
Value: "",
},
config.KV{
Key: UsernameFormat,
Value: "",
},
config.KV{
Key: UserDNSearchBaseDN,
Value: "",
},
config.KV{
Key: UserDNSearchFilter,
Value: "",
},
config.KV{
Key: GroupSearchFilter,
Value: "",
},
config.KV{
Key: GroupSearchBaseDN,
Value: "",
},
config.KV{
Key: STSExpiry,
Value: "1h",
},
config.KV{
Key: TLSSkipVerify,
Value: config.EnableOff,
},
config.KV{
Key: ServerInsecure,
Value: config.EnableOff,
},
config.KV{
Key: ServerStartTLS,
Value: config.EnableOff,
},
config.KV{
Key: LookupBindDN,
Value: "",
},
config.KV{
Key: LookupBindPassword,
Value: "",
},
}
)
func getGroups(conn *ldap.Conn, sreq *ldap.SearchRequest) ([]string, error) {
var groups []string
sres, err := conn.Search(sreq)
if err != nil {
// Check if there is no matching result and return empty slice.
// Ref: https://ldap.com/ldap-result-code-reference/
if ldap.IsErrorWithCode(err, 32) {
return nil, nil
}
return nil, err
}
for _, entry := range sres.Entries {
// We only queried one attribute,
// so we only look up the first one.
groups = append(groups, entry.DN)
}
return groups, nil
}
func (l *Config) lookupBind(conn *ldap.Conn) error {
var err error
if l.LookupBindPassword == "" {
err = conn.UnauthenticatedBind(l.LookupBindDN)
} else {
err = conn.Bind(l.LookupBindDN, l.LookupBindPassword)
}
if ldap.IsErrorWithCode(err, 49) {
return fmt.Errorf("LDAP Lookup Bind user invalid credentials error: %w", err)
}
return err
}
// usernameFormatsBind - Iterates over all given username formats and expects
// that only one will succeed if the credentials are valid. The succeeding
// bindDN is returned or an error.
//
// In the rare case that multiple username formats succeed, implying that two
// (or more) distinct users in the LDAP directory have the same username and
// password, we return an error as we cannot identify the account intended by
// the user.
func (l *Config) usernameFormatsBind(conn *ldap.Conn, username, password string) (string, error) {
var bindDistNames []string
var errs = make([]error, len(l.UsernameFormats))
var successCount = 0
for i, usernameFormat := range l.UsernameFormats {
bindDN := fmt.Sprintf(usernameFormat, username)
// Bind with user credentials to validate the password
errs[i] = conn.Bind(bindDN, password)
if errs[i] == nil {
bindDistNames = append(bindDistNames, bindDN)
successCount++
} else if !ldap.IsErrorWithCode(errs[i], 49) {
return "", fmt.Errorf("LDAP Bind request failed with unexpected error: %w", errs[i])
}
}
if successCount == 0 {
var errStrings []string
for _, err := range errs {
if err != nil {
errStrings = append(errStrings, err.Error())
}
}
outErr := fmt.Sprintf("All username formats failed due to invalid credentials: %s", strings.Join(errStrings, "; "))
return "", errors.New(outErr)
}
if successCount > 1 {
successDistNames := strings.Join(bindDistNames, ", ")
errMsg := fmt.Sprintf("Multiple username formats succeeded - ambiguous user login (succeeded for: %s)", successDistNames)
return "", errors.New(errMsg)
}
return bindDistNames[0], nil
}
// lookupUserDN searches for the DN of the user given their username. conn is
// assumed to be using the lookup bind service account. It is required that the
// search result in at most one result.
func (l *Config) lookupUserDN(conn *ldap.Conn, username string) (string, error) {
filter := strings.Replace(l.UserDNSearchFilter, "%s", ldap.EscapeFilter(username), -1)
searchRequest := ldap.NewSearchRequest(
l.UserDNSearchBaseDN,
ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
filter,
[]string{}, // only need DN, so no pass no attributes here
nil,
)
searchResult, err := conn.Search(searchRequest)
if err != nil {
return "", err
}
if len(searchResult.Entries) == 0 {
return "", fmt.Errorf("User DN for %s not found", username)
}
if len(searchResult.Entries) != 1 {
return "", fmt.Errorf("Multiple DNs for %s found - please fix the search filter", username)
}
return searchResult.Entries[0].DN, nil
}
func (l *Config) searchForUserGroups(conn *ldap.Conn, username, bindDN string) ([]string, error) {
// User groups lookup.
var groups []string
if l.GroupSearchFilter != "" {
for _, groupSearchBase := range l.GroupSearchBaseDistNames {
filter := strings.Replace(l.GroupSearchFilter, "%s", ldap.EscapeFilter(username), -1)
filter = strings.Replace(filter, "%d", ldap.EscapeFilter(bindDN), -1)
searchRequest := ldap.NewSearchRequest(
groupSearchBase,
ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
filter,
nil,
nil,
)
var newGroups []string
newGroups, err := getGroups(conn, searchRequest)
if err != nil {
errRet := fmt.Errorf("Error finding groups of %s: %w", bindDN, err)
return nil, errRet
}
groups = append(groups, newGroups...)
}
}
return groups, nil
}
// LookupUserDN searches for the full DN and groups of a given username
func (l *Config) LookupUserDN(username string) (string, []string, error) {
if !l.isUsingLookupBind {
return "", nil, errors.New("current lookup mode does not support searching for User DN")
}
conn, err := l.Connect()
if err != nil {
return "", nil, err
}
defer conn.Close()
// Bind to the lookup user account
if err = l.lookupBind(conn); err != nil {
return "", nil, err
}
// Lookup user DN
bindDN, err := l.lookupUserDN(conn, username)
if err != nil {
errRet := fmt.Errorf("Unable to find user DN: %w", err)
return "", nil, errRet
}
groups, err := l.searchForUserGroups(conn, username, bindDN)
if err != nil {
return "", nil, err
}
return bindDN, groups, nil
}
// Bind - binds to ldap, searches LDAP and returns the distinguished name of the
// user and the list of groups.
func (l *Config) Bind(username, password string) (string, []string, error) {
conn, err := l.Connect()
if err != nil {
return "", nil, err
}
defer conn.Close()
var bindDN string
if l.isUsingLookupBind {
// Bind to the lookup user account
if err = l.lookupBind(conn); err != nil {
return "", nil, err
}
// Lookup user DN
bindDN, err = l.lookupUserDN(conn, username)
if err != nil {
errRet := fmt.Errorf("Unable to find user DN: %w", err)
return "", nil, errRet
}
// Authenticate the user credentials.
err = conn.Bind(bindDN, password)
if err != nil {
errRet := fmt.Errorf("LDAP auth failed for DN %s: %w", bindDN, err)
return "", nil, errRet
}
// Bind to the lookup user account again to perform group search.
if err = l.lookupBind(conn); err != nil {
return "", nil, err
}
} else {
// Verify login credentials by checking the username formats.
bindDN, err = l.usernameFormatsBind(conn, username, password)
if err != nil {
return "", nil, err
}
// Bind to the successful bindDN again.
err = conn.Bind(bindDN, password)
if err != nil {
errRet := fmt.Errorf("LDAP conn failed though auth for DN %s succeeded: %w", bindDN, err)
return "", nil, errRet
}
}
// User groups lookup.
groups, err := l.searchForUserGroups(conn, username, bindDN)
if err != nil {
return "", nil, err
}
return bindDN, groups, nil
}
// Connect connect to ldap server.
func (l *Config) Connect() (ldapConn *ldap.Conn, err error) {
if l == nil {
return nil, errors.New("LDAP is not configured")
}
serverHost, _, err := net.SplitHostPort(l.ServerAddr)
if err != nil {
serverHost = l.ServerAddr
// User default LDAP port if none specified "636"
l.ServerAddr = net.JoinHostPort(l.ServerAddr, "636")
}
if l.serverInsecure {
return ldap.Dial("tcp", l.ServerAddr)
}
tlsConfig := &tls.Config{
InsecureSkipVerify: l.tlsSkipVerify,
RootCAs: l.rootCAs,
ServerName: serverHost,
}
if l.serverStartTLS {
conn, err := ldap.Dial("tcp", l.ServerAddr)
if err != nil {
return nil, err
}
err = conn.StartTLS(tlsConfig)
return conn, err
}
return ldap.DialTLS("tcp", l.ServerAddr, tlsConfig)
}
// GetExpiryDuration - return parsed expiry duration.
func (l Config) GetExpiryDuration(dsecs string) (time.Duration, error) {
if dsecs == "" {
return l.stsExpiryDuration, nil
}
d, err := strconv.Atoi(dsecs)
if err != nil {
return 0, auth.ErrInvalidDuration
}
dur := time.Duration(d) * time.Second
if dur < minLDAPExpiry || dur > maxLDAPExpiry {
return 0, auth.ErrInvalidDuration
}
return dur, nil
}
func (l Config) testConnection() error {
conn, err := l.Connect()
if err != nil {
return fmt.Errorf("Error creating connection to LDAP server: %w", err)
}
defer conn.Close()
if l.isUsingLookupBind {
if err = l.lookupBind(conn); err != nil {
return fmt.Errorf("Error connecting as LDAP Lookup Bind user: %w", err)
}
return nil
}
// Generate some random user credentials for username formats mode test.
username := fmt.Sprintf("sometestuser%09d", rand.Int31n(1000000000))
charset := []byte("abcdefghijklmnopqrstuvwxyz" +
"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")
rand.Shuffle(len(charset), func(i, j int) {
charset[i], charset[j] = charset[j], charset[i]
})
password := string(charset[:20])
_, err = l.usernameFormatsBind(conn, username, password)
if err == nil {
// We don't expect to successfully guess a credential in this
// way.
return fmt.Errorf("Unexpected random credentials success for user=%s password=%s", username, password)
} else if strings.HasPrefix(err.Error(), "All username formats failed due to invalid credentials: ") {
return nil
}
return fmt.Errorf("LDAP connection test error: %w", err)
}
// IsLDAPUserDN determines if the given string could be a user DN from LDAP.
func (l Config) IsLDAPUserDN(user string) bool {
return strings.HasSuffix(user, ","+l.UserDNSearchBaseDN)
}
// GetNonExistentUserDistNames - find user accounts (DNs) that are no longer
// present in the LDAP server.
//
// Requires lookup-bind mode; each DN is probed with a base-scope search and
// collected into the result when the directory reports it missing. Returns
// an error on any non-"not found" search failure.
func (l *Config) GetNonExistentUserDistNames(userDistNames []string) ([]string, error) {
	if !l.isUsingLookupBind {
		return nil, errors.New("current LDAP configuration does not permit looking for expired user accounts")
	}

	conn, err := l.Connect()
	if err != nil {
		return nil, err
	}
	defer conn.Close()

	// Bind to the lookup user account
	if err = l.lookupBind(conn); err != nil {
		return nil, err
	}

	nonExistentUsers := []string{}
	for _, dn := range userDistNames {
		searchRequest := ldap.NewSearchRequest(
			dn,
			ldap.ScopeBaseObject, ldap.NeverDerefAliases, 0, 0, false,
			"(objectclass=*)",
			[]string{}, // only the DN is needed, so pass no attributes
			nil,
		)

		searchResult, err := conn.Search(searchRequest)
		if err != nil {
			// A "No Such Object" result code means the DN is gone from
			// the directory - record it and keep scanning instead of
			// failing the whole pass. (Named constant replaces the
			// previous magic number 32.)
			if ldap.IsErrorWithCode(err, ldap.LDAPResultNoSuchObject) {
				nonExistentUsers = append(nonExistentUsers, dn)
				continue
			}
			return nil, err
		}
		if len(searchResult.Entries) == 0 {
			// DN was not found - this means this user account is
			// expired.
			nonExistentUsers = append(nonExistentUsers, dn)
		}
	}
	return nonExistentUsers, nil
}
// LookupGroupMemberships - for each DN finds the set of LDAP groups they are a
// member of.
//
// Requires lookup-bind mode. userDNToUsernameMap supplies the short username
// for each DN; the result maps each DN to its group set. The first search
// failure aborts the whole lookup.
func (l *Config) LookupGroupMemberships(userDistNames []string, userDNToUsernameMap map[string]string) (map[string]set.StringSet, error) {
	if !l.isUsingLookupBind {
		return nil, errors.New("current LDAP configuration does not permit this lookup")
	}

	conn, err := l.Connect()
	if err != nil {
		return nil, err
	}
	defer conn.Close()

	// Bind to the lookup user account
	if err = l.lookupBind(conn); err != nil {
		return nil, err
	}

	memberships := make(map[string]set.StringSet, len(userDistNames))
	for _, dn := range userDistNames {
		groups, gerr := l.searchForUserGroups(conn, userDNToUsernameMap[dn], dn)
		if gerr != nil {
			return nil, gerr
		}
		memberships[dn] = set.CreateStringSet(groups...)
	}
	return memberships, nil
}
// EnabledWithLookupBind - checks if ldap IDP is enabled in lookup bind mode.
func (l Config) EnabledWithLookupBind() bool {
	if !l.Enabled {
		return false
	}
	return l.isUsingLookupBind
}
// Enabled returns if LDAP configuration is enabled, i.e. a server address
// has been set.
func Enabled(kvs config.KVS) bool {
	addr := kvs.Get(ServerAddr)
	return addr != ""
}
// Lookup - initializes LDAP config, overrides config, if any ENV values are set.
//
// The returned Config is disabled (zero value) when no server address is
// configured. Configuration is validated as it is read; the first invalid
// value aborts with an error. Environment variables take precedence over
// the stored KVS values throughout.
func Lookup(kvs config.KVS, rootCAs *x509.CertPool) (l Config, err error) {
	l = Config{}

	// Purge all removed keys first
	for _, k := range removedKeys {
		kvs.Delete(k)
	}

	if err = config.CheckValidKeys(config.IdentityLDAPSubSys, kvs, DefaultKVS); err != nil {
		return l, err
	}

	// An unset server address means LDAP is not configured: return the
	// zero-value (disabled) config with no error.
	ldapServer := env.Get(EnvServerAddr, kvs.Get(ServerAddr))
	if ldapServer == "" {
		return l, nil
	}
	l.Enabled = true
	l.rootCAs = rootCAs
	l.ServerAddr = ldapServer
	l.stsExpiryDuration = defaultLDAPExpiry

	// Deprecated global STS expiry override, validated against the
	// [minLDAPExpiry, maxLDAPExpiry] window.
	if v := env.Get(EnvSTSExpiry, kvs.Get(STSExpiry)); v != "" {
		logger.Info("DEPRECATION WARNING: Support for configuring the default LDAP credentials expiry duration will be removed by October 2021. Please use the `DurationSeconds` parameter in the LDAP STS API instead.")
		expDur, err := time.ParseDuration(v)
		if err != nil {
			return l, errors.New("LDAP expiry time err:" + err.Error())
		}
		if expDur < minLDAPExpiry {
			return l, fmt.Errorf("LDAP expiry time must be at least %s", minLDAPExpiry)
		}
		if expDur > maxLDAPExpiry {
			return l, fmt.Errorf("LDAP expiry time may not exceed %s", maxLDAPExpiry)
		}
		l.STSExpiryDuration = v
		l.stsExpiryDuration = expDur
	}

	// LDAP connection configuration
	if v := env.Get(EnvServerInsecure, kvs.Get(ServerInsecure)); v != "" {
		l.serverInsecure, err = config.ParseBool(v)
		if err != nil {
			return l, err
		}
	}
	if v := env.Get(EnvServerStartTLS, kvs.Get(ServerStartTLS)); v != "" {
		l.serverStartTLS, err = config.ParseBool(v)
		if err != nil {
			return l, err
		}
	}
	if v := env.Get(EnvTLSSkipVerify, kvs.Get(TLSSkipVerify)); v != "" {
		l.tlsSkipVerify, err = config.ParseBool(v)
		if err != nil {
			return l, err
		}
	}

	// Lookup bind user configuration
	lookupBindDN := env.Get(EnvLookupBindDN, kvs.Get(LookupBindDN))
	lookupBindPassword := env.Get(EnvLookupBindPassword, kvs.Get(LookupBindPassword))
	if lookupBindDN != "" {
		l.LookupBindDN = lookupBindDN
		l.LookupBindPassword = lookupBindPassword
		l.isUsingLookupBind = true

		// User DN search configuration
		// Lookup bind mode cannot work without both a search base and a
		// search filter.
		userDNSearchBaseDN := env.Get(EnvUserDNSearchBaseDN, kvs.Get(UserDNSearchBaseDN))
		userDNSearchFilter := env.Get(EnvUserDNSearchFilter, kvs.Get(UserDNSearchFilter))
		if userDNSearchFilter == "" || userDNSearchBaseDN == "" {
			return l, errors.New("In lookup bind mode, userDN search base DN and userDN search filter are both required")
		}
		l.UserDNSearchBaseDN = userDNSearchBaseDN
		l.UserDNSearchFilter = userDNSearchFilter
	}

	// Username format configuration.
	if v := env.Get(EnvUsernameFormat, kvs.Get(UsernameFormat)); v != "" {
		if !strings.Contains(v, "%s") {
			return l, errors.New("LDAP username format does not support '%s' substitution")
		}
		l.UsernameFormats = strings.Split(v, dnDelimiter)
	}
	if len(l.UsernameFormats) > 0 {
		logger.Info("DEPRECATION WARNING: Support for %s will be removed by October 2021, please migrate your LDAP settings to lookup bind mode", UsernameFormat)
	}

	// Either lookup bind mode or username format is supported, but not both.
	if l.isUsingLookupBind && len(l.UsernameFormats) > 0 {
		return l, errors.New("Lookup Bind mode and Username Format mode are not supported at the same time")
	}

	// At least one of bind mode or username format must be used.
	if !l.isUsingLookupBind && len(l.UsernameFormats) == 0 {
		return l, errors.New("Either Lookup Bind mode or Username Format mode is required")
	}

	// Test connection to LDAP server.
	if err := l.testConnection(); err != nil {
		return l, fmt.Errorf("Connection test for LDAP server failed: %w", err)
	}

	// Group search params configuration
	grpSearchFilter := env.Get(EnvGroupSearchFilter, kvs.Get(GroupSearchFilter))
	grpSearchBaseDN := env.Get(EnvGroupSearchBaseDN, kvs.Get(GroupSearchBaseDN))

	// Either all group params must be set or none must be set.
	if (grpSearchFilter != "" && grpSearchBaseDN == "") || (grpSearchFilter == "" && grpSearchBaseDN != "") {
		return l, errors.New("All group related parameters must be set")
	}
	if grpSearchFilter != "" {
		l.GroupSearchFilter = grpSearchFilter
		l.GroupSearchBaseDistName = grpSearchBaseDN
		l.GroupSearchBaseDistNames = strings.Split(l.GroupSearchBaseDistName, dnDelimiter)
	}

	return l, nil
}
| internal/config/identity/ldap/config.go | 1 | https://github.com/minio/minio/commit/47dfc1b1b09a3ef6bc21d7f39636fbbe81e2c16f | [
0.9992138147354126,
0.2148575782775879,
0.00015936241834424436,
0.0028856750577688217,
0.39227229356765747
] |
{
"id": 2,
"code_window": [
"\t// Bind to the lookup user account\n",
"\tif err = l.lookupBind(conn); err != nil {\n",
"\t\treturn nil, err\n",
"\t}\n",
"\n",
"\tnonExistentUsers := []string{}\n",
"\tfor _, dn := range userDistNames {\n",
"\t\tsearchRequest := ldap.NewSearchRequest(\n",
"\t\t\tdn,\n",
"\t\t\tldap.ScopeBaseObject, ldap.NeverDerefAliases, 0, 0, false,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// Evaluate the filter again with generic wildcard instead of specific values\n",
"\tfilter := strings.Replace(l.UserDNSearchFilter, \"%s\", \"*\", -1)\n",
"\n"
],
"file_path": "internal/config/identity/ldap/config.go",
"type": "add",
"edit_start_line_idx": 498
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package dsync
import (
"fmt"
"sync"
"sync/atomic"
"time"
)
const WriteLock = -1
type lockServer struct {
mutex sync.Mutex
// Map of locks, with negative value indicating (exclusive) write lock
// and positive values indicating number of read locks
lockMap map[string]int64
// Refresh returns lock not found if set to true
lockNotFound bool
// Set to true if you want peers servers to do not respond
responseDelay int64
}
func (l *lockServer) setRefreshReply(refreshed bool) {
l.mutex.Lock()
defer l.mutex.Unlock()
l.lockNotFound = !refreshed
}
func (l *lockServer) setResponseDelay(responseDelay time.Duration) {
atomic.StoreInt64(&l.responseDelay, int64(responseDelay))
}
func (l *lockServer) Lock(args *LockArgs, reply *bool) error {
if d := atomic.LoadInt64(&l.responseDelay); d != 0 {
time.Sleep(time.Duration(d))
}
l.mutex.Lock()
defer l.mutex.Unlock()
if _, *reply = l.lockMap[args.Resources[0]]; !*reply {
l.lockMap[args.Resources[0]] = WriteLock // No locks held on the given name, so claim write lock
}
*reply = !*reply // Negate *reply to return true when lock is granted or false otherwise
return nil
}
func (l *lockServer) Unlock(args *LockArgs, reply *bool) error {
if d := atomic.LoadInt64(&l.responseDelay); d != 0 {
time.Sleep(time.Duration(d))
}
l.mutex.Lock()
defer l.mutex.Unlock()
var locksHeld int64
if locksHeld, *reply = l.lockMap[args.Resources[0]]; !*reply { // No lock is held on the given name
return fmt.Errorf("Unlock attempted on an unlocked entity: %s", args.Resources[0])
}
if *reply = locksHeld == WriteLock; !*reply { // Unless it is a write lock
return fmt.Errorf("Unlock attempted on a read locked entity: %s (%d read locks active)", args.Resources[0], locksHeld)
}
delete(l.lockMap, args.Resources[0]) // Remove the write lock
return nil
}
const ReadLock = 1
func (l *lockServer) RLock(args *LockArgs, reply *bool) error {
if d := atomic.LoadInt64(&l.responseDelay); d != 0 {
time.Sleep(time.Duration(d))
}
l.mutex.Lock()
defer l.mutex.Unlock()
var locksHeld int64
if locksHeld, *reply = l.lockMap[args.Resources[0]]; !*reply {
l.lockMap[args.Resources[0]] = ReadLock // No locks held on the given name, so claim (first) read lock
*reply = true
} else {
if *reply = locksHeld != WriteLock; *reply { // Unless there is a write lock
l.lockMap[args.Resources[0]] = locksHeld + ReadLock // Grant another read lock
}
}
return nil
}
func (l *lockServer) RUnlock(args *LockArgs, reply *bool) error {
if d := atomic.LoadInt64(&l.responseDelay); d != 0 {
time.Sleep(time.Duration(d))
}
l.mutex.Lock()
defer l.mutex.Unlock()
var locksHeld int64
if locksHeld, *reply = l.lockMap[args.Resources[0]]; !*reply { // No lock is held on the given name
return fmt.Errorf("RUnlock attempted on an unlocked entity: %s", args.Resources[0])
}
if *reply = locksHeld != WriteLock; !*reply { // A write-lock is held, cannot release a read lock
return fmt.Errorf("RUnlock attempted on a write locked entity: %s", args.Resources[0])
}
if locksHeld > ReadLock {
l.lockMap[args.Resources[0]] = locksHeld - ReadLock // Remove one of the read locks held
} else {
delete(l.lockMap, args.Resources[0]) // Remove the (last) read lock
}
return nil
}
func (l *lockServer) Refresh(args *LockArgs, reply *bool) error {
if d := atomic.LoadInt64(&l.responseDelay); d != 0 {
time.Sleep(time.Duration(d))
}
l.mutex.Lock()
defer l.mutex.Unlock()
*reply = !l.lockNotFound
return nil
}
func (l *lockServer) ForceUnlock(args *LockArgs, reply *bool) error {
if d := atomic.LoadInt64(&l.responseDelay); d != 0 {
time.Sleep(time.Duration(d))
}
l.mutex.Lock()
defer l.mutex.Unlock()
if len(args.UID) != 0 {
return fmt.Errorf("ForceUnlock called with non-empty UID: %s", args.UID)
}
delete(l.lockMap, args.Resources[0]) // Remove the lock (irrespective of write or read lock)
*reply = true
return nil
}
| internal/dsync/dsync-server_test.go | 0 | https://github.com/minio/minio/commit/47dfc1b1b09a3ef6bc21d7f39636fbbe81e2c16f | [
0.0056176092475652695,
0.0005174624384380877,
0.0001658791006775573,
0.00017367818509228528,
0.0013169199228286743
] |
{
"id": 2,
"code_window": [
"\t// Bind to the lookup user account\n",
"\tif err = l.lookupBind(conn); err != nil {\n",
"\t\treturn nil, err\n",
"\t}\n",
"\n",
"\tnonExistentUsers := []string{}\n",
"\tfor _, dn := range userDistNames {\n",
"\t\tsearchRequest := ldap.NewSearchRequest(\n",
"\t\t\tdn,\n",
"\t\t\tldap.ScopeBaseObject, ldap.NeverDerefAliases, 0, 0, false,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// Evaluate the filter again with generic wildcard instead of specific values\n",
"\tfilter := strings.Replace(l.UserDNSearchFilter, \"%s\", \"*\", -1)\n",
"\n"
],
"file_path": "internal/config/identity/ldap/config.go",
"type": "add",
"edit_start_line_idx": 498
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package dns
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"io"
"net"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/golang-jwt/jwt"
"github.com/minio/minio/internal/config"
xhttp "github.com/minio/minio/internal/http"
)
var (
defaultOperatorContextTimeout = 10 * time.Second
// ErrNotImplemented - Indicates the functionality which is not implemented
ErrNotImplemented = errors.New("The method is not implemented")
)
func (c *OperatorDNS) addAuthHeader(r *http.Request) error {
if c.username == "" || c.password == "" {
return nil
}
claims := &jwt.StandardClaims{
ExpiresAt: int64(15 * time.Minute),
Issuer: c.username,
Subject: config.EnvDNSWebhook,
}
token := jwt.NewWithClaims(jwt.SigningMethodHS512, claims)
ss, err := token.SignedString([]byte(c.password))
if err != nil {
return err
}
r.Header.Set("Authorization", "Bearer "+ss)
return nil
}
func (c *OperatorDNS) endpoint(bucket string, delete bool) (string, error) {
u, err := url.Parse(c.Endpoint)
if err != nil {
return "", err
}
q := u.Query()
q.Add("bucket", bucket)
q.Add("delete", strconv.FormatBool(delete))
u.RawQuery = q.Encode()
return u.String(), nil
}
// Put - Adds DNS entries into operator webhook server.
//
// Builds the create-service request for the bucket, sends it with a bounded
// timeout, and interprets the response: 200 OK means success, 409 maps to
// ErrBucketConflict, and any other status is wrapped with the response body.
func (c *OperatorDNS) Put(bucket string) error {
	ctx, cancel := context.WithTimeout(context.Background(), defaultOperatorContextTimeout)
	defer cancel()
	e, err := c.endpoint(bucket, false)
	if err != nil {
		return newError(bucket, err)
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, e, nil)
	if err != nil {
		return newError(bucket, err)
	}
	if err = c.addAuthHeader(req); err != nil {
		return newError(bucket, err)
	}
	resp, err := c.httpClient.Do(req)
	if err != nil {
		// Best-effort rollback of any partially created service entry.
		if derr := c.Delete(bucket); derr != nil {
			return newError(bucket, derr)
		}
		// BUGFIX: previously execution fell through here with a nil resp
		// (http.Client.Do returns a nil *Response on error), panicking on
		// resp.Body below. Return the original request error instead.
		return newError(bucket, err)
	}
	var errorStringBuilder strings.Builder
	io.Copy(&errorStringBuilder, io.LimitReader(resp.Body, resp.ContentLength))
	xhttp.DrainBody(resp.Body)
	if resp.StatusCode != http.StatusOK {
		errorString := errorStringBuilder.String()
		switch resp.StatusCode {
		case http.StatusConflict:
			return ErrBucketConflict(Error{bucket, errors.New(errorString)})
		}
		return newError(bucket, fmt.Errorf("service create for bucket %s, failed with status %s, error %s", bucket, resp.Status, errorString))
	}
	return nil
}
func newError(bucket string, err error) error {
e := Error{bucket, err}
if strings.Contains(err.Error(), "invalid bucket name") {
return ErrInvalidBucketName(e)
}
return e
}
// Delete - Removes DNS entries added in Put().
func (c *OperatorDNS) Delete(bucket string) error {
ctx, cancel := context.WithTimeout(context.Background(), defaultOperatorContextTimeout)
defer cancel()
e, err := c.endpoint(bucket, true)
if err != nil {
return err
}
req, err := http.NewRequestWithContext(ctx, http.MethodPost, e, nil)
if err != nil {
return err
}
if err = c.addAuthHeader(req); err != nil {
return err
}
resp, err := c.httpClient.Do(req)
if err != nil {
return err
}
xhttp.DrainBody(resp.Body)
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("request to delete the service for bucket %s, failed with status %s", bucket, resp.Status)
}
return nil
}
// DeleteRecord - Removes a specific DNS entry
// No Op for Operator because operator deals on with bucket entries
func (c *OperatorDNS) DeleteRecord(record SrvRecord) error {
return ErrNotImplemented
}
// Close closes the internal http client
func (c *OperatorDNS) Close() error {
c.httpClient.CloseIdleConnections()
return nil
}
// List - Retrieves list of DNS entries for the domain.
// This is a No Op for Operator because, there is no intent to enforce global
// namespace at MinIO level with this DNS entry. The global namespace in
// enforced by the Kubernetes Operator
func (c *OperatorDNS) List() (srvRecords map[string][]SrvRecord, err error) {
return nil, ErrNotImplemented
}
// Get - Retrieves DNS records for a bucket.
// This is a No Op for Operator because, there is no intent to enforce global
// namespace at MinIO level with this DNS entry. The global namespace in
// enforced by the Kubernetes Operator
func (c *OperatorDNS) Get(bucket string) (srvRecords []SrvRecord, err error) {
return nil, ErrNotImplemented
}
// String stringer name for this implementation of dns.Store
func (c *OperatorDNS) String() string {
return "webhookDNS"
}
// OperatorDNS - represents dns config for MinIO k8s operator.
type OperatorDNS struct {
httpClient *http.Client
Endpoint string
rootCAs *x509.CertPool
username string
password string
}
// OperatorOption - functional options pattern style for OperatorDNS
type OperatorOption func(*OperatorDNS)
// Authentication - custom username and password for authenticating at the endpoint
func Authentication(username, password string) OperatorOption {
return func(args *OperatorDNS) {
args.username = username
args.password = password
}
}
// RootCAs - add custom trust certs pool
func RootCAs(CAs *x509.CertPool) OperatorOption {
return func(args *OperatorDNS) {
args.rootCAs = CAs
}
}
// NewOperatorDNS - initialize a new K8S Operator DNS set/unset values.
func NewOperatorDNS(endpoint string, setters ...OperatorOption) (Store, error) {
if endpoint == "" {
return nil, errors.New("invalid argument")
}
args := &OperatorDNS{
Endpoint: endpoint,
}
for _, setter := range setters {
setter(args)
}
args.httpClient = &http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 3 * time.Second,
KeepAlive: 5 * time.Second,
}).DialContext,
ResponseHeaderTimeout: 3 * time.Second,
TLSHandshakeTimeout: 3 * time.Second,
ExpectContinueTimeout: 3 * time.Second,
TLSClientConfig: &tls.Config{
RootCAs: args.rootCAs,
},
// Go net/http automatically unzip if content-type is
// gzip disable this feature, as we are always interested
// in raw stream.
DisableCompression: true,
},
}
return args, nil
}
| internal/config/dns/operator_dns.go | 0 | https://github.com/minio/minio/commit/47dfc1b1b09a3ef6bc21d7f39636fbbe81e2c16f | [
0.002611494855955243,
0.0003158731269650161,
0.00016088050324469805,
0.00017238293366972357,
0.0004932954907417297
] |
{
"id": 2,
"code_window": [
"\t// Bind to the lookup user account\n",
"\tif err = l.lookupBind(conn); err != nil {\n",
"\t\treturn nil, err\n",
"\t}\n",
"\n",
"\tnonExistentUsers := []string{}\n",
"\tfor _, dn := range userDistNames {\n",
"\t\tsearchRequest := ldap.NewSearchRequest(\n",
"\t\t\tdn,\n",
"\t\t\tldap.ScopeBaseObject, ldap.NeverDerefAliases, 0, 0, false,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// Evaluate the filter again with generic wildcard instead of specific values\n",
"\tfilter := strings.Replace(l.UserDNSearchFilter, \"%s\", \"*\", -1)\n",
"\n"
],
"file_path": "internal/config/identity/ldap/config.go",
"type": "add",
"edit_start_line_idx": 498
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"errors"
"github.com/minio/minio/internal/logger"
"github.com/minio/minio-go/v7/pkg/tags"
bucketsse "github.com/minio/minio/internal/bucket/encryption"
"github.com/minio/minio/internal/bucket/lifecycle"
"github.com/minio/minio/internal/bucket/versioning"
"github.com/minio/pkg/bucket/policy"
"github.com/minio/madmin-go"
)
// GatewayUnsupported list of unsupported call stubs for gateway.
type GatewayUnsupported struct{}
// BackendInfo returns the underlying backend information
func (a GatewayUnsupported) BackendInfo() madmin.BackendInfo {
return madmin.BackendInfo{Type: madmin.Gateway}
}
// LocalStorageInfo returns the local disks information, mainly used
// in prometheus - for gateway this just a no-op
func (a GatewayUnsupported) LocalStorageInfo(ctx context.Context) (StorageInfo, []error) {
logger.CriticalIf(ctx, errors.New("not implemented"))
return StorageInfo{}, nil
}
// NSScanner - scanner is not implemented for gateway
func (a GatewayUnsupported) NSScanner(ctx context.Context, bf *bloomFilter, updates chan<- madmin.DataUsageInfo) error {
logger.CriticalIf(ctx, errors.New("not implemented"))
return NotImplemented{}
}
// PutObjectMetadata - not implemented for gateway.
func (a GatewayUnsupported) PutObjectMetadata(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
logger.CriticalIf(ctx, errors.New("not implemented"))
return ObjectInfo{}, NotImplemented{}
}
// NewNSLock is a dummy stub for gateway.
func (a GatewayUnsupported) NewNSLock(bucket string, objects ...string) RWLocker {
logger.CriticalIf(context.Background(), errors.New("not implemented"))
return nil
}
// SetDriveCounts no-op
func (a GatewayUnsupported) SetDriveCounts() []int {
return nil
}
// ListMultipartUploads lists all multipart uploads.
func (a GatewayUnsupported) ListMultipartUploads(ctx context.Context, bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (lmi ListMultipartsInfo, err error) {
return lmi, NotImplemented{}
}
// NewMultipartUpload upload object in multiple parts
func (a GatewayUnsupported) NewMultipartUpload(ctx context.Context, bucket string, object string, opts ObjectOptions) (uploadID string, err error) {
return "", NotImplemented{}
}
// CopyObjectPart copy part of object to uploadID for another object
func (a GatewayUnsupported) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string, partID int, startOffset, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, err error) {
return pi, NotImplemented{}
}
// PutObjectPart puts a part of object in bucket
func (a GatewayUnsupported) PutObjectPart(ctx context.Context, bucket string, object string, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (pi PartInfo, err error) {
logger.LogIf(ctx, NotImplemented{})
return pi, NotImplemented{}
}
// GetMultipartInfo returns metadata associated with the uploadId
func (a GatewayUnsupported) GetMultipartInfo(ctx context.Context, bucket string, object string, uploadID string, opts ObjectOptions) (MultipartInfo, error) {
logger.LogIf(ctx, NotImplemented{})
return MultipartInfo{}, NotImplemented{}
}
// ListObjectVersions returns all object parts for specified object in specified bucket
func (a GatewayUnsupported) ListObjectVersions(ctx context.Context, bucket, prefix, marker, versionMarker, delimiter string, maxKeys int) (ListObjectVersionsInfo, error) {
logger.LogIf(ctx, NotImplemented{})
return ListObjectVersionsInfo{}, NotImplemented{}
}
// ListObjectParts returns all object parts for specified object in specified bucket
func (a GatewayUnsupported) ListObjectParts(ctx context.Context, bucket string, object string, uploadID string, partNumberMarker int, maxParts int, opts ObjectOptions) (lpi ListPartsInfo, err error) {
logger.LogIf(ctx, NotImplemented{})
return lpi, NotImplemented{}
}
// AbortMultipartUpload aborts a ongoing multipart upload
func (a GatewayUnsupported) AbortMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, opts ObjectOptions) error {
return NotImplemented{}
}
// CompleteMultipartUpload completes ongoing multipart upload and finalizes object
func (a GatewayUnsupported) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (oi ObjectInfo, err error) {
logger.LogIf(ctx, NotImplemented{})
return oi, NotImplemented{}
}
// SetBucketPolicy sets policy on bucket
func (a GatewayUnsupported) SetBucketPolicy(ctx context.Context, bucket string, bucketPolicy *policy.Policy) error {
logger.LogIf(ctx, NotImplemented{})
return NotImplemented{}
}
// GetBucketPolicy will get policy on bucket
func (a GatewayUnsupported) GetBucketPolicy(ctx context.Context, bucket string) (bucketPolicy *policy.Policy, err error) {
return nil, NotImplemented{}
}
// DeleteBucketPolicy deletes all policies on bucket
func (a GatewayUnsupported) DeleteBucketPolicy(ctx context.Context, bucket string) error {
return NotImplemented{}
}
// SetBucketVersioning enables versioning on a bucket.
func (a GatewayUnsupported) SetBucketVersioning(ctx context.Context, bucket string, v *versioning.Versioning) error {
logger.LogIf(ctx, NotImplemented{})
return NotImplemented{}
}
// GetBucketVersioning retrieves versioning configuration of a bucket.
func (a GatewayUnsupported) GetBucketVersioning(ctx context.Context, bucket string) (*versioning.Versioning, error) {
logger.LogIf(ctx, NotImplemented{})
return nil, NotImplemented{}
}
// SetBucketLifecycle enables lifecycle policies on a bucket.
func (a GatewayUnsupported) SetBucketLifecycle(ctx context.Context, bucket string, lifecycle *lifecycle.Lifecycle) error {
logger.LogIf(ctx, NotImplemented{})
return NotImplemented{}
}
// GetBucketLifecycle retrieves lifecycle configuration of a bucket.
func (a GatewayUnsupported) GetBucketLifecycle(ctx context.Context, bucket string) (*lifecycle.Lifecycle, error) {
return nil, NotImplemented{}
}
// DeleteBucketLifecycle deletes all lifecycle policies on a bucket
func (a GatewayUnsupported) DeleteBucketLifecycle(ctx context.Context, bucket string) error {
return NotImplemented{}
}
// GetBucketSSEConfig returns bucket encryption config on a bucket
func (a GatewayUnsupported) GetBucketSSEConfig(ctx context.Context, bucket string) (*bucketsse.BucketSSEConfig, error) {
return nil, NotImplemented{}
}
// SetBucketSSEConfig sets bucket encryption config on a bucket
func (a GatewayUnsupported) SetBucketSSEConfig(ctx context.Context, bucket string, config *bucketsse.BucketSSEConfig) error {
return NotImplemented{}
}
// DeleteBucketSSEConfig deletes bucket encryption config on a bucket
func (a GatewayUnsupported) DeleteBucketSSEConfig(ctx context.Context, bucket string) error {
return NotImplemented{}
}
// HealFormat - Not implemented stub
func (a GatewayUnsupported) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) {
return madmin.HealResultItem{}, NotImplemented{}
}
// HealBucket - Not implemented stub
func (a GatewayUnsupported) HealBucket(ctx context.Context, bucket string, opts madmin.HealOpts) (madmin.HealResultItem, error) {
return madmin.HealResultItem{}, NotImplemented{}
}
// HealObject - Not implemented stub
func (a GatewayUnsupported) HealObject(ctx context.Context, bucket, object, versionID string, opts madmin.HealOpts) (h madmin.HealResultItem, e error) {
return h, NotImplemented{}
}
// ListObjectsV2 - Not implemented stub
func (a GatewayUnsupported) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) {
return result, NotImplemented{}
}
// Walk - Not implemented stub
func (a GatewayUnsupported) Walk(ctx context.Context, bucket, prefix string, results chan<- ObjectInfo, opts ObjectOptions) error {
return NotImplemented{}
}
// HealObjects - Not implemented stub
func (a GatewayUnsupported) HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, fn HealObjectFn) (e error) {
return NotImplemented{}
}
// CopyObject copies a blob from source container to destination container.
func (a GatewayUnsupported) CopyObject(ctx context.Context, srcBucket string, srcObject string, destBucket string, destObject string,
srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) {
return objInfo, NotImplemented{}
}
// GetMetrics - no op
func (a GatewayUnsupported) GetMetrics(ctx context.Context) (*BackendMetrics, error) {
logger.LogIf(ctx, NotImplemented{})
return &BackendMetrics{}, NotImplemented{}
}
// PutObjectTags - not implemented.
func (a GatewayUnsupported) PutObjectTags(ctx context.Context, bucket, object string, tags string, opts ObjectOptions) (ObjectInfo, error) {
logger.LogIf(ctx, NotImplemented{})
return ObjectInfo{}, NotImplemented{}
}
// GetObjectTags - not implemented.
func (a GatewayUnsupported) GetObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) (*tags.Tags, error) {
logger.LogIf(ctx, NotImplemented{})
return nil, NotImplemented{}
}
// DeleteObjectTags - not implemented.
func (a GatewayUnsupported) DeleteObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
logger.LogIf(ctx, NotImplemented{})
return ObjectInfo{}, NotImplemented{}
}
// IsNotificationSupported returns whether bucket notification is applicable for this layer.
func (a GatewayUnsupported) IsNotificationSupported() bool {
return false
}
// IsListenSupported returns whether listen bucket notification is applicable for this layer.
func (a GatewayUnsupported) IsListenSupported() bool {
return false
}
// IsEncryptionSupported returns whether server side encryption is implemented for this layer.
func (a GatewayUnsupported) IsEncryptionSupported() bool {
return false
}
// IsTaggingSupported returns whether object tagging is supported or not for this layer.
func (a GatewayUnsupported) IsTaggingSupported() bool {
return false
}
// IsCompressionSupported returns whether compression is applicable for this layer.
func (a GatewayUnsupported) IsCompressionSupported() bool {
return false
}
// Health - No Op.
func (a GatewayUnsupported) Health(_ context.Context, _ HealthOptions) HealthResult {
return HealthResult{}
}
// ReadHealth - No Op.
func (a GatewayUnsupported) ReadHealth(_ context.Context) bool {
return true
}
// TransitionObject - transition object content to target tier.
func (a GatewayUnsupported) TransitionObject(ctx context.Context, bucket, object string, opts ObjectOptions) error {
return NotImplemented{}
}
// RestoreTransitionedObject - restore transitioned object content locally on this cluster.
// Not implemented for gateways; returns NotImplemented.
func (a GatewayUnsupported) RestoreTransitionedObject(ctx context.Context, bucket, object string, opts ObjectOptions) error {
    return NotImplemented{}
}
| cmd/gateway-unsupported.go | 0 | https://github.com/minio/minio/commit/47dfc1b1b09a3ef6bc21d7f39636fbbe81e2c16f | [
0.0019866637885570526,
0.0003241479571443051,
0.00016221028636209667,
0.0001693747180979699,
0.0004427045932970941
] |
{
"id": 3,
"code_window": [
"\tfor _, dn := range userDistNames {\n",
"\t\tsearchRequest := ldap.NewSearchRequest(\n",
"\t\t\tdn,\n",
"\t\t\tldap.ScopeBaseObject, ldap.NeverDerefAliases, 0, 0, false,\n",
"\t\t\t\"(objectclass=*)\",\n",
"\t\t\t[]string{}, // only need DN, so no pass no attributes here\n",
"\t\t\tnil,\n",
"\t\t)\n",
"\n",
"\t\tsearchResult, err := conn.Search(searchRequest)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tfilter,\n"
],
"file_path": "internal/config/identity/ldap/config.go",
"type": "replace",
"edit_start_line_idx": 503
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ldap
import (
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"math/rand"
"net"
"strconv"
"strings"
"time"
ldap "github.com/go-ldap/ldap/v3"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/config"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/env"
)
const (
defaultLDAPExpiry = time.Hour * 1
dnDelimiter = ";"
minLDAPExpiry time.Duration = 15 * time.Minute
maxLDAPExpiry time.Duration = 365 * 24 * time.Hour
)
// Config contains AD/LDAP server connectivity information.
type Config struct {
Enabled bool `json:"enabled"`
// E.g. "ldap.minio.io:636"
ServerAddr string `json:"serverAddr"`
// STS credentials expiry duration
STSExpiryDuration string `json:"stsExpiryDuration"`
// Format string for usernames
UsernameFormat string `json:"usernameFormat"`
UsernameFormats []string `json:"-"`
// User DN search parameters
UserDNSearchBaseDN string `json:"userDNSearchBaseDN"`
UserDNSearchFilter string `json:"userDNSearchFilter"`
// Group search parameters
GroupSearchBaseDistName string `json:"groupSearchBaseDN"`
GroupSearchBaseDistNames []string `json:"-"`
GroupSearchFilter string `json:"groupSearchFilter"`
// Lookup bind LDAP service account
LookupBindDN string `json:"lookupBindDN"`
LookupBindPassword string `json:"lookupBindPassword"`
stsExpiryDuration time.Duration // contains converted value
tlsSkipVerify bool // allows skipping TLS verification
serverInsecure bool // allows plain text connection to LDAP server
serverStartTLS bool // allows using StartTLS connection to LDAP server
isUsingLookupBind bool
rootCAs *x509.CertPool
}
// LDAP keys and envs.
const (
ServerAddr = "server_addr"
STSExpiry = "sts_expiry"
LookupBindDN = "lookup_bind_dn"
LookupBindPassword = "lookup_bind_password"
UserDNSearchBaseDN = "user_dn_search_base_dn"
UserDNSearchFilter = "user_dn_search_filter"
UsernameFormat = "username_format"
GroupSearchFilter = "group_search_filter"
GroupSearchBaseDN = "group_search_base_dn"
TLSSkipVerify = "tls_skip_verify"
ServerInsecure = "server_insecure"
ServerStartTLS = "server_starttls"
EnvServerAddr = "MINIO_IDENTITY_LDAP_SERVER_ADDR"
EnvSTSExpiry = "MINIO_IDENTITY_LDAP_STS_EXPIRY"
EnvTLSSkipVerify = "MINIO_IDENTITY_LDAP_TLS_SKIP_VERIFY"
EnvServerInsecure = "MINIO_IDENTITY_LDAP_SERVER_INSECURE"
EnvServerStartTLS = "MINIO_IDENTITY_LDAP_SERVER_STARTTLS"
EnvUsernameFormat = "MINIO_IDENTITY_LDAP_USERNAME_FORMAT"
EnvUserDNSearchBaseDN = "MINIO_IDENTITY_LDAP_USER_DN_SEARCH_BASE_DN"
EnvUserDNSearchFilter = "MINIO_IDENTITY_LDAP_USER_DN_SEARCH_FILTER"
EnvGroupSearchFilter = "MINIO_IDENTITY_LDAP_GROUP_SEARCH_FILTER"
EnvGroupSearchBaseDN = "MINIO_IDENTITY_LDAP_GROUP_SEARCH_BASE_DN"
EnvLookupBindDN = "MINIO_IDENTITY_LDAP_LOOKUP_BIND_DN"
EnvLookupBindPassword = "MINIO_IDENTITY_LDAP_LOOKUP_BIND_PASSWORD"
)
var removedKeys = []string{
"username_search_filter",
"username_search_base_dn",
"group_name_attribute",
}
// DefaultKVS - default config for LDAP config
var (
DefaultKVS = config.KVS{
config.KV{
Key: ServerAddr,
Value: "",
},
config.KV{
Key: UsernameFormat,
Value: "",
},
config.KV{
Key: UserDNSearchBaseDN,
Value: "",
},
config.KV{
Key: UserDNSearchFilter,
Value: "",
},
config.KV{
Key: GroupSearchFilter,
Value: "",
},
config.KV{
Key: GroupSearchBaseDN,
Value: "",
},
config.KV{
Key: STSExpiry,
Value: "1h",
},
config.KV{
Key: TLSSkipVerify,
Value: config.EnableOff,
},
config.KV{
Key: ServerInsecure,
Value: config.EnableOff,
},
config.KV{
Key: ServerStartTLS,
Value: config.EnableOff,
},
config.KV{
Key: LookupBindDN,
Value: "",
},
config.KV{
Key: LookupBindPassword,
Value: "",
},
}
)
// getGroups runs the given search request and returns the DN of every
// matching entry. A "no such object" result (LDAP code 32) is treated as
// an empty result rather than an error.
func getGroups(conn *ldap.Conn, sreq *ldap.SearchRequest) ([]string, error) {
    var groups []string
    sres, err := conn.Search(sreq)
    if err != nil {
        // Check if there is no matching result and return empty slice.
        // Ref: https://ldap.com/ldap-result-code-reference/
        if ldap.IsErrorWithCode(err, 32) {
            return nil, nil
        }
        return nil, err
    }
    for _, entry := range sres.Entries {
        // Callers only need the DN of each matching group entry, so no
        // attribute values are inspected here.
        groups = append(groups, entry.DN)
    }
    return groups, nil
}
// lookupBind binds conn as the configured lookup-bind service account.
// An empty password performs an unauthenticated bind (allowed by some
// directories for read-only lookups). Invalid-credentials failures are
// wrapped with a clearer message so misconfiguration is easy to spot.
func (l *Config) lookupBind(conn *ldap.Conn) error {
    var err error
    if l.LookupBindPassword == "" {
        err = conn.UnauthenticatedBind(l.LookupBindDN)
    } else {
        err = conn.Bind(l.LookupBindDN, l.LookupBindPassword)
    }
    // Use the library's named constant instead of the magic number 49
    // (LDAP result code "invalidCredentials").
    if ldap.IsErrorWithCode(err, ldap.LDAPResultInvalidCredentials) {
        return fmt.Errorf("LDAP Lookup Bind user invalid credentials error: %w", err)
    }
    return err
}
// usernameFormatsBind tries every configured username format against the
// directory and returns the single bind DN whose credentials validated.
//
// Exactly one format is expected to succeed for valid credentials. Zero
// successes means the credentials are invalid; more than one success is
// ambiguous (two distinct accounts share username and password), so an
// error is returned in both cases.
func (l *Config) usernameFormatsBind(conn *ldap.Conn, username, password string) (string, error) {
    var matchedDNs []string
    var bindFailures []error

    for _, format := range l.UsernameFormats {
        candidateDN := fmt.Sprintf(format, username)
        bindErr := conn.Bind(candidateDN, password)
        switch {
        case bindErr == nil:
            matchedDNs = append(matchedDNs, candidateDN)
        case ldap.IsErrorWithCode(bindErr, 49):
            // Invalid credentials for this format - remember and move on.
            bindFailures = append(bindFailures, bindErr)
        default:
            return "", fmt.Errorf("LDAP Bind request failed with unexpected error: %w", bindErr)
        }
    }

    switch len(matchedDNs) {
    case 1:
        return matchedDNs[0], nil
    case 0:
        msgs := make([]string, 0, len(bindFailures))
        for _, failure := range bindFailures {
            msgs = append(msgs, failure.Error())
        }
        return "", fmt.Errorf("All username formats failed due to invalid credentials: %s", strings.Join(msgs, "; "))
    default:
        return "", fmt.Errorf("Multiple username formats succeeded - ambiguous user login (succeeded for: %s)", strings.Join(matchedDNs, ", "))
    }
}
// lookupUserDN searches for the DN of the user given their username. conn is
// assumed to be using the lookup bind service account. It is required that the
// search result in at most one result.
func (l *Config) lookupUserDN(conn *ldap.Conn, username string) (string, error) {
    // Substitute the escaped username into the configured filter template
    // to prevent LDAP filter injection.
    filter := strings.Replace(l.UserDNSearchFilter, "%s", ldap.EscapeFilter(username), -1)
    searchRequest := ldap.NewSearchRequest(
        l.UserDNSearchBaseDN,
        ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
        filter,
        []string{}, // only need DN, so no pass no attributes here
        nil,
    )

    searchResult, err := conn.Search(searchRequest)
    if err != nil {
        return "", err
    }
    if len(searchResult.Entries) == 0 {
        return "", fmt.Errorf("User DN for %s not found", username)
    }
    // More than one match means the filter is too broad to identify a
    // unique account - refuse rather than guess.
    if len(searchResult.Entries) != 1 {
        return "", fmt.Errorf("Multiple DNs for %s found - please fix the search filter", username)
    }
    return searchResult.Entries[0].DN, nil
}
// searchForUserGroups returns the DNs of the groups the given user belongs
// to, by evaluating the configured group-search filter under each configured
// group-search base DN. An empty GroupSearchFilter disables group lookup
// and yields no groups.
func (l *Config) searchForUserGroups(conn *ldap.Conn, username, bindDN string) ([]string, error) {
    if l.GroupSearchFilter == "" {
        return nil, nil
    }

    // The filter template is identical for every base DN, so expand the
    // escaped username (%s) and user DN (%d) placeholders once up front.
    filter := strings.Replace(l.GroupSearchFilter, "%s", ldap.EscapeFilter(username), -1)
    filter = strings.Replace(filter, "%d", ldap.EscapeFilter(bindDN), -1)

    var groups []string
    for _, baseDN := range l.GroupSearchBaseDistNames {
        req := ldap.NewSearchRequest(
            baseDN,
            ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
            filter,
            nil,
            nil,
        )
        found, err := getGroups(conn, req)
        if err != nil {
            return nil, fmt.Errorf("Error finding groups of %s: %w", bindDN, err)
        }
        groups = append(groups, found...)
    }
    return groups, nil
}
// LookupUserDN searches for the full DN and groups of a given username.
// Only available in lookup-bind mode; username-format mode cannot search
// the directory on its own.
func (l *Config) LookupUserDN(username string) (string, []string, error) {
    if !l.isUsingLookupBind {
        return "", nil, errors.New("current lookup mode does not support searching for User DN")
    }
    conn, err := l.Connect()
    if err != nil {
        return "", nil, err
    }
    defer conn.Close()

    // Bind to the lookup user account
    if err = l.lookupBind(conn); err != nil {
        return "", nil, err
    }

    // Lookup user DN
    bindDN, err := l.lookupUserDN(conn, username)
    if err != nil {
        errRet := fmt.Errorf("Unable to find user DN: %w", err)
        return "", nil, errRet
    }

    groups, err := l.searchForUserGroups(conn, username, bindDN)
    if err != nil {
        return "", nil, err
    }

    return bindDN, groups, nil
}
// Bind - binds to ldap, searches LDAP and returns the distinguished name of the
// user and the list of groups.
//
// In lookup-bind mode the connection alternates between the service
// account (for searches) and the user's own credentials (for password
// validation); the ordering of the binds below is significant.
func (l *Config) Bind(username, password string) (string, []string, error) {
    conn, err := l.Connect()
    if err != nil {
        return "", nil, err
    }
    defer conn.Close()

    var bindDN string
    if l.isUsingLookupBind {
        // Bind to the lookup user account
        if err = l.lookupBind(conn); err != nil {
            return "", nil, err
        }

        // Lookup user DN
        bindDN, err = l.lookupUserDN(conn, username)
        if err != nil {
            errRet := fmt.Errorf("Unable to find user DN: %w", err)
            return "", nil, errRet
        }

        // Authenticate the user credentials.
        err = conn.Bind(bindDN, password)
        if err != nil {
            errRet := fmt.Errorf("LDAP auth failed for DN %s: %w", bindDN, err)
            return "", nil, errRet
        }

        // Bind to the lookup user account again to perform group search.
        if err = l.lookupBind(conn); err != nil {
            return "", nil, err
        }
    } else {
        // Verify login credentials by checking the username formats.
        bindDN, err = l.usernameFormatsBind(conn, username, password)
        if err != nil {
            return "", nil, err
        }

        // Bind to the successful bindDN again.
        err = conn.Bind(bindDN, password)
        if err != nil {
            errRet := fmt.Errorf("LDAP conn failed though auth for DN %s succeeded: %w", bindDN, err)
            return "", nil, errRet
        }
    }

    // User groups lookup.
    groups, err := l.searchForUserGroups(conn, username, bindDN)
    if err != nil {
        return "", nil, err
    }

    return bindDN, groups, nil
}
// Connect connect to ldap server.
//
// The transport is chosen from the config flags: plain TCP when
// serverInsecure is set, TCP upgraded via StartTLS when serverStartTLS is
// set, and implicit TLS (LDAPS) otherwise. A missing port defaults to 636.
func (l *Config) Connect() (ldapConn *ldap.Conn, err error) {
    if l == nil {
        return nil, errors.New("LDAP is not configured")
    }

    serverHost, _, err := net.SplitHostPort(l.ServerAddr)
    if err != nil {
        serverHost = l.ServerAddr
        // User default LDAP port if none specified "636"
        l.ServerAddr = net.JoinHostPort(l.ServerAddr, "636")
    }

    if l.serverInsecure {
        return ldap.Dial("tcp", l.ServerAddr)
    }

    tlsConfig := &tls.Config{
        InsecureSkipVerify: l.tlsSkipVerify,
        RootCAs:            l.rootCAs,
        // ServerName is the bare host so certificate verification works
        // even after the port was appended above.
        ServerName: serverHost,
    }

    if l.serverStartTLS {
        conn, err := ldap.Dial("tcp", l.ServerAddr)
        if err != nil {
            return nil, err
        }
        err = conn.StartTLS(tlsConfig)
        return conn, err
    }

    return ldap.DialTLS("tcp", l.ServerAddr, tlsConfig)
}
// GetExpiryDuration parses dsecs, a duration in seconds given as a string,
// and returns it as a time.Duration. An empty dsecs falls back to the
// configured STS expiry. Values outside [minLDAPExpiry, maxLDAPExpiry]
// are rejected with auth.ErrInvalidDuration.
func (l Config) GetExpiryDuration(dsecs string) (time.Duration, error) {
    if dsecs == "" {
        return l.stsExpiryDuration, nil
    }

    secs, err := strconv.Atoi(dsecs)
    if err != nil {
        return 0, auth.ErrInvalidDuration
    }

    expiry := time.Duration(secs) * time.Second
    if expiry < minLDAPExpiry || expiry > maxLDAPExpiry {
        return 0, auth.ErrInvalidDuration
    }
    return expiry, nil
}
// testConnection verifies connectivity to the configured LDAP server.
// In lookup-bind mode it checks the service-account bind. In
// username-format mode it binds with random throwaway credentials and
// expects an invalid-credentials failure, which proves the server is
// reachable without needing a real account.
func (l Config) testConnection() error {
    conn, err := l.Connect()
    if err != nil {
        return fmt.Errorf("Error creating connection to LDAP server: %w", err)
    }
    defer conn.Close()
    if l.isUsingLookupBind {
        if err = l.lookupBind(conn); err != nil {
            return fmt.Errorf("Error connecting as LDAP Lookup Bind user: %w", err)
        }
        return nil
    }

    // Generate some random user credentials for username formats mode test.
    username := fmt.Sprintf("sometestuser%09d", rand.Int31n(1000000000))
    charset := []byte("abcdefghijklmnopqrstuvwxyz" +
        "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")
    rand.Shuffle(len(charset), func(i, j int) {
        charset[i], charset[j] = charset[j], charset[i]
    })
    password := string(charset[:20])
    _, err = l.usernameFormatsBind(conn, username, password)
    if err == nil {
        // We don't expect to successfully guess a credential in this
        // way.
        return fmt.Errorf("Unexpected random credentials success for user=%s password=%s", username, password)
    } else if strings.HasPrefix(err.Error(), "All username formats failed due to invalid credentials: ") {
        // Invalid credentials is the expected outcome - the server
        // responded, so the connection works.
        return nil
    }
    return fmt.Errorf("LDAP connection test error: %w", err)
}
// IsLDAPUserDN determines if the given string could be a user DN from LDAP,
// by checking whether it ends with ",<UserDNSearchBaseDN>".
// NOTE(review): the comparison is an exact, case-sensitive suffix match,
// while LDAP DNs are typically case-insensitive - presumably stored DNs
// come from this server and match the configured casing; verify callers.
func (l Config) IsLDAPUserDN(user string) bool {
    return strings.HasSuffix(user, ","+l.UserDNSearchBaseDN)
}
// GetNonExistentUserDistNames - find user accounts (DNs) that are no longer
// present in the LDAP server. Each DN is probed with a base-object search;
// a "no such object" result (LDAP code 32) or an empty result marks the
// account as non-existent. Requires lookup-bind mode.
func (l *Config) GetNonExistentUserDistNames(userDistNames []string) ([]string, error) {
    if !l.isUsingLookupBind {
        return nil, errors.New("current LDAP configuration does not permit looking for expired user accounts")
    }

    conn, err := l.Connect()
    if err != nil {
        return nil, err
    }
    defer conn.Close()

    // Bind to the lookup user account
    if err = l.lookupBind(conn); err != nil {
        return nil, err
    }

    nonExistentUsers := []string{}
    for _, dn := range userDistNames {
        searchRequest := ldap.NewSearchRequest(
            dn,
            // Base-object scope: we only ask "does this exact DN exist?"
            ldap.ScopeBaseObject, ldap.NeverDerefAliases, 0, 0, false,
            "(objectclass=*)",
            []string{}, // only need DN, so no pass no attributes here
            nil,
        )

        searchResult, err := conn.Search(searchRequest)
        if err != nil {
            // Object does not exist error?
            if ldap.IsErrorWithCode(err, 32) {
                nonExistentUsers = append(nonExistentUsers, dn)
                continue
            }
            return nil, err
        }
        if len(searchResult.Entries) == 0 {
            // DN was not found - this means this user account is
            // expired.
            nonExistentUsers = append(nonExistentUsers, dn)
        }
    }
    return nonExistentUsers, nil
}
// LookupGroupMemberships - for each DN finds the set of LDAP groups they are a
// member of. userDNToUsernameMap supplies the username for each DN, needed
// to expand the %s placeholder of the group-search filter. Requires
// lookup-bind mode.
func (l *Config) LookupGroupMemberships(userDistNames []string, userDNToUsernameMap map[string]string) (map[string]set.StringSet, error) {
    if !l.isUsingLookupBind {
        return nil, errors.New("current LDAP configuration does not permit this lookup")
    }

    conn, err := l.Connect()
    if err != nil {
        return nil, err
    }
    defer conn.Close()

    // Bind to the lookup user account
    if err = l.lookupBind(conn); err != nil {
        return nil, err
    }

    res := make(map[string]set.StringSet, len(userDistNames))
    for _, userDistName := range userDistNames {
        username := userDNToUsernameMap[userDistName]
        groups, err := l.searchForUserGroups(conn, username, userDistName)
        if err != nil {
            return nil, err
        }
        res[userDistName] = set.CreateStringSet(groups...)
    }

    return res, nil
}
// EnabledWithLookupBind - checks if ldap IDP is enabled in lookup bind mode.
func (l Config) EnabledWithLookupBind() bool {
    return l.Enabled && l.isUsingLookupBind
}
// Enabled returns if LDAP identity is enabled, i.e. a server address is
// configured. (Previous comment said "jwks" - a copy-paste leftover.)
func Enabled(kvs config.KVS) bool {
    return kvs.Get(ServerAddr) != ""
}
// Lookup - initializes LDAP config, overrides config, if any ENV values are set.
// Environment variables take precedence over on-disk KVS values. Validation
// enforces that exactly one of lookup-bind mode and username-format mode is
// configured, that group-search parameters come as a complete pair, and that
// the server is reachable (via testConnection) before returning.
func Lookup(kvs config.KVS, rootCAs *x509.CertPool) (l Config, err error) {
    l = Config{}

    // Purge all removed keys first
    for _, k := range removedKeys {
        kvs.Delete(k)
    }
    if err = config.CheckValidKeys(config.IdentityLDAPSubSys, kvs, DefaultKVS); err != nil {
        return l, err
    }
    ldapServer := env.Get(EnvServerAddr, kvs.Get(ServerAddr))
    if ldapServer == "" {
        // No server address means LDAP stays disabled; not an error.
        return l, nil
    }
    l.Enabled = true
    l.rootCAs = rootCAs
    l.ServerAddr = ldapServer
    l.stsExpiryDuration = defaultLDAPExpiry
    if v := env.Get(EnvSTSExpiry, kvs.Get(STSExpiry)); v != "" {
        logger.Info("DEPRECATION WARNING: Support for configuring the default LDAP credentials expiry duration will be removed by October 2021. Please use the `DurationSeconds` parameter in the LDAP STS API instead.")
        expDur, err := time.ParseDuration(v)
        if err != nil {
            return l, errors.New("LDAP expiry time err:" + err.Error())
        }
        if expDur < minLDAPExpiry {
            return l, fmt.Errorf("LDAP expiry time must be at least %s", minLDAPExpiry)
        }
        if expDur > maxLDAPExpiry {
            return l, fmt.Errorf("LDAP expiry time may not exceed %s", maxLDAPExpiry)
        }
        l.STSExpiryDuration = v
        l.stsExpiryDuration = expDur
    }

    // LDAP connection configuration
    if v := env.Get(EnvServerInsecure, kvs.Get(ServerInsecure)); v != "" {
        l.serverInsecure, err = config.ParseBool(v)
        if err != nil {
            return l, err
        }
    }
    if v := env.Get(EnvServerStartTLS, kvs.Get(ServerStartTLS)); v != "" {
        l.serverStartTLS, err = config.ParseBool(v)
        if err != nil {
            return l, err
        }
    }
    if v := env.Get(EnvTLSSkipVerify, kvs.Get(TLSSkipVerify)); v != "" {
        l.tlsSkipVerify, err = config.ParseBool(v)
        if err != nil {
            return l, err
        }
    }

    // Lookup bind user configuration
    lookupBindDN := env.Get(EnvLookupBindDN, kvs.Get(LookupBindDN))
    lookupBindPassword := env.Get(EnvLookupBindPassword, kvs.Get(LookupBindPassword))
    if lookupBindDN != "" {
        l.LookupBindDN = lookupBindDN
        l.LookupBindPassword = lookupBindPassword
        l.isUsingLookupBind = true

        // User DN search configuration
        userDNSearchBaseDN := env.Get(EnvUserDNSearchBaseDN, kvs.Get(UserDNSearchBaseDN))
        userDNSearchFilter := env.Get(EnvUserDNSearchFilter, kvs.Get(UserDNSearchFilter))
        if userDNSearchFilter == "" || userDNSearchBaseDN == "" {
            return l, errors.New("In lookup bind mode, userDN search base DN and userDN search filter are both required")
        }
        l.UserDNSearchBaseDN = userDNSearchBaseDN
        l.UserDNSearchFilter = userDNSearchFilter
    }

    // Username format configuration (legacy mode).
    if v := env.Get(EnvUsernameFormat, kvs.Get(UsernameFormat)); v != "" {
        if !strings.Contains(v, "%s") {
            return l, errors.New("LDAP username format does not support '%s' substitution")
        }
        l.UsernameFormats = strings.Split(v, dnDelimiter)
    }

    if len(l.UsernameFormats) > 0 {
        logger.Info("DEPRECATION WARNING: Support for %s will be removed by October 2021, please migrate your LDAP settings to lookup bind mode", UsernameFormat)
    }

    // Either lookup bind mode or username format is supported, but not both.
    if l.isUsingLookupBind && len(l.UsernameFormats) > 0 {
        return l, errors.New("Lookup Bind mode and Username Format mode are not supported at the same time")
    }

    // At least one of bind mode or username format must be used.
    if !l.isUsingLookupBind && len(l.UsernameFormats) == 0 {
        return l, errors.New("Either Lookup Bind mode or Username Format mode is required")
    }

    // Test connection to LDAP server.
    if err := l.testConnection(); err != nil {
        return l, fmt.Errorf("Connection test for LDAP server failed: %w", err)
    }

    // Group search params configuration
    grpSearchFilter := env.Get(EnvGroupSearchFilter, kvs.Get(GroupSearchFilter))
    grpSearchBaseDN := env.Get(EnvGroupSearchBaseDN, kvs.Get(GroupSearchBaseDN))

    // Either all group params must be set or none must be set.
    if (grpSearchFilter != "" && grpSearchBaseDN == "") || (grpSearchFilter == "" && grpSearchBaseDN != "") {
        return l, errors.New("All group related parameters must be set")
    }

    if grpSearchFilter != "" {
        l.GroupSearchFilter = grpSearchFilter
        l.GroupSearchBaseDistName = grpSearchBaseDN
        l.GroupSearchBaseDistNames = strings.Split(l.GroupSearchBaseDistName, dnDelimiter)
    }

    return l, nil
}
| internal/config/identity/ldap/config.go | 1 | https://github.com/minio/minio/commit/47dfc1b1b09a3ef6bc21d7f39636fbbe81e2c16f | [
0.9982759952545166,
0.15335409343242645,
0.00016183503612410277,
0.0014547941973432899,
0.3291763663291931
] |
{
"id": 3,
"code_window": [
"\tfor _, dn := range userDistNames {\n",
"\t\tsearchRequest := ldap.NewSearchRequest(\n",
"\t\t\tdn,\n",
"\t\t\tldap.ScopeBaseObject, ldap.NeverDerefAliases, 0, 0, false,\n",
"\t\t\t\"(objectclass=*)\",\n",
"\t\t\t[]string{}, // only need DN, so no pass no attributes here\n",
"\t\t\tnil,\n",
"\t\t)\n",
"\n",
"\t\tsearchResult, err := conn.Search(searchRequest)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tfilter,\n"
],
"file_path": "internal/config/identity/ldap/config.go",
"type": "replace",
"edit_start_line_idx": 503
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package config
import (
"bufio"
"fmt"
"io"
"regexp"
"strings"
"github.com/minio/madmin-go"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/internal/auth"
"github.com/minio/pkg/env"
)
// Error config error type
type Error struct {
Err string
}
// Errorf - formats according to a format specifier and returns
// the string as a value that satisfies error of type config.Error
func Errorf(format string, a ...interface{}) error {
return Error{Err: fmt.Sprintf(format, a...)}
}
func (e Error) Error() string {
return e.Err
}
// Default keys
const (
Default = madmin.Default
Enable = madmin.EnableKey
Comment = madmin.CommentKey
// Enable values
EnableOn = madmin.EnableOn
EnableOff = madmin.EnableOff
RegionName = "name"
AccessKey = "access_key"
SecretKey = "secret_key"
)
// Top level config constants.
const (
CredentialsSubSys = "credentials"
PolicyOPASubSys = "policy_opa"
IdentityOpenIDSubSys = "identity_openid"
IdentityLDAPSubSys = "identity_ldap"
CacheSubSys = "cache"
RegionSubSys = "region"
EtcdSubSys = "etcd"
StorageClassSubSys = "storage_class"
APISubSys = "api"
CompressionSubSys = "compression"
LoggerWebhookSubSys = "logger_webhook"
AuditWebhookSubSys = "audit_webhook"
AuditKafkaSubSys = "audit_kafka"
HealSubSys = "heal"
ScannerSubSys = "scanner"
CrawlerSubSys = "crawler"
// Add new constants here if you add new fields to config.
)
// Notification config constants.
const (
NotifyKafkaSubSys = "notify_kafka"
NotifyMQTTSubSys = "notify_mqtt"
NotifyMySQLSubSys = "notify_mysql"
NotifyNATSSubSys = "notify_nats"
NotifyNSQSubSys = "notify_nsq"
NotifyESSubSys = "notify_elasticsearch"
NotifyAMQPSubSys = "notify_amqp"
NotifyPostgresSubSys = "notify_postgres"
NotifyRedisSubSys = "notify_redis"
NotifyWebhookSubSys = "notify_webhook"
// Add new constants here if you add new fields to config.
)
// SubSystems - all supported sub-systems
var SubSystems = set.CreateStringSet(
CredentialsSubSys,
RegionSubSys,
EtcdSubSys,
CacheSubSys,
APISubSys,
StorageClassSubSys,
CompressionSubSys,
LoggerWebhookSubSys,
AuditWebhookSubSys,
AuditKafkaSubSys,
PolicyOPASubSys,
IdentityLDAPSubSys,
IdentityOpenIDSubSys,
ScannerSubSys,
HealSubSys,
NotifyAMQPSubSys,
NotifyESSubSys,
NotifyKafkaSubSys,
NotifyMQTTSubSys,
NotifyMySQLSubSys,
NotifyNATSSubSys,
NotifyNSQSubSys,
NotifyPostgresSubSys,
NotifyRedisSubSys,
NotifyWebhookSubSys,
)
// SubSystemsDynamic - all sub-systems that have dynamic config.
var SubSystemsDynamic = set.CreateStringSet(
APISubSys,
CompressionSubSys,
ScannerSubSys,
HealSubSys,
)
// SubSystemsSingleTargets - subsystems which only support single target.
var SubSystemsSingleTargets = set.CreateStringSet([]string{
CredentialsSubSys,
RegionSubSys,
EtcdSubSys,
CacheSubSys,
APISubSys,
StorageClassSubSys,
CompressionSubSys,
PolicyOPASubSys,
IdentityLDAPSubSys,
IdentityOpenIDSubSys,
HealSubSys,
ScannerSubSys,
}...)
// Constant separators
const (
SubSystemSeparator = madmin.SubSystemSeparator
KvSeparator = madmin.KvSeparator
KvSpaceSeparator = madmin.KvSpaceSeparator
KvComment = madmin.KvComment
KvNewline = madmin.KvNewline
KvDoubleQuote = madmin.KvDoubleQuote
KvSingleQuote = madmin.KvSingleQuote
// Env prefix used for all envs in MinIO
EnvPrefix = "MINIO_"
EnvWordDelimiter = `_`
)
// DefaultKVS - default kvs for all sub-systems
var DefaultKVS map[string]KVS
// RegisterDefaultKVS - this function saves input kvsMap
// globally, this should be called only once preferably
// during `init()`.
func RegisterDefaultKVS(kvsMap map[string]KVS) {
DefaultKVS = map[string]KVS{}
for subSys, kvs := range kvsMap {
DefaultKVS[subSys] = kvs
}
}
// HelpSubSysMap - help for all individual KVS for each sub-systems
// also carries a special empty sub-system which dumps
// help for each sub-system key.
var HelpSubSysMap map[string]HelpKVS
// RegisterHelpSubSys - this function saves
// input help KVS for each sub-system globally,
// this function should be called only once
// preferably in during `init()`.
func RegisterHelpSubSys(helpKVSMap map[string]HelpKVS) {
HelpSubSysMap = map[string]HelpKVS{}
for subSys, hkvs := range helpKVSMap {
HelpSubSysMap[subSys] = hkvs
}
}
// KV - is a shorthand of each key value.
type KV struct {
Key string `json:"key"`
Value string `json:"value"`
}
// KVS - is a shorthand for some wrapper functions
// to operate on list of key values.
type KVS []KV
// Empty - return if kv is empty (contains no key/value pairs).
func (kvs KVS) Empty() bool {
    return len(kvs) == 0
}
// Keys returns the list of keys for the current KVS, in order. The
// optional Comment key is always included in the result: if it is not
// already present it is appended at the end.
func (kvs KVS) Keys() []string {
    keys := make([]string, 0, len(kvs)+1)
    hasComment := false
    for _, kv := range kvs {
        if kv.Key == madmin.CommentKey {
            hasComment = true
        }
        keys = append(keys, kv.Key)
    }
    if !hasComment {
        // Comment KV not found, add it explicitly.
        keys = append(keys, madmin.CommentKey)
    }
    return keys
}
// String renders the KVS as space-separated "key=value" tokens, quoting
// values that contain whitespace. An explicit "enable=on" pair is omitted
// since enabled is the default state.
func (kvs KVS) String() string {
    var sb strings.Builder
    for _, kv := range kvs {
        // Do not need to print if state is on
        if kv.Key == Enable && kv.Value == EnableOn {
            continue
        }
        sb.WriteString(kv.Key)
        sb.WriteString(KvSeparator)
        quoted := madmin.HasSpace(kv.Value)
        if quoted {
            sb.WriteString(KvDoubleQuote)
        }
        sb.WriteString(kv.Value)
        if quoted {
            sb.WriteString(KvDoubleQuote)
        }
        sb.WriteString(KvSpaceSeparator)
    }
    return sb.String()
}
// Merge environment values with on disk KVS, environment values overrides
// anything on the disk.
//
// Targets discovered from the environment are first seeded with the
// default KVS; the second loop then overwrites with on-disk values, so a
// target present in both keeps its disk configuration.
func Merge(cfgKVS map[string]KVS, envname string, defaultKVS KVS) map[string]KVS {
    newCfgKVS := make(map[string]KVS)
    for _, e := range env.List(envname) {
        tgt := strings.TrimPrefix(e, envname+Default)
        if tgt == envname {
            // The bare env var (no suffix) addresses the default target.
            tgt = Default
        }
        newCfgKVS[tgt] = defaultKVS
    }
    for tgt, kv := range cfgKVS {
        newCfgKVS[tgt] = kv
    }
    return newCfgKVS
}
// Set assigns value to key, replacing the value in place when the key
// already exists and appending a new pair otherwise.
func (kvs *KVS) Set(key, value string) {
    for i := range *kvs {
        if (*kvs)[i].Key != key {
            continue
        }
        (*kvs)[i].Value = value
        return
    }
    *kvs = append(*kvs, KV{Key: key, Value: value})
}
// Get returns the value stored under key, or the empty string when the
// key is absent.
func (kvs KVS) Get(key string) string {
    if v, ok := kvs.Lookup(key); ok {
        return v
    }
    return ""
}
// Delete removes the first pair whose key matches, if any, preserving the
// order of the remaining pairs.
func (kvs *KVS) Delete(key string) {
    for i := range *kvs {
        if (*kvs)[i].Key != key {
            continue
        }
        *kvs = append((*kvs)[:i], (*kvs)[i+1:]...)
        return
    }
}
// Lookup - lookup a key in a list of KVS. The boolean reports whether the
// key was found; the first matching pair wins.
func (kvs KVS) Lookup(key string) (string, bool) {
    for _, kv := range kvs {
        if kv.Key == key {
            return kv.Value, true
        }
    }
    return "", false
}
// Config - MinIO server config structure.
type Config map[string]map[string]KVS
// DelFrom - deletes all keys in the input reader. Each non-empty,
// non-comment line is treated as one deletion spec passed to DelKVS.
func (c Config) DelFrom(r io.Reader) error {
    scanner := bufio.NewScanner(r)
    for scanner.Scan() {
        // Skip any empty lines, or comment like characters
        text := scanner.Text()
        if text == "" || strings.HasPrefix(text, KvComment) {
            continue
        }
        if err := c.DelKVS(text); err != nil {
            return err
        }
    }
    return scanner.Err()
}
// ReadConfig - read content from input and write into c.
// Returns whether all parameters were dynamic. Empty lines and lines
// starting with the comment marker are skipped.
func (c Config) ReadConfig(r io.Reader) (dynOnly bool, err error) {
    scanner := bufio.NewScanner(r)
    dynOnly = true
    for scanner.Scan() {
        // Skip any empty lines, or comment like characters
        text := scanner.Text()
        if text == "" || strings.HasPrefix(text, KvComment) {
            continue
        }
        dynamic, err := c.SetKVS(text, DefaultKVS)
        if err != nil {
            return false, err
        }
        // dynOnly stays true only while every applied setting is dynamic.
        dynOnly = dynOnly && dynamic
    }
    if err := scanner.Err(); err != nil {
        return false, err
    }
    return dynOnly, nil
}
// RedactSensitiveInfo - removes sensitive information
// like urls and credentials from the configuration.
// Works on a clone so the receiver is left untouched; keys flagged
// Sensitive in the help metadata have their values masked, and the
// server credentials sub-system is dropped entirely.
func (c Config) RedactSensitiveInfo() Config {
    nc := c.Clone()

    for configName, configVals := range nc {
        for _, helpKV := range HelpSubSysMap[configName] {
            if helpKV.Sensitive {
                for name, kvs := range configVals {
                    for i := range kvs {
                        if kvs[i].Key == helpKV.Key && len(kvs[i].Value) > 0 {
                            kvs[i].Value = "*redacted*"
                        }
                    }
                    configVals[name] = kvs
                }
            }
        }
    }

    // Remove the server credentials altogether
    nc.DelKVS(CredentialsSubSys)

    return nc
}
type configWriteTo struct {
Config
filterByKey string
}
// NewConfigWriteTo - returns a struct which
// allows for serializing the config/kv struct
// to a io.WriterTo. key filters which sub-system targets are emitted.
func NewConfigWriteTo(cfg Config, key string) io.WriterTo {
    return &configWriteTo{Config: cfg, filterByKey: key}
}
// WriteTo - implements io.WriterTo interface implementation for config.
// Emits "<subsystem> <kvs>" for each matching target, newline-separated
// when there is more than one target. Unlike the previous version, write
// errors are no longer discarded: the io.WriterTo contract requires any
// error encountered while writing to be returned alongside the byte count.
func (c *configWriteTo) WriteTo(w io.Writer) (int64, error) {
    kvsTargets, err := c.GetKVS(c.filterByKey, DefaultKVS)
    if err != nil {
        return 0, err
    }
    var n int64
    // writePart writes s, accumulates the byte count, and surfaces errors.
    writePart := func(s string) error {
        m, werr := io.WriteString(w, s)
        n += int64(m)
        return werr
    }
    for _, target := range kvsTargets {
        if err := writePart(target.SubSystem); err != nil {
            return n, err
        }
        if err := writePart(KvSpaceSeparator); err != nil {
            return n, err
        }
        if err := writePart(target.KVS.String()); err != nil {
            return n, err
        }
        if len(kvsTargets) > 1 {
            if err := writePart(KvNewline); err != nil {
                return n, err
            }
        }
    }
    return n, nil
}
// Default KV configs for worm and region
var (
DefaultCredentialKVS = KVS{
KV{
Key: AccessKey,
Value: auth.DefaultAccessKey,
},
KV{
Key: SecretKey,
Value: auth.DefaultSecretKey,
},
}
DefaultRegionKVS = KVS{
KV{
Key: RegionName,
Value: "",
},
}
)
// LookupCreds - lookup credentials from config. Falls back to the default
// access/secret key pair when either key is missing from the KVS.
func LookupCreds(kv KVS) (auth.Credentials, error) {
    if err := CheckValidKeys(CredentialsSubSys, kv, DefaultCredentialKVS); err != nil {
        return auth.Credentials{}, err
    }
    accessKey := kv.Get(AccessKey)
    secretKey := kv.Get(SecretKey)
    if accessKey == "" || secretKey == "" {
        accessKey = auth.DefaultAccessKey
        secretKey = auth.DefaultSecretKey
    }
    return auth.CreateCredentials(accessKey, secretKey)
}
// validRegionRegex matches region names: a letter followed by one or more
// letters, digits, hyphens or underscores. The character class previously
// listed '-' twice ("[a-zA-Z0-9-_-]"); the duplicate is removed with no
// behavior change.
var validRegionRegex = regexp.MustCompile("^[a-zA-Z][a-zA-Z0-9-_]+$")
// LookupRegion - get current region. Resolution order: EnvRegion env var,
// then EnvRegionName env var, then the "name" key of the KVS. An empty
// region is valid; a non-empty one must match validRegionRegex.
func LookupRegion(kv KVS) (string, error) {
    if err := CheckValidKeys(RegionSubSys, kv, DefaultRegionKVS); err != nil {
        return "", err
    }
    region := env.Get(EnvRegion, "")
    if region == "" {
        region = env.Get(EnvRegionName, kv.Get(RegionName))
    }
    if region != "" {
        if validRegionRegex.MatchString(region) {
            return region, nil
        }
        return "", Errorf(
            "region '%s' is invalid, expected simple characters such as [us-east-1, myregion...]",
            region)
    }
    return "", nil
}
// CheckValidKeys - checks if inputs KVS has the necessary keys,
// returns error if it find extra or superflous keys.
func CheckValidKeys(subSys string, kv KVS, validKVS KVS) error {
nkv := KVS{}
for _, kv := range kv {
// Comment is a valid key, its also fully optional
// ignore it since it is a valid key for all
// sub-systems.
if kv.Key == Comment {
continue
}
if _, ok := validKVS.Lookup(kv.Key); !ok {
nkv = append(nkv, kv)
}
}
if len(nkv) > 0 {
return Errorf(
"found invalid keys (%s) for '%s' sub-system, use 'mc admin config reset myminio %s' to fix invalid keys", nkv.String(), subSys, subSys)
}
return nil
}
// LookupWorm - check if worm is enabled
func LookupWorm() (bool, error) {
return ParseBool(env.Get(EnvWorm, EnableOff))
}
// Carries all the renamed sub-systems from their
// previously known names
var renamedSubsys = map[string]string{
CrawlerSubSys: ScannerSubSys,
// Add future sub-system renames
}
// Merge - merges a new config with all the
// missing values for default configs,
// returns a config.
func (c Config) Merge() Config {
cp := New()
for subSys, tgtKV := range c {
for tgt := range tgtKV {
ckvs := c[subSys][tgt]
for _, kv := range cp[subSys][Default] {
_, ok := c[subSys][tgt].Lookup(kv.Key)
if !ok {
ckvs.Set(kv.Key, kv.Value)
}
}
if _, ok := cp[subSys]; !ok {
rnSubSys, ok := renamedSubsys[subSys]
if !ok {
// A config subsystem was removed or server was downgraded.
continue
}
// Copy over settings from previous sub-system
// to newly renamed sub-system
for _, kv := range cp[rnSubSys][Default] {
_, ok := c[subSys][tgt].Lookup(kv.Key)
if !ok {
ckvs.Set(kv.Key, kv.Value)
}
}
subSys = rnSubSys
}
cp[subSys][tgt] = ckvs
}
}
return cp
}
// New - initialize a new server config.
func New() Config {
srvCfg := make(Config)
for _, k := range SubSystems.ToSlice() {
srvCfg[k] = map[string]KVS{}
srvCfg[k][Default] = DefaultKVS[k]
}
return srvCfg
}
// Target signifies an individual target
type Target struct {
SubSystem string
KVS KVS
}
// Targets sub-system targets
type Targets []Target
// GetKVS - get kvs from specific subsystem.
func (c Config) GetKVS(s string, defaultKVS map[string]KVS) (Targets, error) {
if len(s) == 0 {
return nil, Errorf("input cannot be empty")
}
inputs := strings.Fields(s)
if len(inputs) > 1 {
return nil, Errorf("invalid number of arguments %s", s)
}
subSystemValue := strings.SplitN(inputs[0], SubSystemSeparator, 2)
if len(subSystemValue) == 0 {
return nil, Errorf("invalid number of arguments %s", s)
}
found := SubSystems.Contains(subSystemValue[0])
if !found {
// Check for sub-prefix only if the input value is only a
// single value, this rejects invalid inputs if any.
found = !SubSystems.FuncMatch(strings.HasPrefix, subSystemValue[0]).IsEmpty() && len(subSystemValue) == 1
}
if !found {
return nil, Errorf("unknown sub-system %s", s)
}
targets := Targets{}
subSysPrefix := subSystemValue[0]
if len(subSystemValue) == 2 {
if len(subSystemValue[1]) == 0 {
return nil, Errorf("sub-system target '%s' cannot be empty", s)
}
kvs, ok := c[subSysPrefix][subSystemValue[1]]
if !ok {
return nil, Errorf("sub-system target '%s' doesn't exist", s)
}
for _, kv := range defaultKVS[subSysPrefix] {
_, ok = kvs.Lookup(kv.Key)
if !ok {
kvs.Set(kv.Key, kv.Value)
}
}
targets = append(targets, Target{
SubSystem: inputs[0],
KVS: kvs,
})
} else {
hkvs := HelpSubSysMap[""]
// Use help for sub-system to preserve the order.
for _, hkv := range hkvs {
if !strings.HasPrefix(hkv.Key, subSysPrefix) {
continue
}
if c[hkv.Key][Default].Empty() {
targets = append(targets, Target{
SubSystem: hkv.Key,
KVS: defaultKVS[hkv.Key],
})
}
for k, kvs := range c[hkv.Key] {
for _, dkv := range defaultKVS[hkv.Key] {
_, ok := kvs.Lookup(dkv.Key)
if !ok {
kvs.Set(dkv.Key, dkv.Value)
}
}
if k != Default {
targets = append(targets, Target{
SubSystem: hkv.Key + SubSystemSeparator + k,
KVS: kvs,
})
} else {
targets = append(targets, Target{
SubSystem: hkv.Key,
KVS: kvs,
})
}
}
}
}
return targets, nil
}
// DelKVS - delete a specific key.
func (c Config) DelKVS(s string) error {
if len(s) == 0 {
return Errorf("input arguments cannot be empty")
}
inputs := strings.Fields(s)
if len(inputs) > 1 {
return Errorf("invalid number of arguments %s", s)
}
subSystemValue := strings.SplitN(inputs[0], SubSystemSeparator, 2)
if len(subSystemValue) == 0 {
return Errorf("invalid number of arguments %s", s)
}
if !SubSystems.Contains(subSystemValue[0]) {
// Unknown sub-system found try to remove it anyways.
delete(c, subSystemValue[0])
return nil
}
tgt := Default
subSys := subSystemValue[0]
if len(subSystemValue) == 2 {
if len(subSystemValue[1]) == 0 {
return Errorf("sub-system target '%s' cannot be empty", s)
}
tgt = subSystemValue[1]
}
_, ok := c[subSys][tgt]
if !ok {
return Errorf("sub-system %s already deleted", s)
}
delete(c[subSys], tgt)
return nil
}
// Clone - clones a config map entirely.
func (c Config) Clone() Config {
cp := New()
for subSys, tgtKV := range c {
cp[subSys] = make(map[string]KVS)
for tgt, kv := range tgtKV {
cp[subSys][tgt] = append(cp[subSys][tgt], kv...)
}
}
return cp
}
// SetKVS - set specific key values per sub-system.
func (c Config) SetKVS(s string, defaultKVS map[string]KVS) (dynamic bool, err error) {
if len(s) == 0 {
return false, Errorf("input arguments cannot be empty")
}
inputs := strings.SplitN(s, KvSpaceSeparator, 2)
if len(inputs) <= 1 {
return false, Errorf("invalid number of arguments '%s'", s)
}
subSystemValue := strings.SplitN(inputs[0], SubSystemSeparator, 2)
if len(subSystemValue) == 0 {
return false, Errorf("invalid number of arguments %s", s)
}
if !SubSystems.Contains(subSystemValue[0]) {
return false, Errorf("unknown sub-system %s", s)
}
if SubSystemsSingleTargets.Contains(subSystemValue[0]) && len(subSystemValue) == 2 {
return false, Errorf("sub-system '%s' only supports single target", subSystemValue[0])
}
dynamic = SubSystemsDynamic.Contains(subSystemValue[0])
tgt := Default
subSys := subSystemValue[0]
if len(subSystemValue) == 2 {
tgt = subSystemValue[1]
}
fields := madmin.KvFields(inputs[1], defaultKVS[subSys].Keys())
if len(fields) == 0 {
return false, Errorf("sub-system '%s' cannot have empty keys", subSys)
}
var kvs = KVS{}
var prevK string
for _, v := range fields {
kv := strings.SplitN(v, KvSeparator, 2)
if len(kv) == 0 {
continue
}
if len(kv) == 1 && prevK != "" {
value := strings.Join([]string{
kvs.Get(prevK),
madmin.SanitizeValue(kv[0]),
}, KvSpaceSeparator)
kvs.Set(prevK, value)
continue
}
if len(kv) == 2 {
prevK = kv[0]
kvs.Set(prevK, madmin.SanitizeValue(kv[1]))
continue
}
return false, Errorf("key '%s', cannot have empty value", kv[0])
}
_, ok := kvs.Lookup(Enable)
// Check if state is required
_, enableRequired := defaultKVS[subSys].Lookup(Enable)
if !ok && enableRequired {
// implicit state "on" if not specified.
kvs.Set(Enable, EnableOn)
}
currKVS, ok := c[subSys][tgt]
if !ok {
currKVS = defaultKVS[subSys]
} else {
for _, kv := range defaultKVS[subSys] {
if _, ok = currKVS.Lookup(kv.Key); !ok {
currKVS.Set(kv.Key, kv.Value)
}
}
}
for _, kv := range kvs {
if kv.Key == Comment {
// Skip comment and add it later.
continue
}
currKVS.Set(kv.Key, kv.Value)
}
v, ok := kvs.Lookup(Comment)
if ok {
currKVS.Set(Comment, v)
}
hkvs := HelpSubSysMap[subSys]
for _, hkv := range hkvs {
var enabled bool
if enableRequired {
enabled = currKVS.Get(Enable) == EnableOn
} else {
// when enable arg is not required
// then it is implicit on for the sub-system.
enabled = true
}
v, _ := currKVS.Lookup(hkv.Key)
if v == "" && !hkv.Optional && enabled {
// Return error only if the
// key is enabled, for state=off
// let it be empty.
return false, Errorf(
"'%s' is not optional for '%s' sub-system, please check '%s' documentation",
hkv.Key, subSys, subSys)
}
}
c[subSys][tgt] = currKVS
return dynamic, nil
}
| internal/config/config.go | 0 | https://github.com/minio/minio/commit/47dfc1b1b09a3ef6bc21d7f39636fbbe81e2c16f | [
0.0010898384498432279,
0.00021351841860450804,
0.00016055804735515267,
0.00017319581820629537,
0.00014408746210392565
] |
{
"id": 3,
"code_window": [
"\tfor _, dn := range userDistNames {\n",
"\t\tsearchRequest := ldap.NewSearchRequest(\n",
"\t\t\tdn,\n",
"\t\t\tldap.ScopeBaseObject, ldap.NeverDerefAliases, 0, 0, false,\n",
"\t\t\t\"(objectclass=*)\",\n",
"\t\t\t[]string{}, // only need DN, so no pass no attributes here\n",
"\t\t\tnil,\n",
"\t\t)\n",
"\n",
"\t\tsearchResult, err := conn.Search(searchRequest)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tfilter,\n"
],
"file_path": "internal/config/identity/ldap/config.go",
"type": "replace",
"edit_start_line_idx": 503
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package sql
import (
"testing"
"time"
)
func TestParseAndDisplaySQLTimestamp(t *testing.T) {
beijing := time.FixedZone("", int((8 * time.Hour).Seconds()))
fakeLosAngeles := time.FixedZone("", -int((8 * time.Hour).Seconds()))
cases := []struct {
s string
t time.Time
}{
{"2010T", time.Date(2010, 1, 1, 0, 0, 0, 0, time.UTC)},
{"2010-02T", time.Date(2010, 2, 1, 0, 0, 0, 0, time.UTC)},
{"2010-02-03T", time.Date(2010, 2, 3, 0, 0, 0, 0, time.UTC)},
{"2010-02-03T04:11Z", time.Date(2010, 2, 3, 4, 11, 0, 0, time.UTC)},
{"2010-02-03T04:11:30Z", time.Date(2010, 2, 3, 4, 11, 30, 0, time.UTC)},
{"2010-02-03T04:11:30.23Z", time.Date(2010, 2, 3, 4, 11, 30, 230000000, time.UTC)},
{"2010-02-03T04:11+08:00", time.Date(2010, 2, 3, 4, 11, 0, 0, beijing)},
{"2010-02-03T04:11:30+08:00", time.Date(2010, 2, 3, 4, 11, 30, 0, beijing)},
{"2010-02-03T04:11:30.23+08:00", time.Date(2010, 2, 3, 4, 11, 30, 230000000, beijing)},
{"2010-02-03T04:11:30-08:00", time.Date(2010, 2, 3, 4, 11, 30, 0, fakeLosAngeles)},
{"2010-02-03T04:11:30.23-08:00", time.Date(2010, 2, 3, 4, 11, 30, 230000000, fakeLosAngeles)},
}
for i, tc := range cases {
tval, err := parseSQLTimestamp(tc.s)
if err != nil {
t.Errorf("Case %d: Unexpected error: %v", i+1, err)
continue
}
if !tval.Equal(tc.t) {
t.Errorf("Case %d: Expected %v got %v", i+1, tc.t, tval)
continue
}
tstr := FormatSQLTimestamp(tc.t)
if tstr != tc.s {
t.Errorf("Case %d: Expected %s got %s", i+1, tc.s, tstr)
continue
}
}
}
| internal/s3select/sql/timestampfuncs_test.go | 0 | https://github.com/minio/minio/commit/47dfc1b1b09a3ef6bc21d7f39636fbbe81e2c16f | [
0.00017860272782854736,
0.00017283197666984051,
0.00016280919953715056,
0.00017574876255821437,
0.000005321489425114123
] |
{
"id": 3,
"code_window": [
"\tfor _, dn := range userDistNames {\n",
"\t\tsearchRequest := ldap.NewSearchRequest(\n",
"\t\t\tdn,\n",
"\t\t\tldap.ScopeBaseObject, ldap.NeverDerefAliases, 0, 0, false,\n",
"\t\t\t\"(objectclass=*)\",\n",
"\t\t\t[]string{}, // only need DN, so no pass no attributes here\n",
"\t\t\tnil,\n",
"\t\t)\n",
"\n",
"\t\tsearchResult, err := conn.Search(searchRequest)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tfilter,\n"
],
"file_path": "internal/config/identity/ldap/config.go",
"type": "replace",
"edit_start_line_idx": 503
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
// Package crypto implements AWS S3 related cryptographic building blocks
// for implementing Server-Side-Encryption (SSE-S3) and Server-Side-Encryption
// with customer provided keys (SSE-C).
//
// All objects are encrypted with an unique and randomly generated 'ObjectKey'.
// The ObjectKey itself is never stored in plaintext. Instead it is only stored
// in a sealed from. The sealed 'ObjectKey' is created by encrypting the 'ObjectKey'
// with an unique key-encryption-key. Given the correct key-encryption-key the
// sealed 'ObjectKey' can be unsealed and the object can be decrypted.
//
//
// ## SSE-C
//
// SSE-C computes the key-encryption-key from the client-provided key, an
// initialization vector (IV) and the bucket/object path.
//
// 1. Encrypt:
// Input: ClientKey, bucket, object, metadata, object_data
// - IV := Random({0,1}²⁵⁶)
// - ObjectKey := SHA256(ClientKey || Random({0,1}²⁵⁶))
// - KeyEncKey := HMAC-SHA256(ClientKey, IV || 'SSE-C' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object)
// - SealedKey := DAREv2_Enc(KeyEncKey, ObjectKey)
// - enc_object_data := DAREv2_Enc(ObjectKey, object_data)
// - metadata <- IV
// - metadata <- SealedKey
// Output: enc_object_data, metadata
//
// 2. Decrypt:
// Input: ClientKey, bucket, object, metadata, enc_object_data
// - IV <- metadata
// - SealedKey <- metadata
// - KeyEncKey := HMAC-SHA256(ClientKey, IV || 'SSE-C' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object)
// - ObjectKey := DAREv2_Dec(KeyEncKey, SealedKey)
// - object_data := DAREv2_Dec(ObjectKey, enc_object_data)
// Output: object_data
//
//
// ## SSE-S3
//
// SSE-S3 can use either a master key or a KMS as root-of-trust.
// The en/decryption slightly depens upon which root-of-trust is used.
//
// ### SSE-S3 and single master key
//
// The master key is used to derive unique object- and key-encryption-keys.
// SSE-S3 with a single master key works as SSE-C where the master key is
// used as the client-provided key.
//
// 1. Encrypt:
// Input: MasterKey, bucket, object, metadata, object_data
// - IV := Random({0,1}²⁵⁶)
// - ObjectKey := SHA256(MasterKey || Random({0,1}²⁵⁶))
// - KeyEncKey := HMAC-SHA256(MasterKey, IV || 'SSE-S3' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object)
// - SealedKey := DAREv2_Enc(KeyEncKey, ObjectKey)
// - enc_object_data := DAREv2_Enc(ObjectKey, object_data)
// - metadata <- IV
// - metadata <- SealedKey
// Output: enc_object_data, metadata
//
// 2. Decrypt:
// Input: MasterKey, bucket, object, metadata, enc_object_data
// - IV <- metadata
// - SealedKey <- metadata
// - KeyEncKey := HMAC-SHA256(MasterKey, IV || 'SSE-S3' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object)
// - ObjectKey := DAREv2_Dec(KeyEncKey, SealedKey)
// - object_data := DAREv2_Dec(ObjectKey, enc_object_data)
// Output: object_data
//
//
// ### SSE-S3 and KMS
//
// SSE-S3 requires that the KMS provides two functions:
// 1. Generate(KeyID) -> (Key, EncKey)
// 2. Unseal(KeyID, EncKey) -> Key
//
// 1. Encrypt:
// Input: KeyID, bucket, object, metadata, object_data
// - Key, EncKey := Generate(KeyID)
// - IV := Random({0,1}²⁵⁶)
// - ObjectKey := SHA256(Key, Random({0,1}²⁵⁶))
// - KeyEncKey := HMAC-SHA256(Key, IV || 'SSE-S3' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object)
// - SealedKey := DAREv2_Enc(KeyEncKey, ObjectKey)
// - enc_object_data := DAREv2_Enc(ObjectKey, object_data)
// - metadata <- IV
// - metadata <- KeyID
// - metadata <- EncKey
// - metadata <- SealedKey
// Output: enc_object_data, metadata
//
// 2. Decrypt:
// Input: bucket, object, metadata, enc_object_data
// - KeyID <- metadata
// - EncKey <- metadata
// - IV <- metadata
// - SealedKey <- metadata
// - Key := Unseal(KeyID, EncKey)
// - KeyEncKey := HMAC-SHA256(Key, IV || 'SSE-S3' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object)
// - ObjectKey := DAREv2_Dec(KeyEncKey, SealedKey)
// - object_data := DAREv2_Dec(ObjectKey, enc_object_data)
// Output: object_data
//
package crypto
| internal/crypto/doc.go | 0 | https://github.com/minio/minio/commit/47dfc1b1b09a3ef6bc21d7f39636fbbe81e2c16f | [
0.00017860272782854736,
0.0001721780135994777,
0.00016615788626950234,
0.000172431580722332,
0.0000036786639157071477
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.