diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
index 4704592..52d3ee3 100644
--- a/Godeps/Godeps.json
+++ b/Godeps/Godeps.json
@@ -2,6 +2,11 @@
"ImportPath": "github.com/containerops/dockyard",
"GoVersion": "go1.4.2",
"Deps": [
+ {
+ "ImportPath": "github.com/Sirupsen/logrus",
+ "Comment": "v0.8.6-1-g8bca266",
+ "Rev": "8bca2664072173a3c71db4c28ca8d304079b1787"
+ },
{
"ImportPath": "github.com/Unknwon/com",
"Rev": "47d7d2b81a44157600669037e11e9ddfbf16745f"
@@ -27,20 +32,64 @@
},
{
"ImportPath": "github.com/containerops/wrench/db",
- "Rev": "4663f11c7fcc03a9c467f3f5fa640b31b06105f3"
+ "Rev": "ce091398c02a0711bd39fd0a8de2c1eb2fafc0c2"
},
{
"ImportPath": "github.com/containerops/wrench/setting",
- "Rev": "4663f11c7fcc03a9c467f3f5fa640b31b06105f3"
+ "Rev": "ce091398c02a0711bd39fd0a8de2c1eb2fafc0c2"
},
{
"ImportPath": "github.com/containerops/wrench/utils",
- "Rev": "4663f11c7fcc03a9c467f3f5fa640b31b06105f3"
+ "Rev": "ce091398c02a0711bd39fd0a8de2c1eb2fafc0c2"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/pkg/archive",
+ "Comment": "v1.4.1-5132-gc19a00d",
+ "Rev": "c19a00d4cbc52adc7da76f2f5f70bb38190c2271"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/pkg/fileutils",
+ "Comment": "v1.4.1-5132-gc19a00d",
+ "Rev": "c19a00d4cbc52adc7da76f2f5f70bb38190c2271"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/pkg/ioutils",
+ "Comment": "v1.4.1-5132-gc19a00d",
+ "Rev": "c19a00d4cbc52adc7da76f2f5f70bb38190c2271"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/pkg/pools",
+ "Comment": "v1.4.1-5132-gc19a00d",
+ "Rev": "c19a00d4cbc52adc7da76f2f5f70bb38190c2271"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/pkg/promise",
+ "Comment": "v1.4.1-5132-gc19a00d",
+ "Rev": "c19a00d4cbc52adc7da76f2f5f70bb38190c2271"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/pkg/stdcopy",
+ "Comment": "v1.4.1-5132-gc19a00d",
+ "Rev": "c19a00d4cbc52adc7da76f2f5f70bb38190c2271"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/pkg/system",
+ "Comment": "v1.4.1-5132-gc19a00d",
+ "Rev": "c19a00d4cbc52adc7da76f2f5f70bb38190c2271"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/pkg/units",
+ "Comment": "v1.4.1-5132-gc19a00d",
+ "Rev": "c19a00d4cbc52adc7da76f2f5f70bb38190c2271"
},
{
"ImportPath": "github.com/docker/libtrust",
"Rev": "9cbd2a1374f46905c68a4eb3694a130610adc62a"
},
+ {
+ "ImportPath": "github.com/satori/go.uuid",
+ "Rev": "6b8e5b55d20d01ad47ecfe98e5171688397c61e9"
+ },
{
"ImportPath": "gopkg.in/bsm/ratelimit.v1",
"Rev": "bda20d5067a03094fc6762f7ead53027afac5f28"
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/.gitignore b/Godeps/_workspace/src/github.com/Sirupsen/logrus/.gitignore
new file mode 100644
index 0000000..66be63a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/.gitignore
@@ -0,0 +1 @@
+logrus
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml b/Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml
new file mode 100644
index 0000000..2d8c086
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml
@@ -0,0 +1,8 @@
+language: go
+go:
+ - 1.2
+ - 1.3
+ - 1.4
+ - tip
+install:
+ - go get -t ./...
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/CHANGELOG.md b/Godeps/_workspace/src/github.com/Sirupsen/logrus/CHANGELOG.md
new file mode 100644
index 0000000..b1fe4b6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/CHANGELOG.md
@@ -0,0 +1,41 @@
+# 0.8.6
+
+* hooks/raven: allow passing an initialized client
+
+# 0.8.5
+
+* logrus/core: revert #208
+
+# 0.8.4
+
+* formatter/text: fix data race (#218)
+
+# 0.8.3
+
+* logrus/core: fix entry log level (#208)
+* logrus/core: improve performance of text formatter by 40%
+* logrus/core: expose `LevelHooks` type
+* logrus/core: add support for DragonflyBSD and NetBSD
+* formatter/text: print structs more verbosely
+
+# 0.8.2
+
+* logrus: fix more Fatal family functions
+
+# 0.8.1
+
+* logrus: fix not exiting on `Fatalf` and `Fatalln`
+
+# 0.8.0
+
+* logrus: defaults to stderr instead of stdout
+* hooks/sentry: add special field for `*http.Request`
+* formatter/text: ignore Windows for colors
+
+# 0.7.3
+
+* formatter/\*: allow configuration of timestamp layout
+
+# 0.7.2
+
+* formatter/text: Add configuration option for time format (#158)
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE b/Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE
new file mode 100644
index 0000000..f090cb4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Simon Eskildsen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md b/Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md
new file mode 100644
index 0000000..bd9ffb6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md
@@ -0,0 +1,356 @@
+# Logrus [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![godoc reference](https://godoc.org/github.com/Sirupsen/logrus?status.png)][godoc]
+
+Logrus is a structured logger for Go (golang), completely API compatible with
+the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
+yet stable (pre 1.0). Logrus itself is completely stable and has been used in
+many large deployments. The core API is unlikely to change much but please
+version control your Logrus to make sure you aren't fetching latest `master` on
+every build.**
+
+Nicely color-coded in development (when a TTY is attached, otherwise just
+plain text):
+
+![Colored](http://i.imgur.com/PY7qMwd.png)
+
+With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash
+or Splunk:
+
+```json
+{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
+ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
+
+{"level":"warning","msg":"The group's number increased tremendously!",
+"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
+"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
+"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
+
+{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
+"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
+```
+
+With the default `log.Formatter = new(logrus.TextFormatter)` when a TTY is not
+attached, the output is compatible with the
+[logfmt](http://godoc.org/github.com/kr/logfmt) format:
+
+```text
+time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
+time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
+time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
+time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
+time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
+time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
+exit status 1
+```
+
+#### Example
+
+The simplest way to use Logrus is the package-level exported logger:
+
+```go
+package main
+
+import (
+ log "github.com/Sirupsen/logrus"
+)
+
+func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ }).Info("A walrus appears")
+}
+```
+
+Note that it's completely API-compatible with the stdlib logger, so you can
+replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
+and you'll now have the flexibility of Logrus. You can customize it all you
+want:
+
+```go
+package main
+
+import (
+ "os"
+ log "github.com/Sirupsen/logrus"
+ "github.com/Sirupsen/logrus/hooks/airbrake"
+)
+
+func init() {
+ // Log as JSON instead of the default ASCII formatter.
+ log.SetFormatter(&log.JSONFormatter{})
+
+ // Use the Airbrake hook to report errors that have Error severity or above to
+ // an exception tracker. You can create custom hooks, see the Hooks section.
+ log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development"))
+
+ // Output to stderr instead of stdout, could also be a file.
+ log.SetOutput(os.Stderr)
+
+ // Only log the warning severity or above.
+ log.SetLevel(log.WarnLevel)
+}
+
+func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+
+ log.WithFields(log.Fields{
+ "omg": true,
+ "number": 122,
+ }).Warn("The group's number increased tremendously!")
+
+ log.WithFields(log.Fields{
+ "omg": true,
+ "number": 100,
+ }).Fatal("The ice breaks!")
+
+ // A common pattern is to re-use fields between logging statements by re-using
+ // the logrus.Entry returned from WithFields()
+ contextLogger := log.WithFields(log.Fields{
+ "common": "this is a common field",
+ "other": "I also should be logged always",
+ })
+
+ contextLogger.Info("I'll be logged with common and other field")
+ contextLogger.Info("Me too")
+}
+```
+
+For more advanced usage such as logging to multiple locations from the same
+application, you can also create an instance of the `logrus` Logger:
+
+```go
+package main
+
+import (
+  "os"
+
+  "github.com/Sirupsen/logrus"
+)
+
+// Create a new instance of the logger. You can have any number of instances.
+var log = logrus.New()
+
+func main() {
+ // The API for setting attributes is a little different than the package level
+ // exported logger. See Godoc.
+ log.Out = os.Stderr
+
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+}
+```
+
+#### Fields
+
+Logrus encourages careful, structured logging through logging fields instead of
+long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
+to send event %s to topic %s with key %d")`, you should log the much more
+discoverable:
+
+```go
+log.WithFields(log.Fields{
+ "event": event,
+ "topic": topic,
+ "key": key,
+}).Fatal("Failed to send event")
+```
+
+We've found this API forces you to think about logging in a way that produces
+much more useful logging messages. We've been in countless situations where a
+single additional field on an existing log statement would've saved us hours.
+The `WithFields` call is optional.
+
+In general, with Logrus, using any of the `printf`-family functions should be
+seen as a hint that you should add a field; you can, however, still use the
+`printf`-family functions with Logrus, as the sketch below shows.
+
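+As a sketch of the guideline above, compare a `printf`-style call with its
+field-based equivalent (the `filename` and `size` variables here are
+hypothetical):
+
+```go
+// Interpolating values into the message hides them from parsers...
+log.Infof("Uploaded %s (%d bytes)", filename, size)
+
+// ...while attaching them as fields keeps them machine-readable.
+log.WithFields(log.Fields{
+  "file": filename,
+  "size": size,
+}).Info("Uploaded file")
+```
+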
+#### Hooks
+
+You can add hooks for logging levels. For example, to send errors to an
+exception-tracking service on `Error`, `Fatal` and `Panic`, to send info to
+StatsD, or to log to multiple places simultaneously, e.g. syslog.
+
+Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
+`init`:
+
+```go
+import (
+ log "github.com/Sirupsen/logrus"
+ "github.com/Sirupsen/logrus/hooks/airbrake"
+ logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
+ "log/syslog"
+)
+
+func init() {
+ log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development"))
+
+ hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+ if err != nil {
+ log.Error("Unable to connect to local syslog daemon")
+ } else {
+ log.AddHook(hook)
+ }
+}
+```
+
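+A custom hook only needs to satisfy the two-method `Hook` interface
+(`Levels()` and `Fire()`). A minimal sketch (the `MetricsHook` type and its
+counter are illustrative, not part of Logrus):
+
+```go
+// MetricsHook counts error-and-above entries.
+type MetricsHook struct {
+  errorCount int
+}
+
+// Fire is invoked by Logrus for every entry logged at one of Levels().
+func (h *MetricsHook) Fire(entry *log.Entry) error {
+  h.errorCount++
+  return nil
+}
+
+// Levels restricts the hook to error, fatal and panic entries.
+func (h *MetricsHook) Levels() []log.Level {
+  return []log.Level{log.ErrorLevel, log.FatalLevel, log.PanicLevel}
+}
+```
+
+Register it like any other hook: `log.AddHook(&MetricsHook{})`.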
+
+| Hook | Description |
+| ----- | ----------- |
+| [Airbrake](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go) | Send errors to an exception tracking service compatible with the Airbrake API. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
+| [Papertrail](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go) | Send errors to the Papertrail hosted logging service via UDP. |
+| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
+| [BugSnag](https://github.com/Sirupsen/logrus/blob/master/hooks/bugsnag/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
+| [Sentry](https://github.com/Sirupsen/logrus/blob/master/hooks/sentry/sentry.go) | Send errors to the Sentry error logging and aggregation service. |
+| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
+| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
+| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
+| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
+| [Graylog](https://github.com/gemnasium/logrus-hooks/tree/master/graylog) | Hook for logging to [Graylog](http://graylog2.org/) |
+| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
+| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
+| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
+| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
+| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
+| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
+
+#### Level logging
+
+Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
+
+```go
+log.Debug("Useful debugging information.")
+log.Info("Something noteworthy happened!")
+log.Warn("You should probably take a look at this.")
+log.Error("Something failed but I'm not quitting.")
+// Calls os.Exit(1) after logging
+log.Fatal("Bye.")
+// Calls panic() after logging
+log.Panic("I'm bailing.")
+```
+
+You can set the logging level on a `Logger`; it will then only log entries
+with that severity or above:
+
+```go
+// Will log anything that is info or above (warn, error, fatal, panic). Default.
+log.SetLevel(log.InfoLevel)
+```
+
+It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
+environment if your application has that.
+
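+For example, a minimal sketch that enables debug logging when a hypothetical
+`VERBOSE` environment variable is set (assuming the package-level logger and
+an `os` import):
+
+```go
+if os.Getenv("VERBOSE") != "" {
+  log.SetLevel(log.DebugLevel)
+}
+```
+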
+#### Entries
+
+Besides the fields added with `WithField` or `WithFields` some fields are
+automatically added to all logging events:
+
+1. `time`. The timestamp when the entry was created.
+2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
+   the `WithFields` call. E.g. `Failed to send event.`
+3. `level`. The logging level. E.g. `info`.
+
+#### Environments
+
+Logrus has no notion of environment.
+
+If you wish for hooks and formatters to only be used in specific environments,
+you should handle that yourself. For example, if your application has a global
+variable `Environment`, which is a string representation of the environment, you
+could do:
+
+```go
+import (
+ log "github.com/Sirupsen/logrus"
+)
+
+func init() {
+ // do something here to set environment depending on an environment variable
+ // or command-line flag
+ if Environment == "production" {
+ log.SetFormatter(&log.JSONFormatter{})
+ } else {
+ // The TextFormatter is default, you don't actually have to do this.
+ log.SetFormatter(&log.TextFormatter{})
+ }
+}
+```
+
+This configuration is how `logrus` was intended to be used, but JSON in
+production is mostly only useful if you do log aggregation with tools like
+Splunk or Logstash.
+
+#### Formatters
+
+The built-in logging formatters are:
+
+* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
+ without colors.
+ * *Note:* to force colored output when there is no TTY, set the `ForceColors`
+ field to `true`. To force no colored output even if there is a TTY set the
+ `DisableColors` field to `true`
+* `logrus.JSONFormatter`. Logs fields as JSON.
+* `logrus_logstash.LogstashFormatter`. Logs fields as Logstash Events (http://logstash.net).
+
+ ```go
+ logrus.SetFormatter(&logrus_logstash.LogstashFormatter{Type: "application_name"})
+ ```
+
+Third party logging formatters:
+
+* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
+
+You can define your own formatter by implementing the `Formatter` interface,
+requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
+`Fields` type (`map[string]interface{}`) with all your fields as well as the
+default ones (see Entries section above):
+
+```go
+type MyJSONFormatter struct {
+}
+
+log.SetFormatter(new(MyJSONFormatter))
+
+func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
+ // Note this doesn't include Time, Level and Message which are available on
+ // the Entry. Consult `godoc` on information about those fields or read the
+ // source of the official loggers.
+ serialized, err := json.Marshal(entry.Data)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ }
+ return append(serialized, '\n'), nil
+}
+```
+
+#### Logger as an `io.Writer`
+
+Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
+
+```go
+w := logger.Writer()
+defer w.Close()
+
+srv := http.Server{
+ // create a stdlib log.Logger that writes to
+ // logrus.Logger.
+ ErrorLog: log.New(w, "", 0),
+}
+```
+
+Each line written to that writer will be printed the usual way, using formatters
+and hooks. The level for those entries is `info`.
+
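+The same writer can also redirect the standard library's global logger; a
+minimal sketch, with the stdlib `log` package imported as `stdlog` so the name
+doesn't clash with Logrus:
+
+```go
+import stdlog "log"
+
+stdlog.SetOutput(logger.Writer())
+```
+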
+#### Rotation
+
+Log rotation is not provided with Logrus. Log rotation should be done by an
+external program (like `logrotate(8)`) that can compress and delete old log
+entries. It should not be a feature of the application-level logger.
+
+
+[godoc]: https://godoc.org/github.com/Sirupsen/logrus
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go
new file mode 100644
index 0000000..699ea03
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go
@@ -0,0 +1,254 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "time"
+)
+
+// An entry is the final or intermediate Logrus logging entry. It contains all
+// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
+// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
+// passed around as much as you wish to avoid field duplication.
+type Entry struct {
+ Logger *Logger
+
+ // Contains all the fields set by the user.
+ Data Fields
+
+ // Time at which the log entry was created
+ Time time.Time
+
+ // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
+ Level Level
+
+ // Message passed to Debug, Info, Warn, Error, Fatal or Panic
+ Message string
+}
+
+func NewEntry(logger *Logger) *Entry {
+ return &Entry{
+ Logger: logger,
+ // Default is three fields, give a little extra room
+ Data: make(Fields, 5),
+ }
+}
+
+// Returns a reader for the entry, which is a proxy to the formatter.
+func (entry *Entry) Reader() (*bytes.Buffer, error) {
+ serialized, err := entry.Logger.Formatter.Format(entry)
+ return bytes.NewBuffer(serialized), err
+}
+
+// Returns the string representation from the reader and ultimately the
+// formatter.
+func (entry *Entry) String() (string, error) {
+ reader, err := entry.Reader()
+ if err != nil {
+ return "", err
+ }
+
+ return reader.String(), err
+}
+
+// Add a single field to the Entry.
+func (entry *Entry) WithField(key string, value interface{}) *Entry {
+ return entry.WithFields(Fields{key: value})
+}
+
+// Add a map of fields to the Entry.
+func (entry *Entry) WithFields(fields Fields) *Entry {
+ data := Fields{}
+ for k, v := range entry.Data {
+ data[k] = v
+ }
+ for k, v := range fields {
+ data[k] = v
+ }
+ return &Entry{Logger: entry.Logger, Data: data}
+}
+
+func (entry *Entry) log(level Level, msg string) {
+ entry.Time = time.Now()
+ entry.Level = level
+ entry.Message = msg
+
+ if err := entry.Logger.Hooks.Fire(level, entry); err != nil {
+ entry.Logger.mu.Lock()
+ fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
+ entry.Logger.mu.Unlock()
+ }
+
+ reader, err := entry.Reader()
+ if err != nil {
+ entry.Logger.mu.Lock()
+ fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
+ entry.Logger.mu.Unlock()
+ }
+
+ entry.Logger.mu.Lock()
+ defer entry.Logger.mu.Unlock()
+
+ _, err = io.Copy(entry.Logger.Out, reader)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
+ }
+
+ // To avoid Entry#log() returning a value that only would make sense for
+ // panic() to use in Entry#Panic(), we avoid the allocation by checking
+ // directly here.
+ if level <= PanicLevel {
+ panic(entry)
+ }
+}
+
+func (entry *Entry) Debug(args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.log(DebugLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Print(args ...interface{}) {
+ entry.Info(args...)
+}
+
+func (entry *Entry) Info(args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.log(InfoLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Warn(args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.log(WarnLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Warning(args ...interface{}) {
+ entry.Warn(args...)
+}
+
+func (entry *Entry) Error(args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.log(ErrorLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Fatal(args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.log(FatalLevel, fmt.Sprint(args...))
+ }
+ os.Exit(1)
+}
+
+func (entry *Entry) Panic(args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.log(PanicLevel, fmt.Sprint(args...))
+ }
+ panic(fmt.Sprint(args...))
+}
+
+// Entry Printf family functions
+
+func (entry *Entry) Debugf(format string, args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.Debug(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Infof(format string, args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.Info(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Printf(format string, args ...interface{}) {
+ entry.Infof(format, args...)
+}
+
+func (entry *Entry) Warnf(format string, args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.Warn(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Warningf(format string, args ...interface{}) {
+ entry.Warnf(format, args...)
+}
+
+func (entry *Entry) Errorf(format string, args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.Error(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Fatalf(format string, args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.Fatal(fmt.Sprintf(format, args...))
+ }
+ os.Exit(1)
+}
+
+func (entry *Entry) Panicf(format string, args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.Panic(fmt.Sprintf(format, args...))
+ }
+}
+
+// Entry Println family functions
+
+func (entry *Entry) Debugln(args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.Debug(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Infoln(args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.Info(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Println(args ...interface{}) {
+ entry.Infoln(args...)
+}
+
+func (entry *Entry) Warnln(args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.Warn(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Warningln(args ...interface{}) {
+ entry.Warnln(args...)
+}
+
+func (entry *Entry) Errorln(args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.Error(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Fatalln(args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.Fatal(entry.sprintlnn(args...))
+ }
+ os.Exit(1)
+}
+
+func (entry *Entry) Panicln(args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.Panic(entry.sprintlnn(args...))
+ }
+}
+
+// Sprintlnn => Sprint no newline. This is to get the behavior of
+// fmt.Sprintln, where spaces are always added between operands, regardless of
+// their type. Instead of vendoring the Sprintln implementation to spare a
+// string allocation, we do the simplest thing.
+func (entry *Entry) sprintlnn(args ...interface{}) string {
+ msg := fmt.Sprintln(args...)
+ return msg[:len(msg)-1]
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry_test.go
new file mode 100644
index 0000000..98717df
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry_test.go
@@ -0,0 +1,53 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestEntryPanicln(t *testing.T) {
+ errBoom := fmt.Errorf("boom time")
+
+ defer func() {
+ p := recover()
+ assert.NotNil(t, p)
+
+ switch pVal := p.(type) {
+ case *Entry:
+ assert.Equal(t, "kaboom", pVal.Message)
+ assert.Equal(t, errBoom, pVal.Data["err"])
+ default:
+ t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
+ }
+ }()
+
+ logger := New()
+ logger.Out = &bytes.Buffer{}
+ entry := NewEntry(logger)
+ entry.WithField("err", errBoom).Panicln("kaboom")
+}
+
+func TestEntryPanicf(t *testing.T) {
+ errBoom := fmt.Errorf("boom again")
+
+ defer func() {
+ p := recover()
+ assert.NotNil(t, p)
+
+ switch pVal := p.(type) {
+ case *Entry:
+ assert.Equal(t, "kaboom true", pVal.Message)
+ assert.Equal(t, errBoom, pVal.Data["err"])
+ default:
+ t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
+ }
+ }()
+
+ logger := New()
+ logger.Out = &bytes.Buffer{}
+ entry := NewEntry(logger)
+ entry.WithField("err", errBoom).Panicf("kaboom %v", true)
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go
new file mode 100644
index 0000000..a1623ec
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go
@@ -0,0 +1,50 @@
+package main
+
+import (
+ "github.com/Sirupsen/logrus"
+)
+
+var log = logrus.New()
+
+func init() {
+ log.Formatter = new(logrus.JSONFormatter)
+ log.Formatter = new(logrus.TextFormatter) // default
+ log.Level = logrus.DebugLevel
+}
+
+func main() {
+ defer func() {
+ err := recover()
+ if err != nil {
+ log.WithFields(logrus.Fields{
+ "omg": true,
+ "err": err,
+ "number": 100,
+ }).Fatal("The ice breaks!")
+ }
+ }()
+
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "number": 8,
+ }).Debug("Started observing beach")
+
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+
+ log.WithFields(logrus.Fields{
+ "omg": true,
+ "number": 122,
+ }).Warn("The group's number increased tremendously!")
+
+ log.WithFields(logrus.Fields{
+ "temperature": -4,
+ }).Debug("Temperature changes")
+
+ log.WithFields(logrus.Fields{
+ "animal": "orca",
+ "size": 9009,
+ }).Panic("It's over 9000!")
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go
new file mode 100644
index 0000000..cb5759a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go
@@ -0,0 +1,30 @@
+package main
+
+import (
+ "github.com/Sirupsen/logrus"
+ "github.com/Sirupsen/logrus/hooks/airbrake"
+)
+
+var log = logrus.New()
+
+func init() {
+ log.Formatter = new(logrus.TextFormatter) // default
+ log.Hooks.Add(airbrake.NewHook("https://example.com", "xyz", "development"))
+}
+
+func main() {
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+
+ log.WithFields(logrus.Fields{
+ "omg": true,
+ "number": 122,
+ }).Warn("The group's number increased tremendously!")
+
+ log.WithFields(logrus.Fields{
+ "omg": true,
+ "number": 100,
+ }).Fatal("The ice breaks!")
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go
new file mode 100644
index 0000000..a67e1b8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go
@@ -0,0 +1,188 @@
+package logrus
+
+import (
+ "io"
+)
+
+var (
+ // std is the name of the standard logger in stdlib `log`
+ std = New()
+)
+
+func StandardLogger() *Logger {
+ return std
+}
+
+// SetOutput sets the standard logger output.
+func SetOutput(out io.Writer) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Out = out
+}
+
+// SetFormatter sets the standard logger formatter.
+func SetFormatter(formatter Formatter) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Formatter = formatter
+}
+
+// SetLevel sets the standard logger level.
+func SetLevel(level Level) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Level = level
+}
+
+// GetLevel returns the standard logger level.
+func GetLevel() Level {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ return std.Level
+}
+
+// AddHook adds a hook to the standard logger hooks.
+func AddHook(hook Hook) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Hooks.Add(hook)
+}
+
+// WithField creates an entry from the standard logger and adds a field to
+// it. If you want multiple fields, use `WithFields`.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Error,
+// Fatal or Panic on the Entry it returns.
+func WithField(key string, value interface{}) *Entry {
+ return std.WithField(key, value)
+}
+
+// WithFields creates an entry from the standard logger and adds multiple
+// fields to it. This is simply a helper for `WithField`, invoking it
+// once for each field.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Error,
+// Fatal or Panic on the Entry it returns.
+func WithFields(fields Fields) *Entry {
+ return std.WithFields(fields)
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func Debug(args ...interface{}) {
+ std.Debug(args...)
+}
+
+// Print logs a message at level Info on the standard logger.
+func Print(args ...interface{}) {
+ std.Print(args...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func Info(args ...interface{}) {
+ std.Info(args...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func Warn(args ...interface{}) {
+ std.Warn(args...)
+}
+
+// Warning logs a message at level Warn on the standard logger.
+func Warning(args ...interface{}) {
+ std.Warning(args...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func Error(args ...interface{}) {
+ std.Error(args...)
+}
+
+// Panic logs a message at level Panic on the standard logger.
+func Panic(args ...interface{}) {
+ std.Panic(args...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func Fatal(args ...interface{}) {
+ std.Fatal(args...)
+}
+
+// Debugf logs a message at level Debug on the standard logger.
+func Debugf(format string, args ...interface{}) {
+ std.Debugf(format, args...)
+}
+
+// Printf logs a message at level Info on the standard logger.
+func Printf(format string, args ...interface{}) {
+ std.Printf(format, args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func Infof(format string, args ...interface{}) {
+ std.Infof(format, args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func Warnf(format string, args ...interface{}) {
+ std.Warnf(format, args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func Warningf(format string, args ...interface{}) {
+ std.Warningf(format, args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func Errorf(format string, args ...interface{}) {
+ std.Errorf(format, args...)
+}
+
+// Panicf logs a message at level Panic on the standard logger.
+func Panicf(format string, args ...interface{}) {
+ std.Panicf(format, args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func Fatalf(format string, args ...interface{}) {
+ std.Fatalf(format, args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.
+func Debugln(args ...interface{}) {
+ std.Debugln(args...)
+}
+
+// Println logs a message at level Info on the standard logger.
+func Println(args ...interface{}) {
+ std.Println(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.
+func Infoln(args ...interface{}) {
+ std.Infoln(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.
+func Warnln(args ...interface{}) {
+ std.Warnln(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger.
+func Warningln(args ...interface{}) {
+ std.Warningln(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.
+func Errorln(args ...interface{}) {
+ std.Errorln(args...)
+}
+
+// Panicln logs a message at level Panic on the standard logger.
+func Panicln(args ...interface{}) {
+ std.Panicln(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger.
+func Fatalln(args ...interface{}) {
+ std.Fatalln(args...)
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go
new file mode 100644
index 0000000..104d689
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go
@@ -0,0 +1,48 @@
+package logrus
+
+import "time"
+
+const DefaultTimestampFormat = time.RFC3339
+
+// The Formatter interface is used to implement a custom Formatter. It takes an
+// `Entry`. It exposes all the fields, including the default ones:
+//
+// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
+// * `entry.Data["time"]`. The timestamp.
+// * `entry.Data["level"]`. The level the entry was logged at.
+//
+// Any additional fields added with `WithField` or `WithFields` are also in
+// `entry.Data`. Format is expected to return an array of bytes which are then
+// logged to `logger.Out`.
+type Formatter interface {
+ Format(*Entry) ([]byte, error)
+}
+
+// This is to not silently overwrite `time`, `msg` and `level` fields when
+// dumping it. If this code wasn't there, doing:
+//
+// logrus.WithField("level", 1).Info("hello")
+//
+// would just silently drop the user-provided level. Instead, with this code,
+// it'll be logged as:
+//
+// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
+//
+// It's not exported because it's still using Data in an opinionated way. It's to
+// avoid code duplication between the two default formatters.
+func prefixFieldClashes(data Fields) {
+ _, ok := data["time"]
+ if ok {
+ data["fields.time"] = data["time"]
+ }
+
+ _, ok = data["msg"]
+ if ok {
+ data["fields.msg"] = data["msg"]
+ }
+
+ _, ok = data["level"]
+ if ok {
+ data["fields.level"] = data["level"]
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter_bench_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter_bench_test.go
new file mode 100644
index 0000000..c6d290c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter_bench_test.go
@@ -0,0 +1,98 @@
+package logrus
+
+import (
+ "fmt"
+ "testing"
+ "time"
+)
+
+// smallFields is a small size data set for benchmarking
+var smallFields = Fields{
+ "foo": "bar",
+ "baz": "qux",
+ "one": "two",
+ "three": "four",
+}
+
+// largeFields is a large size data set for benchmarking
+var largeFields = Fields{
+ "foo": "bar",
+ "baz": "qux",
+ "one": "two",
+ "three": "four",
+ "five": "six",
+ "seven": "eight",
+ "nine": "ten",
+ "eleven": "twelve",
+ "thirteen": "fourteen",
+ "fifteen": "sixteen",
+ "seventeen": "eighteen",
+ "nineteen": "twenty",
+ "a": "b",
+ "c": "d",
+ "e": "f",
+ "g": "h",
+ "i": "j",
+ "k": "l",
+ "m": "n",
+ "o": "p",
+ "q": "r",
+ "s": "t",
+ "u": "v",
+ "w": "x",
+ "y": "z",
+ "this": "will",
+ "make": "thirty",
+ "entries": "yeah",
+}
+
+var errorFields = Fields{
+ "foo": fmt.Errorf("bar"),
+ "baz": fmt.Errorf("qux"),
+}
+
+func BenchmarkErrorTextFormatter(b *testing.B) {
+ doBenchmark(b, &TextFormatter{DisableColors: true}, errorFields)
+}
+
+func BenchmarkSmallTextFormatter(b *testing.B) {
+ doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields)
+}
+
+func BenchmarkLargeTextFormatter(b *testing.B) {
+ doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields)
+}
+
+func BenchmarkSmallColoredTextFormatter(b *testing.B) {
+ doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields)
+}
+
+func BenchmarkLargeColoredTextFormatter(b *testing.B) {
+ doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields)
+}
+
+func BenchmarkSmallJSONFormatter(b *testing.B) {
+ doBenchmark(b, &JSONFormatter{}, smallFields)
+}
+
+func BenchmarkLargeJSONFormatter(b *testing.B) {
+ doBenchmark(b, &JSONFormatter{}, largeFields)
+}
+
+func doBenchmark(b *testing.B, formatter Formatter, fields Fields) {
+ entry := &Entry{
+ Time: time.Time{},
+ Level: InfoLevel,
+ Message: "message",
+ Data: fields,
+ }
+ var d []byte
+ var err error
+ for i := 0; i < b.N; i++ {
+ d, err = formatter.Format(entry)
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.SetBytes(int64(len(d)))
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go
new file mode 100644
index 0000000..8ea93dd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go
@@ -0,0 +1,56 @@
+package logstash
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/Sirupsen/logrus"
+)
+
+// LogstashFormatter generates json in logstash format.
+// Logstash site: http://logstash.net/
+type LogstashFormatter struct {
+ Type string // if not empty use for logstash type field.
+
+ // TimestampFormat sets the format used for timestamps.
+ TimestampFormat string
+}
+
+func (f *LogstashFormatter) Format(entry *logrus.Entry) ([]byte, error) {
+ entry.Data["@version"] = 1
+
+ if f.TimestampFormat == "" {
+ f.TimestampFormat = logrus.DefaultTimestampFormat
+ }
+
+ entry.Data["@timestamp"] = entry.Time.Format(f.TimestampFormat)
+
+ // set message field
+ v, ok := entry.Data["message"]
+ if ok {
+ entry.Data["fields.message"] = v
+ }
+ entry.Data["message"] = entry.Message
+
+ // set level field
+ v, ok = entry.Data["level"]
+ if ok {
+ entry.Data["fields.level"] = v
+ }
+ entry.Data["level"] = entry.Level.String()
+
+ // set type field
+ if f.Type != "" {
+ v, ok = entry.Data["type"]
+ if ok {
+ entry.Data["fields.type"] = v
+ }
+ entry.Data["type"] = f.Type
+ }
+
+ serialized, err := json.Marshal(entry.Data)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ }
+ return append(serialized, '\n'), nil
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash_test.go
new file mode 100644
index 0000000..d8814a0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash_test.go
@@ -0,0 +1,52 @@
+package logstash
+
+import (
+ "bytes"
+ "encoding/json"
+ "github.com/Sirupsen/logrus"
+ "github.com/stretchr/testify/assert"
+ "testing"
+)
+
+func TestLogstashFormatter(t *testing.T) {
+ assert := assert.New(t)
+
+ lf := LogstashFormatter{Type: "abc"}
+
+ fields := logrus.Fields{
+ "message": "def",
+ "level": "ijk",
+ "type": "lmn",
+ "one": 1,
+ "pi": 3.14,
+ "bool": true,
+ }
+
+ entry := logrus.WithFields(fields)
+ entry.Message = "msg"
+ entry.Level = logrus.InfoLevel
+
+ b, _ := lf.Format(entry)
+
+ var data map[string]interface{}
+ dec := json.NewDecoder(bytes.NewReader(b))
+ dec.UseNumber()
+ dec.Decode(&data)
+
+ // base fields
+ assert.Equal(json.Number("1"), data["@version"])
+ assert.NotEmpty(data["@timestamp"])
+ assert.Equal("abc", data["type"])
+ assert.Equal("msg", data["message"])
+ assert.Equal("info", data["level"])
+
+ // substituted fields
+ assert.Equal("def", data["fields.message"])
+ assert.Equal("ijk", data["fields.level"])
+ assert.Equal("lmn", data["fields.type"])
+
+ // formats
+ assert.Equal(json.Number("1"), data["one"])
+ assert.Equal(json.Number("3.14"), data["pi"])
+ assert.Equal(true, data["bool"])
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hook_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hook_test.go
new file mode 100644
index 0000000..13f34cb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hook_test.go
@@ -0,0 +1,122 @@
+package logrus
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+type TestHook struct {
+ Fired bool
+}
+
+func (hook *TestHook) Fire(entry *Entry) error {
+ hook.Fired = true
+ return nil
+}
+
+func (hook *TestHook) Levels() []Level {
+ return []Level{
+ DebugLevel,
+ InfoLevel,
+ WarnLevel,
+ ErrorLevel,
+ FatalLevel,
+ PanicLevel,
+ }
+}
+
+func TestHookFires(t *testing.T) {
+ hook := new(TestHook)
+
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Hooks.Add(hook)
+ assert.Equal(t, hook.Fired, false)
+
+ log.Print("test")
+ }, func(fields Fields) {
+ assert.Equal(t, hook.Fired, true)
+ })
+}
+
+type ModifyHook struct {
+}
+
+func (hook *ModifyHook) Fire(entry *Entry) error {
+ entry.Data["wow"] = "whale"
+ return nil
+}
+
+func (hook *ModifyHook) Levels() []Level {
+ return []Level{
+ DebugLevel,
+ InfoLevel,
+ WarnLevel,
+ ErrorLevel,
+ FatalLevel,
+ PanicLevel,
+ }
+}
+
+func TestHookCanModifyEntry(t *testing.T) {
+ hook := new(ModifyHook)
+
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Hooks.Add(hook)
+ log.WithField("wow", "elephant").Print("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["wow"], "whale")
+ })
+}
+
+func TestCanFireMultipleHooks(t *testing.T) {
+ hook1 := new(ModifyHook)
+ hook2 := new(TestHook)
+
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Hooks.Add(hook1)
+ log.Hooks.Add(hook2)
+
+ log.WithField("wow", "elephant").Print("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["wow"], "whale")
+ assert.Equal(t, hook2.Fired, true)
+ })
+}
+
+type ErrorHook struct {
+ Fired bool
+}
+
+func (hook *ErrorHook) Fire(entry *Entry) error {
+ hook.Fired = true
+ return nil
+}
+
+func (hook *ErrorHook) Levels() []Level {
+ return []Level{
+ ErrorLevel,
+ }
+}
+
+func TestErrorHookShouldntFireOnInfo(t *testing.T) {
+ hook := new(ErrorHook)
+
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Hooks.Add(hook)
+ log.Info("test")
+ }, func(fields Fields) {
+ assert.Equal(t, hook.Fired, false)
+ })
+}
+
+func TestErrorHookShouldFireOnError(t *testing.T) {
+ hook := new(ErrorHook)
+
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Hooks.Add(hook)
+ log.Error("test")
+ }, func(fields Fields) {
+ assert.Equal(t, hook.Fired, true)
+ })
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go
new file mode 100644
index 0000000..3f151cd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go
@@ -0,0 +1,34 @@
+package logrus
+
+// A hook to be fired when logging on the logging levels returned from
+// `Levels()` on your implementation of the interface. Note that this is not
+// fired in a goroutine or via a channel with workers; if you don't wish for
+// the logging calls for levels returned from `Levels()` to block, you should
+// handle that concurrency yourself.
+type Hook interface {
+ Levels() []Level
+ Fire(*Entry) error
+}
+
+// Internal type for storing the hooks on a logger instance.
+type LevelHooks map[Level][]Hook
+
+// Add a hook to an instance of logger. This is called with
+// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
+func (hooks LevelHooks) Add(hook Hook) {
+ for _, level := range hook.Levels() {
+ hooks[level] = append(hooks[level], hook)
+ }
+}
+
+// Fire all the hooks for the passed level. Used by `entry.log` to fire
+// appropriate hooks for a log entry.
+func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
+ for _, hook := range hooks[level] {
+ if err := hook.Fire(entry); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go
new file mode 100644
index 0000000..b0502c3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go
@@ -0,0 +1,54 @@
+package airbrake
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/tobi/airbrake-go"
+)
+
+// airbrakeHook sends exceptions to an exception-tracking service compatible
+// with the Airbrake API.
+type airbrakeHook struct {
+ APIKey string
+ Endpoint string
+ Environment string
+}
+
+func NewHook(endpoint, apiKey, env string) *airbrakeHook {
+ return &airbrakeHook{
+ APIKey: apiKey,
+ Endpoint: endpoint,
+ Environment: env,
+ }
+}
+
+func (hook *airbrakeHook) Fire(entry *logrus.Entry) error {
+ airbrake.ApiKey = hook.APIKey
+ airbrake.Endpoint = hook.Endpoint
+ airbrake.Environment = hook.Environment
+
+ var notifyErr error
+ err, ok := entry.Data["error"].(error)
+ if ok {
+ notifyErr = err
+ } else {
+ notifyErr = errors.New(entry.Message)
+ }
+
+ airErr := airbrake.Notify(notifyErr)
+ if airErr != nil {
+ return fmt.Errorf("Failed to send error to Airbrake: %s", airErr)
+ }
+
+ return nil
+}
+
+func (hook *airbrakeHook) Levels() []logrus.Level {
+ return []logrus.Level{
+ logrus.ErrorLevel,
+ logrus.FatalLevel,
+ logrus.PanicLevel,
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake_test.go
new file mode 100644
index 0000000..058a91e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake_test.go
@@ -0,0 +1,133 @@
+package airbrake
+
+import (
+ "encoding/xml"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+ "time"
+
+ "github.com/Sirupsen/logrus"
+)
+
+type notice struct {
+ Error NoticeError `xml:"error"`
+}
+type NoticeError struct {
+ Class string `xml:"class"`
+ Message string `xml:"message"`
+}
+
+type customErr struct {
+ msg string
+}
+
+func (e *customErr) Error() string {
+ return e.msg
+}
+
+const (
+ testAPIKey = "abcxyz"
+ testEnv = "development"
+ expectedClass = "*airbrake.customErr"
+ expectedMsg = "foo"
+ unintendedMsg = "Airbrake will not see this string"
+)
+
+var (
+ noticeError = make(chan NoticeError, 1)
+)
+
+// TestLogEntryMessageReceived checks if invoking Logrus' log.Error
+// method causes an XML payload containing the log entry message to be received
+// by an HTTP server emulating an Airbrake-compatible endpoint.
+func TestLogEntryMessageReceived(t *testing.T) {
+ log := logrus.New()
+ ts := startAirbrakeServer(t)
+ defer ts.Close()
+
+ hook := NewHook(ts.URL, testAPIKey, "production")
+ log.Hooks.Add(hook)
+
+ log.Error(expectedMsg)
+
+ select {
+ case received := <-noticeError:
+ if received.Message != expectedMsg {
+ t.Errorf("Unexpected message received: %s", received.Message)
+ }
+ case <-time.After(time.Second):
+ t.Error("Timed out; no notice received by Airbrake API")
+ }
+}
+
+// TestLogEntryWithErrorReceived confirms that, when passing an error type using
+// logrus.Fields, an HTTP server emulating an Airbrake endpoint receives the
+// error message returned by the Error() method on the error interface
+// rather than the logrus.Entry.Message string.
+func TestLogEntryWithErrorReceived(t *testing.T) {
+ log := logrus.New()
+ ts := startAirbrakeServer(t)
+ defer ts.Close()
+
+ hook := NewHook(ts.URL, testAPIKey, "production")
+ log.Hooks.Add(hook)
+
+ log.WithFields(logrus.Fields{
+ "error": &customErr{expectedMsg},
+ }).Error(unintendedMsg)
+
+ select {
+ case received := <-noticeError:
+ if received.Message != expectedMsg {
+ t.Errorf("Unexpected message received: %s", received.Message)
+ }
+ if received.Class != expectedClass {
+ t.Errorf("Unexpected error class: %s", received.Class)
+ }
+ case <-time.After(time.Second):
+ t.Error("Timed out; no notice received by Airbrake API")
+ }
+}
+
+// TestLogEntryWithNonErrorTypeNotReceived confirms that, when passing a
+// non-error type using logrus.Fields, an HTTP server emulating an Airbrake
+// endpoint receives the logrus.Entry.Message string.
+//
+// Only error types are supported when setting the 'error' field using
+// logrus.WithFields().
+func TestLogEntryWithNonErrorTypeNotReceived(t *testing.T) {
+ log := logrus.New()
+ ts := startAirbrakeServer(t)
+ defer ts.Close()
+
+ hook := NewHook(ts.URL, testAPIKey, "production")
+ log.Hooks.Add(hook)
+
+ log.WithFields(logrus.Fields{
+ "error": expectedMsg,
+ }).Error(unintendedMsg)
+
+ select {
+ case received := <-noticeError:
+ if received.Message != unintendedMsg {
+ t.Errorf("Unexpected message received: %s", received.Message)
+ }
+ case <-time.After(time.Second):
+ t.Error("Timed out; no notice received by Airbrake API")
+ }
+}
+
+func startAirbrakeServer(t *testing.T) *httptest.Server {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ var notice notice
+ if err := xml.NewDecoder(r.Body).Decode(&notice); err != nil {
+ t.Error(err)
+ }
+ r.Body.Close()
+
+ noticeError <- notice.Error
+ }))
+
+ return ts
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag.go
new file mode 100644
index 0000000..d20a0f5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag.go
@@ -0,0 +1,68 @@
+package logrus_bugsnag
+
+import (
+ "errors"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/bugsnag/bugsnag-go"
+)
+
+type bugsnagHook struct{}
+
+// ErrBugsnagUnconfigured is returned if NewBugsnagHook is called before
+// bugsnag.Configure. Bugsnag must be configured before the hook.
+var ErrBugsnagUnconfigured = errors.New("bugsnag must be configured before installing this logrus hook")
+
+// ErrBugsnagSendFailed indicates that the hook failed to submit an error to
+// bugsnag. The error was successfully generated, but `bugsnag.Notify()`
+// failed.
+type ErrBugsnagSendFailed struct {
+ err error
+}
+
+func (e ErrBugsnagSendFailed) Error() string {
+ return "failed to send error to Bugsnag: " + e.err.Error()
+}
+
+// NewBugsnagHook initializes a logrus hook which sends exceptions to an
+// exception-tracking service compatible with the Bugsnag API. Before using
+// this hook, you must call bugsnag.Configure(). The returned object should be
+// registered with a log via `AddHook()`.
+//
+// Entries that trigger an Error, Fatal or Panic should now include an "error"
+// field to send to Bugsnag.
+func NewBugsnagHook() (*bugsnagHook, error) {
+ if bugsnag.Config.APIKey == "" {
+ return nil, ErrBugsnagUnconfigured
+ }
+ return &bugsnagHook{}, nil
+}
+
+// Fire forwards an error to Bugsnag. Given a logrus.Entry, it extracts the
+// "error" field (or the Message if the error isn't present) and sends it off.
+func (hook *bugsnagHook) Fire(entry *logrus.Entry) error {
+ var notifyErr error
+ err, ok := entry.Data["error"].(error)
+ if ok {
+ notifyErr = err
+ } else {
+ notifyErr = errors.New(entry.Message)
+ }
+
+ bugsnagErr := bugsnag.Notify(notifyErr)
+ if bugsnagErr != nil {
+ return ErrBugsnagSendFailed{bugsnagErr}
+ }
+
+ return nil
+}
+
+// Levels enumerates the log levels on which the error should be forwarded to
+// bugsnag: everything at or above the "Error" level.
+func (hook *bugsnagHook) Levels() []logrus.Level {
+ return []logrus.Level{
+ logrus.ErrorLevel,
+ logrus.FatalLevel,
+ logrus.PanicLevel,
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag_test.go
new file mode 100644
index 0000000..e9ea298
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag_test.go
@@ -0,0 +1,64 @@
+package logrus_bugsnag
+
+import (
+ "encoding/json"
+ "errors"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+ "time"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/bugsnag/bugsnag-go"
+)
+
+type notice struct {
+ Events []struct {
+ Exceptions []struct {
+ Message string `json:"message"`
+ } `json:"exceptions"`
+ } `json:"events"`
+}
+
+func TestNoticeReceived(t *testing.T) {
+ msg := make(chan string, 1)
+ expectedMsg := "foo"
+
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ var notice notice
+ data, _ := ioutil.ReadAll(r.Body)
+ if err := json.Unmarshal(data, &notice); err != nil {
+ t.Error(err)
+ }
+ _ = r.Body.Close()
+
+ msg <- notice.Events[0].Exceptions[0].Message
+ }))
+ defer ts.Close()
+
+ hook := &bugsnagHook{}
+
+ bugsnag.Configure(bugsnag.Configuration{
+ Endpoint: ts.URL,
+ ReleaseStage: "production",
+ APIKey: "12345678901234567890123456789012",
+ Synchronous: true,
+ })
+
+ log := logrus.New()
+ log.Hooks.Add(hook)
+
+ log.WithFields(logrus.Fields{
+ "error": errors.New(expectedMsg),
+ }).Error("Bugsnag will not see this string")
+
+ select {
+ case received := <-msg:
+ if received != expectedMsg {
+ t.Errorf("Unexpected message received: %s", received)
+ }
+ case <-time.After(time.Second):
+ t.Error("Timed out; no notice received by Bugsnag API")
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md
new file mode 100644
index 0000000..ae61e92
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md
@@ -0,0 +1,28 @@
+# Papertrail Hook for Logrus
+
+[Papertrail](https://papertrailapp.com) provides hosted log management. Once stored in Papertrail, you can [group](http://help.papertrailapp.com/kb/how-it-works/groups/) your logs on various dimensions, [search](http://help.papertrailapp.com/kb/how-it-works/search-syntax) them, and trigger [alerts](http://help.papertrailapp.com/kb/how-it-works/alerts).
+
+In most deployments, you'll want to send logs to Papertrail via their [remote_syslog](http://help.papertrailapp.com/kb/configuration/configuring-centralized-logging-from-text-log-files-in-unix/) daemon, which requires no application-specific configuration. This hook is intended for relatively low-volume logging, likely in managed cloud hosting deployments where installing `remote_syslog` is not possible.
+
+## Usage
+
+You can find your Papertrail UDP port on your [Papertrail account page](https://papertrailapp.com/account/destinations). Substitute it below for `YOUR_PAPERTRAIL_UDP_PORT`.
+
+For `YOUR_APP_NAME`, substitute a short string that will readily identify your application or service in the logs.
+
+```go
+import (
+ "log/syslog"
+ "github.com/Sirupsen/logrus"
+ "github.com/Sirupsen/logrus/hooks/papertrail"
+)
+
+func main() {
+ log := logrus.New()
+ hook, err := logrus_papertrail.NewPapertrailHook("logs.papertrailapp.com", YOUR_PAPERTRAIL_UDP_PORT, YOUR_APP_NAME)
+
+ if err == nil {
+ log.Hooks.Add(hook)
+ }
+}
+```
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go
new file mode 100644
index 0000000..c0f10c1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go
@@ -0,0 +1,55 @@
+package logrus_papertrail
+
+import (
+ "fmt"
+ "net"
+ "os"
+ "time"
+
+ "github.com/Sirupsen/logrus"
+)
+
+const (
+ format = "Jan 2 15:04:05"
+)
+
+// PapertrailHook to send logs to a logging service compatible with the Papertrail API.
+type PapertrailHook struct {
+ Host string
+ Port int
+ AppName string
+ UDPConn net.Conn
+}
+
+// NewPapertrailHook creates a hook to be added to an instance of logger.
+func NewPapertrailHook(host string, port int, appName string) (*PapertrailHook, error) {
+ conn, err := net.Dial("udp", fmt.Sprintf("%s:%d", host, port))
+ return &PapertrailHook{host, port, appName, conn}, err
+}
+
+// Fire is called when a log event is fired.
+func (hook *PapertrailHook) Fire(entry *logrus.Entry) error {
+ date := time.Now().Format(format)
+ msg, _ := entry.String()
+ payload := fmt.Sprintf("<22> %s %s: %s", date, hook.AppName, msg)
+
+ bytesWritten, err := hook.UDPConn.Write([]byte(payload))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Unable to send log line to Papertrail via UDP. Wrote %d bytes before error: %v", bytesWritten, err)
+ return err
+ }
+
+ return nil
+}
+
+// Levels returns the available logging levels.
+func (hook *PapertrailHook) Levels() []logrus.Level {
+ return []logrus.Level{
+ logrus.PanicLevel,
+ logrus.FatalLevel,
+ logrus.ErrorLevel,
+ logrus.WarnLevel,
+ logrus.InfoLevel,
+ logrus.DebugLevel,
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go
new file mode 100644
index 0000000..96318d0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go
@@ -0,0 +1,26 @@
+package logrus_papertrail
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/stvp/go-udp-testing"
+)
+
+func TestWritingToUDP(t *testing.T) {
+ port := 16661
+ udp.SetAddr(fmt.Sprintf(":%d", port))
+
+ hook, err := NewPapertrailHook("localhost", port, "test")
+ if err != nil {
+ t.Errorf("Unable to connect to local UDP server.")
+ }
+
+ log := logrus.New()
+ log.Hooks.Add(hook)
+
+ udp.ShouldReceive(t, "foo", func() {
+ log.Info("foo")
+ })
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/README.md b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/README.md
new file mode 100644
index 0000000..31de654
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/README.md
@@ -0,0 +1,111 @@
+# Sentry Hook for Logrus
+
+[Sentry](https://getsentry.com) provides both self-hosted and hosted
+solutions for exception tracking.
+Both client and server are
+[open source](https://github.com/getsentry/sentry).
+
+## Usage
+
+Every Sentry application defined on the server gets a different
+[DSN](https://www.getsentry.com/docs/). In the example below, replace
+`YOUR_DSN` with the one created for your application.
+
+```go
+import (
+ "github.com/Sirupsen/logrus"
+ "github.com/Sirupsen/logrus/hooks/sentry"
+)
+
+func main() {
+ log := logrus.New()
+ hook, err := logrus_sentry.NewSentryHook(YOUR_DSN, []logrus.Level{
+ logrus.PanicLevel,
+ logrus.FatalLevel,
+ logrus.ErrorLevel,
+ })
+
+ if err == nil {
+ log.Hooks.Add(hook)
+ }
+}
+```
+
+If you wish to initialize a SentryHook with tags, you can use the `NewWithTagsSentryHook` constructor to provide default tags:
+
+```go
+tags := map[string]string{
+ "site": "example.com",
+}
+levels := []logrus.Level{
+ logrus.PanicLevel,
+ logrus.FatalLevel,
+ logrus.ErrorLevel,
+}
+hook, err := logrus_sentry.NewWithTagsSentryHook(YOUR_DSN, tags, levels)
+
+```
+
+If you wish to initialize a SentryHook with an already initialized raven client, you can use
+the `NewWithClientSentryHook` constructor:
+
+```go
+import (
+ "github.com/Sirupsen/logrus"
+ "github.com/Sirupsen/logrus/hooks/sentry"
+ "github.com/getsentry/raven-go"
+)
+
+func main() {
+ log := logrus.New()
+
+ client, err := raven.New(YOUR_DSN)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ hook, err := logrus_sentry.NewWithClientSentryHook(client, []logrus.Level{
+ logrus.PanicLevel,
+ logrus.FatalLevel,
+ logrus.ErrorLevel,
+ })
+
+ if err == nil {
+ log.Hooks.Add(hook)
+ }
+}
+```
+
+## Special fields
+
+Some logrus fields have a special meaning in this hook:
+`server_name`, `logger` and `http_request`.
+When logs are sent to Sentry, these fields are treated differently.
+- `server_name` (also known as hostname) is the name of the server which
+is logging the event (hostname.example.com).
+- `logger` is the part of the application which is logging the event.
+In Go this usually means setting it to the name of the package.
+- `http_request` is the incoming request (`*http.Request`). The detailed request data is sent to Sentry.
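+
+For example, a log call that populates these fields might look like the
+following sketch (values are illustrative):
+
+```go
+req, _ := http.NewRequest("GET", "http://example.com/", nil)
+
+log.WithFields(logrus.Fields{
+ "server_name": "api01.example.com", // sent as the Sentry server_name
+ "logger": "billing", // sent as the Sentry logger
+ "http_request": req, // attached as the HTTP request interface
+ "user_id": "42", // any other field is sent as extra data
+}).Error("charge failed")
+```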
+
+## Timeout
+
+`Timeout` is the time the Sentry hook will wait for a response
+from the Sentry server.
+
+If this time elapses with no response from
+the server, an error will be returned.
+
+If `Timeout` is set to 0, the SentryHook will not wait for a reply
+and will assume a correct delivery.
+
+The SentryHook has a default timeout of `100 milliseconds` when created
+with a call to `NewSentryHook`. This can be changed by assigning a value to the `Timeout` field:
+
+```go
+hook, _ := logrus_sentry.NewSentryHook(...)
+hook.Timeout = 20 * time.Second
+```
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry.go
new file mode 100644
index 0000000..cf88098
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry.go
@@ -0,0 +1,137 @@
+package logrus_sentry
+
+import (
+ "fmt"
+ "net/http"
+ "time"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/getsentry/raven-go"
+)
+
+var (
+ severityMap = map[logrus.Level]raven.Severity{
+ logrus.DebugLevel: raven.DEBUG,
+ logrus.InfoLevel: raven.INFO,
+ logrus.WarnLevel: raven.WARNING,
+ logrus.ErrorLevel: raven.ERROR,
+ logrus.FatalLevel: raven.FATAL,
+ logrus.PanicLevel: raven.FATAL,
+ }
+)
+
+func getAndDel(d logrus.Fields, key string) (string, bool) {
+ var (
+ ok bool
+ v interface{}
+ val string
+ )
+ if v, ok = d[key]; !ok {
+ return "", false
+ }
+
+ if val, ok = v.(string); !ok {
+ return "", false
+ }
+ delete(d, key)
+ return val, true
+}
+
+func getAndDelRequest(d logrus.Fields, key string) (*http.Request, bool) {
+ var (
+ ok bool
+ v interface{}
+ req *http.Request
+ )
+ if v, ok = d[key]; !ok {
+ return nil, false
+ }
+ if req, ok = v.(*http.Request); !ok || req == nil {
+ return nil, false
+ }
+ delete(d, key)
+ return req, true
+}
+
+// SentryHook delivers logs to a sentry server.
+type SentryHook struct {
+ // Timeout sets the time to wait for a delivery error from the sentry server.
+ // If this is set to zero, the hook will not wait for any response and will
+ // consider the message correctly sent.
+ Timeout time.Duration
+
+ client *raven.Client
+ levels []logrus.Level
+}
+
+// NewSentryHook creates a hook to be added to an instance of logger
+// and initializes the raven client.
+// This method sets the timeout to 100 milliseconds.
+func NewSentryHook(DSN string, levels []logrus.Level) (*SentryHook, error) {
+ client, err := raven.New(DSN)
+ if err != nil {
+ return nil, err
+ }
+ return &SentryHook{100 * time.Millisecond, client, levels}, nil
+}
+
+// NewWithTagsSentryHook creates a hook with tags to be added to an instance
+// of logger and initializes the raven client. This method sets the timeout to
+// 100 milliseconds.
+func NewWithTagsSentryHook(DSN string, tags map[string]string, levels []logrus.Level) (*SentryHook, error) {
+ client, err := raven.NewWithTags(DSN, tags)
+ if err != nil {
+ return nil, err
+ }
+ return &SentryHook{100 * time.Millisecond, client, levels}, nil
+}
+
+// NewWithClientSentryHook creates a hook using an initialized raven client.
+// This method sets the timeout to 100 milliseconds.
+func NewWithClientSentryHook(client *raven.Client, levels []logrus.Level) (*SentryHook, error) {
+ return &SentryHook{100 * time.Millisecond, client, levels}, nil
+}
+
+// Fire is called when an event should be sent to Sentry.
+// Special fields that Sentry uses to give more information to the server
+// are extracted from entry.Data (if they are found).
+// These fields are: logger, server_name and http_request.
+func (hook *SentryHook) Fire(entry *logrus.Entry) error {
+ packet := &raven.Packet{
+ Message: entry.Message,
+ Timestamp: raven.Timestamp(entry.Time),
+ Level: severityMap[entry.Level],
+ Platform: "go",
+ }
+
+ d := entry.Data
+
+ if logger, ok := getAndDel(d, "logger"); ok {
+ packet.Logger = logger
+ }
+ if serverName, ok := getAndDel(d, "server_name"); ok {
+ packet.ServerName = serverName
+ }
+ if req, ok := getAndDelRequest(d, "http_request"); ok {
+ packet.Interfaces = append(packet.Interfaces, raven.NewHttp(req))
+ }
+ packet.Extra = map[string]interface{}(d)
+
+ _, errCh := hook.client.Capture(packet, nil)
+ timeout := hook.Timeout
+ if timeout != 0 {
+ timeoutCh := time.After(timeout)
+ select {
+ case err := <-errCh:
+ return err
+ case <-timeoutCh:
+ return fmt.Errorf("no response from sentry server in %s", timeout)
+ }
+ }
+ return nil
+}
+
+// Levels returns the available logging levels.
+func (hook *SentryHook) Levels() []logrus.Level {
+ return hook.levels
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry_test.go
new file mode 100644
index 0000000..4a97bc6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry_test.go
@@ -0,0 +1,154 @@
+package logrus_sentry
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/getsentry/raven-go"
+)
+
+const (
+ message = "error message"
+ server_name = "testserver.internal"
+ logger_name = "test.logger"
+)
+
+func getTestLogger() *logrus.Logger {
+ l := logrus.New()
+ l.Out = ioutil.Discard
+ return l
+}
+
+func WithTestDSN(t *testing.T, tf func(string, <-chan *raven.Packet)) {
+ pch := make(chan *raven.Packet, 1)
+ s := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
+ defer req.Body.Close()
+ d := json.NewDecoder(req.Body)
+ p := &raven.Packet{}
+ err := d.Decode(p)
+ if err != nil {
+ t.Fatal(err.Error())
+ }
+
+ pch <- p
+ }))
+ defer s.Close()
+
+ fragments := strings.SplitN(s.URL, "://", 2)
+ dsn := fmt.Sprintf(
+ "%s://public:secret@%s/sentry/project-id",
+ fragments[0],
+ fragments[1],
+ )
+ tf(dsn, pch)
+}
+
+func TestSpecialFields(t *testing.T) {
+ WithTestDSN(t, func(dsn string, pch <-chan *raven.Packet) {
+ logger := getTestLogger()
+
+ hook, err := NewSentryHook(dsn, []logrus.Level{
+ logrus.ErrorLevel,
+ })
+
+ if err != nil {
+ t.Fatal(err.Error())
+ }
+ logger.Hooks.Add(hook)
+
+ req, _ := http.NewRequest("GET", "url", nil)
+ logger.WithFields(logrus.Fields{
+ "server_name": server_name,
+ "logger": logger_name,
+ "http_request": req,
+ }).Error(message)
+
+ packet := <-pch
+ if packet.Logger != logger_name {
+ t.Errorf("logger should have been %s, was %s", logger_name, packet.Logger)
+ }
+
+ if packet.ServerName != server_name {
+ t.Errorf("server_name should have been %s, was %s", server_name, packet.ServerName)
+ }
+ })
+}
+
+func TestSentryHandler(t *testing.T) {
+ WithTestDSN(t, func(dsn string, pch <-chan *raven.Packet) {
+ logger := getTestLogger()
+ hook, err := NewSentryHook(dsn, []logrus.Level{
+ logrus.ErrorLevel,
+ })
+ if err != nil {
+ t.Fatal(err.Error())
+ }
+ logger.Hooks.Add(hook)
+
+ logger.Error(message)
+ packet := <-pch
+ if packet.Message != message {
+ t.Errorf("message should have been %s, was %s", message, packet.Message)
+ }
+ })
+}
+
+func TestSentryWithClient(t *testing.T) {
+ WithTestDSN(t, func(dsn string, pch <-chan *raven.Packet) {
+ logger := getTestLogger()
+
+ client, _ := raven.New(dsn)
+
+ hook, err := NewWithClientSentryHook(client, []logrus.Level{
+ logrus.ErrorLevel,
+ })
+ if err != nil {
+ t.Fatal(err.Error())
+ }
+ logger.Hooks.Add(hook)
+
+ logger.Error(message)
+ packet := <-pch
+ if packet.Message != message {
+ t.Errorf("message should have been %s, was %s", message, packet.Message)
+ }
+ })
+}
+
+func TestSentryTags(t *testing.T) {
+ WithTestDSN(t, func(dsn string, pch <-chan *raven.Packet) {
+ logger := getTestLogger()
+ tags := map[string]string{
+ "site": "test",
+ }
+ levels := []logrus.Level{
+ logrus.ErrorLevel,
+ }
+
+ hook, err := NewWithTagsSentryHook(dsn, tags, levels)
+ if err != nil {
+ t.Fatal(err.Error())
+ }
+
+ logger.Hooks.Add(hook)
+
+ logger.Error(message)
+ packet := <-pch
+ expected := raven.Tags{
+ raven.Tag{
+ Key: "site",
+ Value: "test",
+ },
+ }
+ if !reflect.DeepEqual(packet.Tags, expected) {
+ t.Errorf("message should have been %s, was %s", message, packet.Message)
+ }
+ })
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md
new file mode 100644
index 0000000..4dbb8e7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md
@@ -0,0 +1,20 @@
+# Syslog Hooks for Logrus
+
+## Usage
+
+```go
+import (
+ "log/syslog"
+ "github.com/Sirupsen/logrus"
+ logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
+)
+
+func main() {
+ log := logrus.New()
+ hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+
+ if err == nil {
+ log.Hooks.Add(hook)
+ }
+}
+```
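+
+To log to the local syslog daemon instead of a remote server, you can pass
+empty strings for the network and address arguments; `log/syslog` then dials
+the local syslog socket (a minimal sketch of the same setup):
+
+```go
+import (
+ "log/syslog"
+ "github.com/Sirupsen/logrus"
+ logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
+)
+
+func main() {
+ log := logrus.New()
+ hook, err := logrus_syslog.NewSyslogHook("", "", syslog.LOG_INFO, "")
+
+ if err == nil {
+ log.Hooks.Add(hook)
+ }
+}
+```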
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go
new file mode 100644
index 0000000..b6fa374
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go
@@ -0,0 +1,59 @@
+package logrus_syslog
+
+import (
+ "fmt"
+ "github.com/Sirupsen/logrus"
+ "log/syslog"
+ "os"
+)
+
+// SyslogHook to send logs via syslog.
+type SyslogHook struct {
+ Writer *syslog.Writer
+ SyslogNetwork string
+ SyslogRaddr string
+}
+
+// NewSyslogHook creates a hook to be added to an instance of logger. This is
+// called with
+// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")`
+// `if err == nil { log.Hooks.Add(hook) }`
+func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) {
+ w, err := syslog.Dial(network, raddr, priority, tag)
+ return &SyslogHook{w, network, raddr}, err
+}
+
+func (hook *SyslogHook) Fire(entry *logrus.Entry) error {
+ line, err := entry.String()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err)
+ return err
+ }
+
+ switch entry.Level {
+ case logrus.PanicLevel:
+ return hook.Writer.Crit(line)
+ case logrus.FatalLevel:
+ return hook.Writer.Crit(line)
+ case logrus.ErrorLevel:
+ return hook.Writer.Err(line)
+ case logrus.WarnLevel:
+ return hook.Writer.Warning(line)
+ case logrus.InfoLevel:
+ return hook.Writer.Info(line)
+ case logrus.DebugLevel:
+ return hook.Writer.Debug(line)
+ default:
+ return nil
+ }
+}
+
+func (hook *SyslogHook) Levels() []logrus.Level {
+ return []logrus.Level{
+ logrus.PanicLevel,
+ logrus.FatalLevel,
+ logrus.ErrorLevel,
+ logrus.WarnLevel,
+ logrus.InfoLevel,
+ logrus.DebugLevel,
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go
new file mode 100644
index 0000000..42762dc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go
@@ -0,0 +1,26 @@
+package logrus_syslog
+
+import (
+ "github.com/Sirupsen/logrus"
+ "log/syslog"
+ "testing"
+)
+
+func TestLocalhostAddAndPrint(t *testing.T) {
+ log := logrus.New()
+ hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+
+ if err != nil {
+ t.Errorf("Unable to connect to local syslog.")
+ }
+
+ log.Hooks.Add(hook)
+
+ for _, level := range hook.Levels() {
+ if len(log.Hooks[level]) != 1 {
+ t.Errorf("SyslogHook was not added. The length of log.Hooks[%v]: %v", level, len(log.Hooks[level]))
+ }
+ }
+
+ log.Info("Congratulations!")
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go
new file mode 100644
index 0000000..2ad6dc5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go
@@ -0,0 +1,41 @@
+package logrus
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+type JSONFormatter struct {
+ // TimestampFormat sets the format used for marshaling timestamps.
+ TimestampFormat string
+}
+
+func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
+ data := make(Fields, len(entry.Data)+3)
+ for k, v := range entry.Data {
+ switch v := v.(type) {
+ case error:
+ // Otherwise errors are ignored by `encoding/json`
+ // https://github.com/Sirupsen/logrus/issues/137
+ data[k] = v.Error()
+ default:
+ data[k] = v
+ }
+ }
+ prefixFieldClashes(data)
+
+ timestampFormat := f.TimestampFormat
+ if timestampFormat == "" {
+ timestampFormat = DefaultTimestampFormat
+ }
+
+ data["time"] = entry.Time.Format(timestampFormat)
+ data["msg"] = entry.Message
+ data["level"] = entry.Level.String()
+
+ serialized, err := json.Marshal(data)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ }
+ return append(serialized, '\n'), nil
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter_test.go
new file mode 100644
index 0000000..1d70873
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter_test.go
@@ -0,0 +1,120 @@
+package logrus
+
+import (
+ "encoding/json"
+ "errors"
+
+ "testing"
+)
+
+func TestErrorNotLost(t *testing.T) {
+ formatter := &JSONFormatter{}
+
+ b, err := formatter.Format(WithField("error", errors.New("wild walrus")))
+ if err != nil {
+ t.Fatal("Unable to format entry: ", err)
+ }
+
+ entry := make(map[string]interface{})
+ err = json.Unmarshal(b, &entry)
+ if err != nil {
+ t.Fatal("Unable to unmarshal formatted entry: ", err)
+ }
+
+ if entry["error"] != "wild walrus" {
+ t.Fatal("Error field not set")
+ }
+}
+
+func TestErrorNotLostOnFieldNotNamedError(t *testing.T) {
+ formatter := &JSONFormatter{}
+
+ b, err := formatter.Format(WithField("omg", errors.New("wild walrus")))
+ if err != nil {
+ t.Fatal("Unable to format entry: ", err)
+ }
+
+ entry := make(map[string]interface{})
+ err = json.Unmarshal(b, &entry)
+ if err != nil {
+ t.Fatal("Unable to unmarshal formatted entry: ", err)
+ }
+
+ if entry["omg"] != "wild walrus" {
+ t.Fatal("Error field not set")
+ }
+}
+
+func TestFieldClashWithTime(t *testing.T) {
+ formatter := &JSONFormatter{}
+
+ b, err := formatter.Format(WithField("time", "right now!"))
+ if err != nil {
+ t.Fatal("Unable to format entry: ", err)
+ }
+
+ entry := make(map[string]interface{})
+ err = json.Unmarshal(b, &entry)
+ if err != nil {
+ t.Fatal("Unable to unmarshal formatted entry: ", err)
+ }
+
+ if entry["fields.time"] != "right now!" {
+ t.Fatal("fields.time not set to original time field")
+ }
+
+ if entry["time"] != "0001-01-01T00:00:00Z" {
+ t.Fatal("time field not set to current time, was: ", entry["time"])
+ }
+}
+
+func TestFieldClashWithMsg(t *testing.T) {
+ formatter := &JSONFormatter{}
+
+ b, err := formatter.Format(WithField("msg", "something"))
+ if err != nil {
+ t.Fatal("Unable to format entry: ", err)
+ }
+
+ entry := make(map[string]interface{})
+ err = json.Unmarshal(b, &entry)
+ if err != nil {
+ t.Fatal("Unable to unmarshal formatted entry: ", err)
+ }
+
+ if entry["fields.msg"] != "something" {
+ t.Fatal("fields.msg not set to original msg field")
+ }
+}
+
+func TestFieldClashWithLevel(t *testing.T) {
+ formatter := &JSONFormatter{}
+
+ b, err := formatter.Format(WithField("level", "something"))
+ if err != nil {
+ t.Fatal("Unable to format entry: ", err)
+ }
+
+ entry := make(map[string]interface{})
+ err = json.Unmarshal(b, &entry)
+ if err != nil {
+ t.Fatal("Unable to unmarshal formatted entry: ", err)
+ }
+
+ if entry["fields.level"] != "something" {
+ t.Fatal("fields.level not set to original level field")
+ }
+}
+
+func TestJSONEntryEndsWithNewline(t *testing.T) {
+ formatter := &JSONFormatter{}
+
+ b, err := formatter.Format(WithField("level", "something"))
+ if err != nil {
+ t.Fatal("Unable to format entry: ", err)
+ }
+
+ if b[len(b)-1] != '\n' {
+ t.Fatal("Expected JSON log entry to end with a newline")
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go
new file mode 100644
index 0000000..e4974bf
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go
@@ -0,0 +1,206 @@
+package logrus
+
+import (
+ "io"
+ "os"
+ "sync"
+)
+
+type Logger struct {
+ // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
+ // file, or leave it as the default, which is `os.Stderr`. You can also set
+ // this to something more adventurous, such as logging to Kafka.
+ Out io.Writer
+ // Hooks for the logger instance. These allow firing events based on logging
+ // levels and log entries. For example, to send errors to an error tracking
+ // service, log to StatsD or dump the core on fatal errors.
+ Hooks LevelHooks
+ // All log entries pass through the formatter before being logged to Out. The
+ // included formatters are `TextFormatter` and `JSONFormatter`, of which
+ // TextFormatter is the default. In development (when a TTY is attached) it
+ // logs with colors, but not when logging to a file. You can easily implement
+ // your own formatter that satisfies the `Formatter` interface; see the
+ // `README` or the included formatters for examples.
+ Formatter Formatter
+ // The logging level the logger should log at. This is typically (and defaults
+ // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
+ // logged. `logrus.Debug` is useful in development and debugging.
+ Level Level
+ // Used to sync writing to the log.
+ mu sync.Mutex
+}
+
+// New creates a new logger. Configuration should be set by changing `Formatter`,
+// `Out` and `Hooks` directly on the default logger instance. You can also just
+// instantiate your own:
+//
+// var log = &Logger{
+// Out: os.Stderr,
+// Formatter: new(JSONFormatter),
+// Hooks: make(LevelHooks),
+// Level: logrus.DebugLevel,
+// }
+//
+// It's recommended to make this a global instance called `log`.
+func New() *Logger {
+ return &Logger{
+ Out: os.Stderr,
+ Formatter: new(TextFormatter),
+ Hooks: make(LevelHooks),
+ Level: InfoLevel,
+ }
+}
+
+// WithField adds a field to the log entry. Note that it doesn't log until you call
+// Debug, Print, Info, Warn, Fatal or Panic; it only creates a log entry.
+// If you want multiple fields, use `WithFields`.
+func (logger *Logger) WithField(key string, value interface{}) *Entry {
+ return NewEntry(logger).WithField(key, value)
+}
+
+// WithFields adds a map of fields to the log entry. All it does is call
+// `WithField` for each key/value pair in the map.
+func (logger *Logger) WithFields(fields Fields) *Entry {
+ return NewEntry(logger).WithFields(fields)
+}
+
+func (logger *Logger) Debugf(format string, args ...interface{}) {
+ if logger.Level >= DebugLevel {
+ NewEntry(logger).Debugf(format, args...)
+ }
+}
+
+func (logger *Logger) Infof(format string, args ...interface{}) {
+ if logger.Level >= InfoLevel {
+ NewEntry(logger).Infof(format, args...)
+ }
+}
+
+func (logger *Logger) Printf(format string, args ...interface{}) {
+ NewEntry(logger).Printf(format, args...)
+}
+
+func (logger *Logger) Warnf(format string, args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warnf(format, args...)
+ }
+}
+
+func (logger *Logger) Warningf(format string, args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warnf(format, args...)
+ }
+}
+
+func (logger *Logger) Errorf(format string, args ...interface{}) {
+ if logger.Level >= ErrorLevel {
+ NewEntry(logger).Errorf(format, args...)
+ }
+}
+
+func (logger *Logger) Fatalf(format string, args ...interface{}) {
+ if logger.Level >= FatalLevel {
+ NewEntry(logger).Fatalf(format, args...)
+ }
+ os.Exit(1)
+}
+
+func (logger *Logger) Panicf(format string, args ...interface{}) {
+ if logger.Level >= PanicLevel {
+ NewEntry(logger).Panicf(format, args...)
+ }
+}
+
+func (logger *Logger) Debug(args ...interface{}) {
+ if logger.Level >= DebugLevel {
+ NewEntry(logger).Debug(args...)
+ }
+}
+
+func (logger *Logger) Info(args ...interface{}) {
+ if logger.Level >= InfoLevel {
+ NewEntry(logger).Info(args...)
+ }
+}
+
+func (logger *Logger) Print(args ...interface{}) {
+ NewEntry(logger).Info(args...)
+}
+
+func (logger *Logger) Warn(args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warn(args...)
+ }
+}
+
+func (logger *Logger) Warning(args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warn(args...)
+ }
+}
+
+func (logger *Logger) Error(args ...interface{}) {
+ if logger.Level >= ErrorLevel {
+ NewEntry(logger).Error(args...)
+ }
+}
+
+func (logger *Logger) Fatal(args ...interface{}) {
+ if logger.Level >= FatalLevel {
+ NewEntry(logger).Fatal(args...)
+ }
+ os.Exit(1)
+}
+
+func (logger *Logger) Panic(args ...interface{}) {
+ if logger.Level >= PanicLevel {
+ NewEntry(logger).Panic(args...)
+ }
+}
+
+func (logger *Logger) Debugln(args ...interface{}) {
+ if logger.Level >= DebugLevel {
+ NewEntry(logger).Debugln(args...)
+ }
+}
+
+func (logger *Logger) Infoln(args ...interface{}) {
+ if logger.Level >= InfoLevel {
+ NewEntry(logger).Infoln(args...)
+ }
+}
+
+func (logger *Logger) Println(args ...interface{}) {
+ NewEntry(logger).Println(args...)
+}
+
+func (logger *Logger) Warnln(args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warnln(args...)
+ }
+}
+
+func (logger *Logger) Warningln(args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warnln(args...)
+ }
+}
+
+func (logger *Logger) Errorln(args ...interface{}) {
+ if logger.Level >= ErrorLevel {
+ NewEntry(logger).Errorln(args...)
+ }
+}
+
+func (logger *Logger) Fatalln(args ...interface{}) {
+ if logger.Level >= FatalLevel {
+ NewEntry(logger).Fatalln(args...)
+ }
+ os.Exit(1)
+}
+
+func (logger *Logger) Panicln(args ...interface{}) {
+ if logger.Level >= PanicLevel {
+ NewEntry(logger).Panicln(args...)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go
new file mode 100644
index 0000000..43ee12e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go
@@ -0,0 +1,94 @@
+package logrus
+
+import (
+ "fmt"
+ "log"
+)
+
+// Fields type, used to pass to `WithFields`.
+type Fields map[string]interface{}
+
+// Level type
+type Level uint8
+
+// Convert the Level to a string. E.g. PanicLevel becomes "panic".
+func (level Level) String() string {
+ switch level {
+ case DebugLevel:
+ return "debug"
+ case InfoLevel:
+ return "info"
+ case WarnLevel:
+ return "warning"
+ case ErrorLevel:
+ return "error"
+ case FatalLevel:
+ return "fatal"
+ case PanicLevel:
+ return "panic"
+ }
+
+ return "unknown"
+}
+
+// ParseLevel takes a string level and returns the Logrus log level constant.
+func ParseLevel(lvl string) (Level, error) {
+ switch lvl {
+ case "panic":
+ return PanicLevel, nil
+ case "fatal":
+ return FatalLevel, nil
+ case "error":
+ return ErrorLevel, nil
+ case "warn", "warning":
+ return WarnLevel, nil
+ case "info":
+ return InfoLevel, nil
+ case "debug":
+ return DebugLevel, nil
+ }
+
+ var l Level
+ return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
+}
+
+// These are the different logging levels. You can set the logging level to log
+// on your instance of logger, obtained with `logrus.New()`.
+const (
+ // PanicLevel level, highest level of severity. Logs and then calls panic with the
+ // message passed to Debug, Info, ...
+ PanicLevel Level = iota
+ // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
+ // logging level is set to Panic.
+ FatalLevel
+ // ErrorLevel level. Logs. Used for errors that should definitely be noted.
+ // Commonly used for hooks to send errors to an error tracking service.
+ ErrorLevel
+ // WarnLevel level. Non-critical entries that deserve eyes.
+ WarnLevel
+ // InfoLevel level. General operational entries about what's going on inside the
+ // application.
+ InfoLevel
+ // DebugLevel level. Usually only enabled when debugging. Very verbose logging.
+ DebugLevel
+)
+
+// Won't compile if StdLogger can't be realized by a log.Logger
+var _ StdLogger = &log.Logger{}
+
+// StdLogger is what your logrus-enabled library should take; that way
+// it'll accept both a stdlib logger and a logrus logger. There's no standard
+// interface, so this is the closest we get, unfortunately.
+type StdLogger interface {
+ Print(...interface{})
+ Printf(string, ...interface{})
+ Println(...interface{})
+
+ Fatal(...interface{})
+ Fatalf(string, ...interface{})
+ Fatalln(...interface{})
+
+ Panic(...interface{})
+ Panicf(string, ...interface{})
+ Panicln(...interface{})
+}
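+
+// For example (an illustrative sketch), a library function written against
+// StdLogger accepts either a *log.Logger or a *logrus.Logger, since both
+// provide the methods above:
+//
+// func process(logger StdLogger) {
+// logger.Printf("processing %d items", 3)
+// }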
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus_test.go
new file mode 100644
index 0000000..efaacea
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus_test.go
@@ -0,0 +1,301 @@
+package logrus
+
+import (
+ "bytes"
+ "encoding/json"
+ "strconv"
+ "strings"
+ "sync"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) {
+ var buffer bytes.Buffer
+ var fields Fields
+
+ logger := New()
+ logger.Out = &buffer
+ logger.Formatter = new(JSONFormatter)
+
+ log(logger)
+
+ err := json.Unmarshal(buffer.Bytes(), &fields)
+ assert.Nil(t, err)
+
+ assertions(fields)
+}
+
+func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) {
+ var buffer bytes.Buffer
+
+ logger := New()
+ logger.Out = &buffer
+ logger.Formatter = &TextFormatter{
+ DisableColors: true,
+ }
+
+ log(logger)
+
+ fields := make(map[string]string)
+ for _, kv := range strings.Split(buffer.String(), " ") {
+ if !strings.Contains(kv, "=") {
+ continue
+ }
+ kvArr := strings.Split(kv, "=")
+ key := strings.TrimSpace(kvArr[0])
+ val := kvArr[1]
+ if kvArr[1][0] == '"' {
+ var err error
+ val, err = strconv.Unquote(val)
+ assert.NoError(t, err)
+ }
+ fields[key] = val
+ }
+ assertions(fields)
+}
+
+func TestPrint(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Print("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test")
+ assert.Equal(t, fields["level"], "info")
+ })
+}
+
+func TestInfo(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Info("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test")
+ assert.Equal(t, fields["level"], "info")
+ })
+}
+
+func TestWarn(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Warn("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test")
+ assert.Equal(t, fields["level"], "warning")
+ })
+}
+
+func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Infoln("test", "test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test test")
+ })
+}
+
+func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Infoln("test", 10)
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test 10")
+ })
+}
+
+func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Infoln(10, 10)
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "10 10")
+ })
+}
+
+func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Info(10, 10)
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "10 10")
+ })
+}
+
+func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Info("test", 10)
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test10")
+ })
+}
+
+func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Info("test", "test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "testtest")
+ })
+}
+
+func TestWithFieldsShouldAllowAssignments(t *testing.T) {
+ var buffer bytes.Buffer
+ var fields Fields
+
+ logger := New()
+ logger.Out = &buffer
+ logger.Formatter = new(JSONFormatter)
+
+ localLog := logger.WithFields(Fields{
+ "key1": "value1",
+ })
+
+ localLog.WithField("key2", "value2").Info("test")
+ err := json.Unmarshal(buffer.Bytes(), &fields)
+ assert.Nil(t, err)
+
+ assert.Equal(t, "value2", fields["key2"])
+ assert.Equal(t, "value1", fields["key1"])
+
+ buffer = bytes.Buffer{}
+ fields = Fields{}
+ localLog.Info("test")
+ err = json.Unmarshal(buffer.Bytes(), &fields)
+ assert.Nil(t, err)
+
+ _, ok := fields["key2"]
+ assert.Equal(t, false, ok)
+ assert.Equal(t, "value1", fields["key1"])
+}
+
+func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.WithField("msg", "hello").Info("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test")
+ })
+}
+
+func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.WithField("msg", "hello").Info("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test")
+ assert.Equal(t, fields["fields.msg"], "hello")
+ })
+}
+
+func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.WithField("time", "hello").Info("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["fields.time"], "hello")
+ })
+}
+
+func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.WithField("level", 1).Info("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["level"], "info")
+ assert.Equal(t, fields["fields.level"], 1.0) // JSON has floats only
+ })
+}
+
+func TestDefaultFieldsAreNotPrefixed(t *testing.T) {
+ LogAndAssertText(t, func(log *Logger) {
+ ll := log.WithField("herp", "derp")
+ ll.Info("hello")
+ ll.Info("bye")
+ }, func(fields map[string]string) {
+ for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} {
+ if _, ok := fields[fieldName]; ok {
+ t.Fatalf("should not have prefixed %q: %v", fieldName, fields)
+ }
+ }
+ })
+}
+
+func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) {
+
+ var buffer bytes.Buffer
+ var fields Fields
+
+ logger := New()
+ logger.Out = &buffer
+ logger.Formatter = new(JSONFormatter)
+
+ llog := logger.WithField("context", "eating raw fish")
+
+ llog.Info("looks delicious")
+
+ err := json.Unmarshal(buffer.Bytes(), &fields)
+ assert.NoError(t, err, "should have decoded first message")
+ assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields")
+ assert.Equal(t, fields["msg"], "looks delicious")
+ assert.Equal(t, fields["context"], "eating raw fish")
+
+ buffer.Reset()
+
+ llog.Warn("omg it is!")
+
+ err = json.Unmarshal(buffer.Bytes(), &fields)
+ assert.NoError(t, err, "should have decoded second message")
+ assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields")
+ assert.Equal(t, fields["msg"], "omg it is!")
+ assert.Equal(t, fields["context"], "eating raw fish")
+ assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry")
+
+}
+
+func TestConvertLevelToString(t *testing.T) {
+ assert.Equal(t, "debug", DebugLevel.String())
+ assert.Equal(t, "info", InfoLevel.String())
+ assert.Equal(t, "warning", WarnLevel.String())
+ assert.Equal(t, "error", ErrorLevel.String())
+ assert.Equal(t, "fatal", FatalLevel.String())
+ assert.Equal(t, "panic", PanicLevel.String())
+}
+
+func TestParseLevel(t *testing.T) {
+ l, err := ParseLevel("panic")
+ assert.Nil(t, err)
+ assert.Equal(t, PanicLevel, l)
+
+ l, err = ParseLevel("fatal")
+ assert.Nil(t, err)
+ assert.Equal(t, FatalLevel, l)
+
+ l, err = ParseLevel("error")
+ assert.Nil(t, err)
+ assert.Equal(t, ErrorLevel, l)
+
+ l, err = ParseLevel("warn")
+ assert.Nil(t, err)
+ assert.Equal(t, WarnLevel, l)
+
+ l, err = ParseLevel("warning")
+ assert.Nil(t, err)
+ assert.Equal(t, WarnLevel, l)
+
+ l, err = ParseLevel("info")
+ assert.Nil(t, err)
+ assert.Equal(t, InfoLevel, l)
+
+ l, err = ParseLevel("debug")
+ assert.Nil(t, err)
+ assert.Equal(t, DebugLevel, l)
+
+ l, err = ParseLevel("invalid")
+ assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error())
+}
+
+func TestGetSetLevelRace(t *testing.T) {
+ wg := sync.WaitGroup{}
+ for i := 0; i < 100; i++ {
+ wg.Add(1)
+ go func(i int) {
+ defer wg.Done()
+ if i%2 == 0 {
+ SetLevel(InfoLevel)
+ } else {
+ GetLevel()
+ }
+ }(i)
+
+ }
+ wg.Wait()
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_bsd.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_bsd.go
new file mode 100644
index 0000000..71f8d67
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_bsd.go
@@ -0,0 +1,9 @@
+// +build darwin freebsd openbsd netbsd dragonfly
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+type Termios syscall.Termios
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go
new file mode 100644
index 0000000..a2c0b40
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go
@@ -0,0 +1,12 @@
+// Based on ssh/terminal:
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TCGETS
+
+type Termios syscall.Termios
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go
new file mode 100644
index 0000000..4bb5376
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go
@@ -0,0 +1,21 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux darwin freebsd openbsd netbsd dragonfly
+
+package logrus
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal() bool {
+ fd := syscall.Stdout
+ var termios Termios
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+ return err == 0
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go
new file mode 100644
index 0000000..2e09f6f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go
@@ -0,0 +1,27 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package logrus
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+var (
+ procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+)
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal() bool {
+ fd := syscall.Stdout
+ var st uint32
+ r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
+ return r != 0 && e == 0
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go
new file mode 100644
index 0000000..17cc298
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go
@@ -0,0 +1,159 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "runtime"
+ "sort"
+ "strings"
+ "time"
+)
+
+const (
+ nocolor = 0
+ red = 31
+ green = 32
+ yellow = 33
+ blue = 34
+ gray = 37
+)
+
+var (
+ baseTimestamp time.Time
+ isTerminal bool
+)
+
+func init() {
+ baseTimestamp = time.Now()
+ isTerminal = IsTerminal()
+}
+
+func miniTS() int {
+ return int(time.Since(baseTimestamp) / time.Second)
+}
+
+type TextFormatter struct {
+ // Set to true to bypass checking for a TTY before outputting colors.
+ ForceColors bool
+
+ // Force disabling colors.
+ DisableColors bool
+
+ // Disable timestamp logging. Useful when output is redirected to a logging
+ // system that already adds timestamps.
+ DisableTimestamp bool
+
+ // Enable logging the full timestamp when a TTY is attached instead of just
+ // the time passed since beginning of execution.
+ FullTimestamp bool
+
+ // TimestampFormat to use for display when a full timestamp is printed
+ TimestampFormat string
+
+ // The fields are sorted by default for a consistent output. For applications
+ // that log extremely frequently and don't use the JSON formatter this may not
+ // be desired.
+ DisableSorting bool
+}
+
+func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
+ keys := make([]string, 0, len(entry.Data))
+ for k := range entry.Data {
+ keys = append(keys, k)
+ }
+
+ if !f.DisableSorting {
+ sort.Strings(keys)
+ }
+
+ b := &bytes.Buffer{}
+
+ prefixFieldClashes(entry.Data)
+
+ isColorTerminal := isTerminal && (runtime.GOOS != "windows")
+ isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors
+
+ timestampFormat := f.TimestampFormat
+ if timestampFormat == "" {
+ timestampFormat = DefaultTimestampFormat
+ }
+ if isColored {
+ f.printColored(b, entry, keys, timestampFormat)
+ } else {
+ if !f.DisableTimestamp {
+ f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
+ }
+ f.appendKeyValue(b, "level", entry.Level.String())
+ f.appendKeyValue(b, "msg", entry.Message)
+ for _, key := range keys {
+ f.appendKeyValue(b, key, entry.Data[key])
+ }
+ }
+
+ b.WriteByte('\n')
+ return b.Bytes(), nil
+}
+
+func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
+ var levelColor int
+ switch entry.Level {
+ case DebugLevel:
+ levelColor = gray
+ case WarnLevel:
+ levelColor = yellow
+ case ErrorLevel, FatalLevel, PanicLevel:
+ levelColor = red
+ default:
+ levelColor = blue
+ }
+
+ levelText := strings.ToUpper(entry.Level.String())[0:4]
+
+ if !f.FullTimestamp {
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
+ } else {
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
+ }
+ for _, k := range keys {
+ v := entry.Data[k]
+ fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v)
+ }
+}
+
+func needsQuoting(text string) bool {
+ for _, ch := range text {
+ if !((ch >= 'a' && ch <= 'z') ||
+ (ch >= 'A' && ch <= 'Z') ||
+ (ch >= '0' && ch <= '9') ||
+ ch == '-' || ch == '.') {
+ return true
+ }
+ }
+ return false
+}
+
+func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
+ b.WriteString(key)
+ b.WriteByte('=')
+
+ switch value := value.(type) {
+ case string:
+ if needsQuoting(value) {
+ fmt.Fprintf(b, "%q", value)
+ } else {
+ b.WriteString(value)
+ }
+ case error:
+ errmsg := value.Error()
+ if needsQuoting(errmsg) {
+ fmt.Fprintf(b, "%q", errmsg)
+ } else {
+ b.WriteString(errmsg)
+ }
+ default:
+ fmt.Fprint(b, value)
+ }
+
+ b.WriteByte(' ')
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter_test.go
new file mode 100644
index 0000000..e25a44f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter_test.go
@@ -0,0 +1,61 @@
+package logrus
+
+import (
+ "bytes"
+ "errors"
+ "testing"
+ "time"
+)
+
+func TestQuoting(t *testing.T) {
+ tf := &TextFormatter{DisableColors: true}
+
+ checkQuoting := func(q bool, value interface{}) {
+ b, _ := tf.Format(WithField("test", value))
+ idx := bytes.Index(b, ([]byte)("test="))
+ cont := bytes.Contains(b[idx+5:], []byte{'"'})
+ if cont != q {
+ if q {
+ t.Errorf("quoting expected for: %#v", value)
+ } else {
+ t.Errorf("quoting not expected for: %#v", value)
+ }
+ }
+ }
+
+ checkQuoting(false, "abcd")
+ checkQuoting(false, "v1.0")
+ checkQuoting(false, "1234567890")
+ checkQuoting(true, "/foobar")
+ checkQuoting(true, "x y")
+ checkQuoting(true, "x,y")
+ checkQuoting(false, errors.New("invalid"))
+ checkQuoting(true, errors.New("invalid argument"))
+}
+
+func TestTimestampFormat(t *testing.T) {
+ checkTimeStr := func(format string) {
+ customFormatter := &TextFormatter{DisableColors: true, TimestampFormat: format}
+ customStr, _ := customFormatter.Format(WithField("test", "test"))
+ timeStart := bytes.Index(customStr, ([]byte)("time="))
+ timeEnd := bytes.Index(customStr, ([]byte)("level="))
+ timeStr := customStr[timeStart+5 : timeEnd-1]
+ if timeStr[0] == '"' && timeStr[len(timeStr)-1] == '"' {
+ timeStr = timeStr[1 : len(timeStr)-1]
+ }
+ if format == "" {
+ format = time.RFC3339
+ }
+ _, e := time.Parse(format, (string)(timeStr))
+ if e != nil {
+ t.Errorf("time string \"%s\" did not match provided time format \"%s\": %s", timeStr, format, e)
+ }
+ }
+
+ checkTimeStr("2006-01-02T15:04:05.000000000Z07:00")
+ checkTimeStr("Mon Jan _2 15:04:05 2006")
+ checkTimeStr("")
+}
+
+// TODO add tests for sorting etc., this requires a parser for the text
+// formatter output.
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/writer.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/writer.go
new file mode 100644
index 0000000..1e30b1c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/writer.go
@@ -0,0 +1,31 @@
+package logrus
+
+import (
+ "bufio"
+ "io"
+ "runtime"
+)
+
+func (logger *Logger) Writer() *io.PipeWriter {
+ reader, writer := io.Pipe()
+
+ go logger.writerScanner(reader)
+ runtime.SetFinalizer(writer, writerFinalizer)
+
+ return writer
+}
+
+func (logger *Logger) writerScanner(reader *io.PipeReader) {
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() {
+ logger.Print(scanner.Text())
+ }
+ if err := scanner.Err(); err != nil {
+ logger.Errorf("Error while reading from Writer: %s", err)
+ }
+ reader.Close()
+}
+
+func writerFinalizer(writer *io.PipeWriter) {
+ writer.Close()
+}
diff --git a/Godeps/_workspace/src/github.com/containerops/wrench/db/db.go b/Godeps/_workspace/src/github.com/containerops/wrench/db/db.go
index 7dc3899..74d9923 100644
--- a/Godeps/_workspace/src/github.com/containerops/wrench/db/db.go
+++ b/Godeps/_workspace/src/github.com/containerops/wrench/db/db.go
@@ -14,6 +14,7 @@ const (
GLOBAL_TARSUM_INDEX = "GLOBAL_TARSUM_INDEX"
GLOBAL_TAG_INDEX = "GLOBAL_TAG_INDEX"
GLOBAL_COMPOSE_INDEX = "GLOBAL_COMPOSE_INDEX"
+ GLOBAL_LIBRARY_INDEX = "GLOBAL_LIBRARY_INDEX"
//Sail Data Index
GLOBAL_USER_INDEX = "GLOBAL_USER_INDEX"
GLOBAL_ORGANIZATION_INDEX = "GLOBAL_ORGANIZATION_INDEX"
@@ -70,6 +71,9 @@ func Key(object string, keys ...string) (result string) {
case "COMPOSE":
case "compose":
result = fmt.Sprintf("COMPOSE-%s-%s", keys[0], keys[1])
+ case "LIBRARY":
+ case "library":
+ result = fmt.Sprintf("LIBRARY-%s", keys[0])
case "ADMIN":
case "admin":
result = fmt.Sprintf("ADMIN-%s", keys[0])
diff --git a/Godeps/_workspace/src/github.com/containerops/wrench/utils/digest.go b/Godeps/_workspace/src/github.com/containerops/wrench/utils/digest.go
new file mode 100644
index 0000000..75aaf52
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/containerops/wrench/utils/digest.go
@@ -0,0 +1,130 @@
+package utils
+
+import (
+ "bytes"
+ "crypto"
+ "fmt"
+ "hash"
+ "io"
+ "strings"
+
+ "github.com/docker/libtrust"
+)
+
+// Ported from distribution in order to match the digest generated by the
+// docker client.
+
+// Algorithm identifies an implementation of a digester by an identifier.
+// Note that this defines both the hash algorithm used and the string
+// encoding.
+type Algorithm string
+
+// supported digest types
+const (
+ SHA256 Algorithm = "sha256" // sha256 with hex encoding
+ SHA384 Algorithm = "sha384" // sha384 with hex encoding
+ SHA512 Algorithm = "sha512" // sha512 with hex encoding
+ TarsumV1SHA256 Algorithm = "tarsum+v1+sha256" // supported tarsum version, verification only
+
+ // Canonical is the primary digest algorithm used with the distribution
+ // project. Other digests may be used but this one is the primary storage
+ // digest.
+ Canonical = SHA256
+)
+
+var (
+ // TODO(stevvooe): Follow the pattern of the standard crypto package for
+ // registration of digests. Effectively, we are a registerable set and
+ // common symbol access.
+
+ // algorithms maps values to hash.Hash implementations. Other algorithms
+ // may be available but they cannot be calculated by the digest package.
+ algorithms = map[Algorithm]crypto.Hash{
+ SHA256: crypto.SHA256,
+ SHA384: crypto.SHA384,
+ SHA512: crypto.SHA512,
+ }
+)
+
+// Available returns true if the digest type is available for use. If this
+// returns false, New and Hash will return nil.
+func (a Algorithm) Available() bool {
+ h, ok := algorithms[a]
+ if !ok {
+ return false
+ }
+
+ // check availability of the hash, as well
+ return h.Available()
+}
+
+func (a Algorithm) New() Digester {
+ return &digester{
+ alg: a,
+ hash: a.Hash(),
+ }
+}
+
+func (a Algorithm) Hash() hash.Hash {
+ if !a.Available() {
+ return nil
+ }
+
+ return algorithms[a].New()
+}
+
+type Digester interface {
+ Hash() hash.Hash // provides direct access to underlying hash instance.
+ Digest() string
+}
+
+// digester provides a simple digester definition that embeds a hasher.
+type digester struct {
+ alg Algorithm
+ hash hash.Hash
+}
+
+func (d *digester) Hash() hash.Hash {
+ return d.hash
+}
+
+func (d *digester) Digest() string {
+ return string(fmt.Sprintf("%s:%x", d.alg, d.hash.Sum(nil)))
+}
+
+func FromReader(rd io.Reader) (string, error) {
+ digester := Canonical.New()
+
+ if _, err := io.Copy(digester.Hash(), rd); err != nil {
+ return "", err
+ }
+
+ return digester.Digest(), nil
+}
+
+func Payload(data []byte) ([]byte, error) {
+ jsig, err := libtrust.ParsePrettySignature(data, "signatures")
+ if err != nil {
+ return nil, err
+ }
+
+ // Resolve the payload in the manifest.
+ return jsig.Payload()
+}
+
+func DigestManifest(data []byte) (string, error) {
+ p, err := Payload(data)
+ if err != nil {
+ if !strings.Contains(err.Error(), "missing signature key") {
+ return "", err
+ }
+
+ p = data
+ }
+
+ digest, err := FromReader(bytes.NewReader(p))
+ if err != nil {
+ return "", err
+ }
+
+ return digest, err
+}
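+
+// Example (illustrative): computing the canonical digest of a manifest body,
+// matching what the docker client produces:
+//
+// digest, err := DigestManifest(manifestJSON)
+// if err != nil {
+// // handle the error
+// }
+// // digest has the form "<algorithm>:<hex>", e.g. "sha256:9f86d0..."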
diff --git a/Godeps/_workspace/src/github.com/containerops/wrench/utils/generator.go b/Godeps/_workspace/src/github.com/containerops/wrench/utils/generator.go
new file mode 100644
index 0000000..174c8ec
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/containerops/wrench/utils/generator.go
@@ -0,0 +1,3432 @@
+package utils
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math"
+ "net"
+ "net/http"
+ "net/http/httputil"
+ "net/url"
+ "os"
+ "path"
+ "path/filepath"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/docker/docker/pkg/archive"
+ "github.com/docker/docker/pkg/fileutils"
+ "github.com/docker/docker/pkg/stdcopy"
+)
+
+//****************************************************************//
+//auth use type
+//****************************************************************//
+
+// AuthConfiguration represents authentication options to use in the PushImage
+// method. It represents the authentication in the Docker index server.
+type AuthConfiguration struct {
+ Username string `json:"username,omitempty"`
+ Password string `json:"password,omitempty"`
+ Email string `json:"email,omitempty"`
+ ServerAddress string `json:"serveraddress,omitempty"`
+}
+
+// AuthConfigurations represents authentication options to use for the
+// PushImage method accommodating the new X-Registry-Config header
+type AuthConfigurations struct {
+ Configs map[string]AuthConfiguration `json:"configs"`
+}
+
+// dockerConfig represents a registry authentication configuration from the
+// .dockercfg file.
+type dockerConfig struct {
+ Auth string `json:"auth"`
+ Email string `json:"email"`
+}
+
+//****************************************************************//
+//change use type
+//****************************************************************//
+
+// ChangeType is a type for constants indicating the type of change
+// in a container
+type ChangeType int
+
+// Change represents a change in a container.
+//
+// See https://docs.docker.com/reference/api/docker_remote_api_v1.19/#inspect-changes-on-a-container-s-filesystem for more details.
+type Change struct {
+ Path string
+ Kind ChangeType
+}
+
+//****************************************************************//
+//client use type
+//****************************************************************//
+
+// APIVersion is an internal representation of a version of the Remote API.
+type APIVersion []int
+
+// Client is the basic type of this package. It provides methods for
+// interaction with the API.
+type Client struct {
+ SkipServerVersionCheck bool
+ HTTPClient *http.Client
+ transport *http.Transport
+ TLSConfig *tls.Config
+
+ endpoint string
+ endpointURL *url.URL
+ eventMonitor *eventMonitoringState
+ requestedAPIVersion APIVersion
+ serverAPIVersion APIVersion
+ expectedAPIVersion APIVersion
+}
+
+type doOptions struct {
+ data interface{}
+ forceJSON bool
+}
+
+type streamOptions struct {
+ setRawTerminal bool
+ rawJSONStream bool
+ useJSONDecoder bool
+ headers map[string]string
+ in io.Reader
+ stdout io.Writer
+ stderr io.Writer
+}
+
+type hijackOptions struct {
+ success chan struct{}
+ setRawTerminal bool
+ in io.Reader
+ stdout io.Writer
+ stderr io.Writer
+ data interface{}
+}
+
+type jsonMessage struct {
+ Status string `json:"status,omitempty"`
+ Progress string `json:"progress,omitempty"`
+ Error string `json:"error,omitempty"`
+ Stream string `json:"stream,omitempty"`
+}
+
+// Error represents a failure returned by the API.
+type Error struct {
+ Status int
+ Message string
+}
+
+//****************************************************************//
+//Container use type
+//****************************************************************//
+
+// ListContainersOptions specify parameters to the ListContainers function.
+//
+// See https://docs.docker.com/reference/api/docker_remote_api_v1.19/#list-containers for more details.
+/*
+List containers
+GET /containers/json
+Example request:
+ GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1
+Example response:
+ HTTP/1.1 200 OK
+ Content-Type: application/json
+[
+ {
+ "Id": "8dfafdbc3a40",
+ "Image": "ubuntu:latest",
+ "Command": "echo 1",
+ "Created": 1367854155,
+ "Status": "Exit 0",
+ "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}],
+ "SizeRw": 12288,
+ "SizeRootFs": 0
+ },
+ ......
+]
+Query Parameters:
+ all – 1/True/true or 0/False/false, Show all containers. Only running containers are shown by default (i.e., this defaults to false)
+ limit – Show limit last created containers, include non-running ones.
+ since – Show only containers created since Id, include non-running ones.
+ before – Show only containers created before Id, include non-running ones.
+ size – 1/True/true or 0/False/false, Show the containers sizes
+ filters - a JSON encoded value of the filters (a map[string][]string) to process on the containers list. Available filters:
+ exited=<int> – containers with exit code of <int>
+ status=(restarting|running|paused|exited)
+ label=key or key=value of a container label
+Status Codes:
+ 200 – no error
+ 400 – bad parameter
+ 500 – server error
+*/
+type ListContainersOptions struct {
+ All bool
+ Limit int
+ Since string
+ Before string
+ Size bool
+ Filters map[string][]string
+ Exited int
+ Status string
+ Label string
+ Key string
+}
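+
+// A minimal sketch (assuming the ListContainers method referenced above) of
+// selecting only running containers:
+//
+// opts := ListContainersOptions{
+// All: false,
+// Filters: map[string][]string{"status": {"running"}},
+// }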
+
+// APIPort is a type that represents a port mapping returned by the Docker API
+type APIPort struct {
+ PrivatePort int64 `json:"PrivatePort,omitempty" yaml:"PrivatePort,omitempty"`
+ PublicPort int64 `json:"PublicPort,omitempty" yaml:"PublicPort,omitempty"`
+ Type string `json:"Type,omitempty" yaml:"Type,omitempty"`
+ IP string `json:"IP,omitempty" yaml:"IP,omitempty"`
+}
+
+// APIContainers represents a container.
+//
+// See https://docs.docker.com/reference/api/docker_remote_api_v1.19/#list-containers for more details.
+type APIContainers struct {
+ ID string `json:"Id" yaml:"Id"`
+ Image string `json:"Image,omitempty" yaml:"Image,omitempty"`
+ Command string `json:"Command,omitempty" yaml:"Command,omitempty"`
+ Created int64 `json:"Created,omitempty" yaml:"Created,omitempty"`
+ Status string `json:"Status,omitempty" yaml:"Status,omitempty"`
+ Ports []APIPort `json:"Ports,omitempty" yaml:"Ports,omitempty"`
+ SizeRw int64 `json:"SizeRw,omitempty" yaml:"SizeRw,omitempty"`
+ SizeRootFs int64 `json:"SizeRootFs,omitempty" yaml:"SizeRootFs,omitempty"`
+ Names []string `json:"Names,omitempty" yaml:"Names,omitempty"` // TODO: verify this field is present in the API response
+}
+
+// Port represents the port number and the protocol, in the form
+// <number>/<protocol>. For example: 80/tcp.
+type Port string
+
+// State represents the state of a container.
+type State struct {
+ Running bool `json:"Running,omitempty" yaml:"Running,omitempty"`
+ Paused bool `json:"Paused,omitempty" yaml:"Paused,omitempty"`
+ Restarting bool `json:"Restarting,omitempty" yaml:"Restarting,omitempty"`
+ OOMKilled bool `json:"OOMKilled,omitempty" yaml:"OOMKilled,omitempty"`
+ Pid int `json:"Pid,omitempty" yaml:"Pid,omitempty"`
+ ExitCode int `json:"ExitCode,omitempty" yaml:"ExitCode,omitempty"`
+ Error string `json:"Error,omitempty" yaml:"Error,omitempty"`
+ StartedAt time.Time `json:"StartedAt,omitempty" yaml:"StartedAt,omitempty"`
+ FinishedAt time.Time `json:"FinishedAt,omitempty" yaml:"FinishedAt,omitempty"`
+}
+
+// PortBinding represents the host/container port mapping as returned in the
+// `docker inspect` json
+type PortBinding struct {
+ HostIP string `json:"HostIP,omitempty" yaml:"HostIP,omitempty"`
+ HostPort string `json:"HostPort,omitempty" yaml:"HostPort,omitempty"`
+}
+
+// PortMapping represents a deprecated field in the `docker inspect` output,
+// and its value as found in NetworkSettings should always be nil
+type PortMapping map[string]string
+
+// NetworkSettings contains network-related information about a container
+type NetworkSettings struct {
+ IPAddress string `json:"IPAddress,omitempty" yaml:"IPAddress,omitempty"`
+ IPPrefixLen int `json:"IPPrefixLen,omitempty" yaml:"IPPrefixLen,omitempty"`
+ Gateway string `json:"Gateway,omitempty" yaml:"Gateway,omitempty"`
+ Bridge string `json:"Bridge,omitempty" yaml:"Bridge,omitempty"`
+ PortMapping map[string]PortMapping `json:"PortMapping,omitempty" yaml:"PortMapping,omitempty"`
+ Ports map[Port][]PortBinding `json:"Ports,omitempty" yaml:"Ports,omitempty"`
+}
+
+// Config is the list of configuration options used when creating a container.
+// Config does not contain the options that are specific to starting a container on a
+// given host. Those are contained in HostConfig
+type Config struct {
+ Hostname string `json:"Hostname,omitempty" yaml:"Hostname,omitempty"`
+ Domainname string `json:"Domainname,omitempty" yaml:"Domainname,omitempty"`
+ User string `json:"User,omitempty" yaml:"User,omitempty"`
+ Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty"`
+ MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty"`
+ CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty"`
+ CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty"`
+ AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty"`
+ AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty"`
+ AttachStderr bool `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty"`
+ PortSpecs []string `json:"PortSpecs,omitempty" yaml:"PortSpecs,omitempty"`
+ ExposedPorts map[Port]struct{} `json:"ExposedPorts,omitempty" yaml:"ExposedPorts,omitempty"`
+ Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"`
+ OpenStdin bool `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty"`
+ StdinOnce bool `json:"StdinOnce,omitempty" yaml:"StdinOnce,omitempty"`
+ Env []string `json:"Env,omitempty" yaml:"Env,omitempty"`
+ Cmd []string `json:"Cmd,omitempty" yaml:"Cmd,omitempty"`
+ DNS []string `json:"Dns,omitempty" yaml:"Dns,omitempty"` // For Docker API v1.9 and below only
+ Image string `json:"Image,omitempty" yaml:"Image,omitempty"`
+ Volumes map[string]struct{} `json:"Volumes,omitempty" yaml:"Volumes,omitempty"`
+ VolumesFrom string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty"`
+ WorkingDir string `json:"WorkingDir,omitempty" yaml:"WorkingDir,omitempty"`
+ MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty"`
+ Entrypoint []string `json:"Entrypoint,omitempty" yaml:"Entrypoint,omitempty"`
+ NetworkDisabled bool `json:"NetworkDisabled,omitempty" yaml:"NetworkDisabled,omitempty"`
+ SecurityOpts []string `json:"SecurityOpts,omitempty" yaml:"SecurityOpts,omitempty"`
+ OnBuild []string `json:"OnBuild,omitempty" yaml:"OnBuild,omitempty"`
+ Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"`
+}
+
+// LogConfig defines the log driver type and the configuration for it.
+type LogConfig struct {
+ Type string `json:"Type,omitempty" yaml:"Type,omitempty"`
+ Config map[string]string `json:"Config,omitempty" yaml:"Config,omitempty"`
+}
+
+// ULimit defines system-wide resource limitations
+// This can help a lot in system administration, e.g. when a user starts too many processes and therefore makes the system unresponsive for other users.
+type ULimit struct {
+ Name string `json:"Name,omitempty" yaml:"Name,omitempty"`
+ Soft int64 `json:"Soft,omitempty" yaml:"Soft,omitempty"`
+ Hard int64 `json:"Hard,omitempty" yaml:"Hard,omitempty"`
+}
+
+// SwarmNode contains information about the Swarm node on which the container is running.
+type SwarmNode struct {
+ ID string `json:"ID,omitempty" yaml:"ID,omitempty"`
+ IP string `json:"IP,omitempty" yaml:"IP,omitempty"`
+ Addr string `json:"Addr,omitempty" yaml:"Addr,omitempty"`
+ Name string `json:"Name,omitempty" yaml:"Name,omitempty"`
+ CPUs int64 `json:"CPUs,omitempty" yaml:"CPUs,omitempty"`
+ Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty"`
+ Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"`
+}
+
+// Container is the type encompassing everything about a container - its config,
+// hostconfig, etc.
+type Container struct {
+ ID string `json:"Id" yaml:"Id"`
+
+ Created time.Time `json:"Created,omitempty" yaml:"Created,omitempty"`
+
+ Path string `json:"Path,omitempty" yaml:"Path,omitempty"`
+ Args []string `json:"Args,omitempty" yaml:"Args,omitempty"`
+
+ Config *Config `json:"Config,omitempty" yaml:"Config,omitempty"`
+ State State `json:"State,omitempty" yaml:"State,omitempty"`
+ Image string `json:"Image,omitempty" yaml:"Image,omitempty"`
+
+ Node *SwarmNode `json:"Node,omitempty" yaml:"Node,omitempty"`
+
+ NetworkSettings *NetworkSettings `json:"NetworkSettings,omitempty" yaml:"NetworkSettings,omitempty"`
+
+ SysInitPath string `json:"SysInitPath,omitempty" yaml:"SysInitPath,omitempty"`
+ ResolvConfPath string `json:"ResolvConfPath,omitempty" yaml:"ResolvConfPath,omitempty"`
+ HostnamePath string `json:"HostnamePath,omitempty" yaml:"HostnamePath,omitempty"`
+ HostsPath string `json:"HostsPath,omitempty" yaml:"HostsPath,omitempty"`
+ Name string `json:"Name,omitempty" yaml:"Name,omitempty"`
+ Driver string `json:"Driver,omitempty" yaml:"Driver,omitempty"`
+
+ Volumes map[string]string `json:"Volumes,omitempty" yaml:"Volumes,omitempty"`
+ VolumesRW map[string]bool `json:"VolumesRW,omitempty" yaml:"VolumesRW,omitempty"`
+ HostConfig *HostConfig `json:"HostConfig,omitempty" yaml:"HostConfig,omitempty"`
+ ExecIDs []string `json:"ExecIDs,omitempty" yaml:"ExecIDs,omitempty"`
+
+ RestartCount int `json:"RestartCount,omitempty" yaml:"RestartCount,omitempty"`
+
+ AppArmorProfile string `json:"AppArmorProfile,omitempty" yaml:"AppArmorProfile,omitempty"`
+}
+
+// RenameContainerOptions specify parameters to the RenameContainer function.
+//
+// See https://docs.docker.com/reference/api/docker_remote_api_v1.19/#rename-a-container for more details.
+/*
+-------------------------------------------------------------------------------------
+Rename a container
+POST /containers/(id)/rename
+Rename the container id to a new_name
+Example request:
+ POST /containers/e90e34656806/rename?name=new_name HTTP/1.1
+Example response:
+ HTTP/1.1 204 No Content
+
+Query Parameters:
+ name – new name for the container
+Status Codes:
+ 204 – no error
+ 404 – no such container
+ 409 – conflict, name already assigned
+ 500 – server error
+-------------------------------------------------------------------------------------
+*/
+type RenameContainerOptions struct {
+ // ID of container to rename
+ ID string `qs:"-"`
+
+ // New name
+ Name string `json:"name,omitempty" yaml:"name,omitempty"`
+}
+
+// CreateContainerOptions specify parameters to the CreateContainer function.
+//
+// See https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container for more details.
+type CreateContainerOptions struct {
+ Name string
+ Config *Config `qs:"-"`
+ HostConfig *HostConfig
+}
+
+// KeyValuePair is a type for generic key/value pairs as used in the Lxc
+// configuration
+type KeyValuePair struct {
+ Key string `json:"Key,omitempty" yaml:"Key,omitempty"`
+ Value string `json:"Value,omitempty" yaml:"Value,omitempty"`
+}
+
+// RestartPolicy represents the policy for automatically restarting a container.
+//
+// Possible values are:
+//
+// - always: the docker daemon will always restart the container
+// - on-failure: the docker daemon will restart the container on failures, at
+// most MaximumRetryCount times
+// - no: the docker daemon will not restart the container automatically
+type RestartPolicy struct {
+ Name string `json:"Name,omitempty" yaml:"Name,omitempty"`
+ MaximumRetryCount int `json:"MaximumRetryCount,omitempty" yaml:"MaximumRetryCount,omitempty"`
+}
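+
+// For illustration, a policy that restarts a failed container at most three
+// times could be written as:
+//
+//	policy := RestartPolicy{Name: "on-failure", MaximumRetryCount: 3}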
+
+// Device represents a device mapping between the Docker host and the
+// container.
+type Device struct {
+ PathOnHost string `json:"PathOnHost,omitempty" yaml:"PathOnHost,omitempty"`
+ PathInContainer string `json:"PathInContainer,omitempty" yaml:"PathInContainer,omitempty"`
+ CgroupPermissions string `json:"CgroupPermissions,omitempty" yaml:"CgroupPermissions,omitempty"`
+}
+
+// HostConfig contains the container options related to starting a container on
+// a given host
+type HostConfig struct {
+ Binds []string `json:"Binds,omitempty" yaml:"Binds,omitempty"`
+ CapAdd []string `json:"CapAdd,omitempty" yaml:"CapAdd,omitempty"`
+ CapDrop []string `json:"CapDrop,omitempty" yaml:"CapDrop,omitempty"`
+ ContainerIDFile string `json:"ContainerIDFile,omitempty" yaml:"ContainerIDFile,omitempty"`
+ LxcConf []KeyValuePair `json:"LxcConf,omitempty" yaml:"LxcConf,omitempty"`
+ Privileged bool `json:"Privileged,omitempty" yaml:"Privileged,omitempty"`
+ PortBindings map[Port][]PortBinding `json:"PortBindings,omitempty" yaml:"PortBindings,omitempty"`
+ Links []string `json:"Links,omitempty" yaml:"Links,omitempty"`
+ PublishAllPorts bool `json:"PublishAllPorts,omitempty" yaml:"PublishAllPorts,omitempty"`
+ DNS []string `json:"Dns,omitempty" yaml:"Dns,omitempty"` // For Docker API v1.10 and above only
+ DNSSearch []string `json:"DnsSearch,omitempty" yaml:"DnsSearch,omitempty"`
+ ExtraHosts []string `json:"ExtraHosts,omitempty" yaml:"ExtraHosts,omitempty"`
+ VolumesFrom []string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty"`
+ NetworkMode string `json:"NetworkMode,omitempty" yaml:"NetworkMode,omitempty"`
+ IpcMode string `json:"IpcMode,omitempty" yaml:"IpcMode,omitempty"`
+ PidMode string `json:"PidMode,omitempty" yaml:"PidMode,omitempty"`
+ RestartPolicy RestartPolicy `json:"RestartPolicy,omitempty" yaml:"RestartPolicy,omitempty"`
+ Devices []Device `json:"Devices,omitempty" yaml:"Devices,omitempty"`
+ LogConfig LogConfig `json:"LogConfig,omitempty" yaml:"LogConfig,omitempty"`
+ ReadonlyRootfs bool `json:"ReadonlyRootfs,omitempty" yaml:"ReadonlyRootfs,omitempty"`
+ SecurityOpt []string `json:"SecurityOpt,omitempty" yaml:"SecurityOpt,omitempty"`
+ CgroupParent string `json:"CgroupParent,omitempty" yaml:"CgroupParent,omitempty"`
+ Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty"`
+ MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty"`
+ CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty"`
+ CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty"`
+ CPUQuota int64 `json:"CpuQuota,omitempty" yaml:"CpuQuota,omitempty"`
+ CPUPeriod int64 `json:"CpuPeriod,omitempty" yaml:"CpuPeriod,omitempty"`
+ Ulimits []ULimit `json:"Ulimits,omitempty" yaml:"Ulimits,omitempty"`
+}
+
+// TopResult represents the list of processes running in a container, as
+// returned by /containers/<id>/top.
+//
+// See https://docs.docker.com/reference/api/docker_remote_api_v1.19/#list-processes-running-inside-a-container for more details.
+type TopResult struct {
+ Titles []string
+ Processes [][]string
+}
+
+// Stats represents container statistics, returned by /containers/<id>/stats.
+//
+// See https://docs.docker.com/reference/api/docker_remote_api_v1.19/#get-container-stats-based-on-resource-usage for more details.
+type Stats struct {
+ Read time.Time `json:"read,omitempty" yaml:"read,omitempty"`
+ Network struct {
+ RxDropped uint64 `json:"rx_dropped,omitempty" yaml:"rx_dropped,omitempty"`
+ RxBytes uint64 `json:"rx_bytes,omitempty" yaml:"rx_bytes,omitempty"`
+ RxErrors uint64 `json:"rx_errors,omitempty" yaml:"rx_errors,omitempty"`
+ TxPackets uint64 `json:"tx_packets,omitempty" yaml:"tx_packets,omitempty"`
+ TxDropped uint64 `json:"tx_dropped,omitempty" yaml:"tx_dropped,omitempty"`
+ RxPackets uint64 `json:"rx_packets,omitempty" yaml:"rx_packets,omitempty"`
+ TxErrors uint64 `json:"tx_errors,omitempty" yaml:"tx_errors,omitempty"`
+ TxBytes uint64 `json:"tx_bytes,omitempty" yaml:"tx_bytes,omitempty"`
+ } `json:"network,omitempty" yaml:"network,omitempty"`
+ MemoryStats struct {
+ Stats struct {
+ TotalPgmafault uint64 `json:"total_pgmafault,omitempty" yaml:"total_pgmafault,omitempty"`
+ Cache uint64 `json:"cache,omitempty" yaml:"cache,omitempty"`
+ MappedFile uint64 `json:"mapped_file,omitempty" yaml:"mapped_file,omitempty"`
+ TotalInactiveFile uint64 `json:"total_inactive_file,omitempty" yaml:"total_inactive_file,omitempty"`
+ Pgpgout uint64 `json:"pgpgout,omitempty" yaml:"pgpgout,omitempty"`
+ Rss uint64 `json:"rss,omitempty" yaml:"rss,omitempty"`
+ TotalMappedFile uint64 `json:"total_mapped_file,omitempty" yaml:"total_mapped_file,omitempty"`
+ Writeback uint64 `json:"writeback,omitempty" yaml:"writeback,omitempty"`
+ Unevictable uint64 `json:"unevictable,omitempty" yaml:"unevictable,omitempty"`
+ Pgpgin uint64 `json:"pgpgin,omitempty" yaml:"pgpgin,omitempty"`
+ TotalUnevictable uint64 `json:"total_unevictable,omitempty" yaml:"total_unevictable,omitempty"`
+ Pgmajfault uint64 `json:"pgmajfault,omitempty" yaml:"pgmajfault,omitempty"`
+ TotalRss uint64 `json:"total_rss,omitempty" yaml:"total_rss,omitempty"`
+ TotalRssHuge uint64 `json:"total_rss_huge,omitempty" yaml:"total_rss_huge,omitempty"`
+ TotalWriteback uint64 `json:"total_writeback,omitempty" yaml:"total_writeback,omitempty"`
+ TotalInactiveAnon uint64 `json:"total_inactive_anon,omitempty" yaml:"total_inactive_anon,omitempty"`
+ RssHuge uint64 `json:"rss_huge,omitempty" yaml:"rss_huge,omitempty"`
+ HierarchicalMemoryLimit uint64 `json:"hierarchical_memory_limit,omitempty" yaml:"hierarchical_memory_limit,omitempty"`
+ TotalPgfault uint64 `json:"total_pgfault,omitempty" yaml:"total_pgfault,omitempty"`
+ TotalActiveFile uint64 `json:"total_active_file,omitempty" yaml:"total_active_file,omitempty"`
+ ActiveAnon uint64 `json:"active_anon,omitempty" yaml:"active_anon,omitempty"`
+ TotalActiveAnon uint64 `json:"total_active_anon,omitempty" yaml:"total_active_anon,omitempty"`
+ TotalPgpgout uint64 `json:"total_pgpgout,omitempty" yaml:"total_pgpgout,omitempty"`
+ TotalCache uint64 `json:"total_cache,omitempty" yaml:"total_cache,omitempty"`
+ InactiveAnon uint64 `json:"inactive_anon,omitempty" yaml:"inactive_anon,omitempty"`
+ ActiveFile uint64 `json:"active_file,omitempty" yaml:"active_file,omitempty"`
+ Pgfault uint64 `json:"pgfault,omitempty" yaml:"pgfault,omitempty"`
+ InactiveFile uint64 `json:"inactive_file,omitempty" yaml:"inactive_file,omitempty"`
+ TotalPgpgin uint64 `json:"total_pgpgin,omitempty" yaml:"total_pgpgin,omitempty"`
+ } `json:"stats,omitempty" yaml:"stats,omitempty"`
+ MaxUsage uint64 `json:"max_usage,omitempty" yaml:"max_usage,omitempty"`
+ Usage uint64 `json:"usage,omitempty" yaml:"usage,omitempty"`
+ Failcnt uint64 `json:"failcnt,omitempty" yaml:"failcnt,omitempty"`
+ Limit uint64 `json:"limit,omitempty" yaml:"limit,omitempty"`
+ } `json:"memory_stats,omitempty" yaml:"memory_stats,omitempty"`
+ BlkioStats struct {
+ IOServiceBytesRecursive []BlkioStatsEntry `json:"io_service_bytes_recursive,omitempty" yaml:"io_service_bytes_recursive,omitempty"`
+ IOServicedRecursive []BlkioStatsEntry `json:"io_serviced_recursive,omitempty" yaml:"io_serviced_recursive,omitempty"`
+ IOQueueRecursive []BlkioStatsEntry `json:"io_queue_recursive,omitempty" yaml:"io_queue_recursive,omitempty"`
+ IOServiceTimeRecursive []BlkioStatsEntry `json:"io_service_time_recursive,omitempty" yaml:"io_service_time_recursive,omitempty"`
+ IOWaitTimeRecursive []BlkioStatsEntry `json:"io_wait_time_recursive,omitempty" yaml:"io_wait_time_recursive,omitempty"`
+ IOMergedRecursive []BlkioStatsEntry `json:"io_merged_recursive,omitempty" yaml:"io_merged_recursive,omitempty"`
+ IOTimeRecursive []BlkioStatsEntry `json:"io_time_recursive,omitempty" yaml:"io_time_recursive,omitempty"`
+ SectorsRecursive []BlkioStatsEntry `json:"sectors_recursive,omitempty" yaml:"sectors_recursive,omitempty"`
+ } `json:"blkio_stats,omitempty" yaml:"blkio_stats,omitempty"`
+ CPUStats struct {
+ CPUUsage struct {
+ PercpuUsage []uint64 `json:"percpu_usage,omitempty" yaml:"percpu_usage,omitempty"`
+ UsageInUsermode uint64 `json:"usage_in_usermode,omitempty" yaml:"usage_in_usermode,omitempty"`
+ TotalUsage uint64 `json:"total_usage,omitempty" yaml:"total_usage,omitempty"`
+ UsageInKernelmode uint64 `json:"usage_in_kernelmode,omitempty" yaml:"usage_in_kernelmode,omitempty"`
+ } `json:"cpu_usage,omitempty" yaml:"cpu_usage,omitempty"`
+ SystemCPUUsage uint64 `json:"system_cpu_usage,omitempty" yaml:"system_cpu_usage,omitempty"`
+ ThrottlingData struct {
+ Periods uint64 `json:"periods,omitempty"`
+ ThrottledPeriods uint64 `json:"throttled_periods,omitempty"`
+ ThrottledTime uint64 `json:"throttled_time,omitempty"`
+ } `json:"throttling_data,omitempty" yaml:"throttling_data,omitempty"`
+ } `json:"cpu_stats,omitempty" yaml:"cpu_stats,omitempty"`
+}
+
+// BlkioStatsEntry is a stats entry for blkio_stats
+type BlkioStatsEntry struct {
+ Major uint64 `json:"major,omitempty" yaml:"major,omitempty"`
+ Minor uint64 `json:"minor,omitempty" yaml:"minor,omitempty"`
+ Op string `json:"op,omitempty" yaml:"op,omitempty"`
+ Value uint64 `json:"value,omitempty" yaml:"value,omitempty"`
+}
+
+// StatsOptions specify parameters to the Stats function.
+//
+// See http://goo.gl/DFMiYD for more details.
+type StatsOptions struct {
+ ID string
+ Stats chan<- *Stats
+ Stream bool
+}
+
+// KillContainerOptions represents the set of options that can be used in a
+// call to KillContainer.
+//
+// See https://docs.docker.com/reference/api/docker_remote_api_v1.19/#kill-a-container for more details.
+type KillContainerOptions struct {
+ // The ID of the container.
+ ID string `qs:"-"`
+
+ // The signal to send to the container. When omitted, Docker server
+ // will assume SIGKILL.
+ Signal Signal
+}
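+
+// A hedged example, sending SIGTERM instead of the default SIGKILL (the
+// container ID is hypothetical):
+//
+//	opts := KillContainerOptions{ID: "e90e34656806", Signal: SIGTERM}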
+
+// RemoveContainerOptions encapsulates options to remove a container.
+//
+// See https://docs.docker.com/reference/api/docker_remote_api_v1.19/#remove-a-container for more details.
+type RemoveContainerOptions struct {
+ // The ID of the container.
+ ID string `qs:"-"`
+
+ // A flag that indicates whether Docker should remove the volumes
+ // associated to the container.
+ RemoveVolumes bool `qs:"v"`
+
+ // A flag that indicates whether Docker should remove the container
+ // even if it is currently running.
+ Force bool
+}
+
+// CopyFromContainerOptions is the set of options that can be used when copying
+// files or folders from a container.
+//
+// See https://docs.docker.com/reference/api/docker_remote_api_v1.19/#copy-files-or-folders-from-a-container for more details.
+type CopyFromContainerOptions struct {
+ OutputStream io.Writer `json:"-"`
+ Container string `json:"-"`
+ Resource string
+}
+
+// CommitContainerOptions aggregates parameters to the CommitContainer method.
+//
+// See http://goo.gl/Jn8pe8 for more details.
+type CommitContainerOptions struct {
+ Container string
+ Repository string `qs:"repo"`
+ Tag string
+ Message string `qs:"m"`
+ Author string
+ Run *Config `qs:"-"`
+}
+
+// AttachToContainerOptions is the set of options that can be used when
+// attaching to a container.
+//
+// See http://goo.gl/RRAhws for more details.
+type AttachToContainerOptions struct {
+ Container string `qs:"-"`
+ InputStream io.Reader `qs:"-"`
+ OutputStream io.Writer `qs:"-"`
+ ErrorStream io.Writer `qs:"-"`
+
+ // Get container logs, sending them to OutputStream.
+ Logs bool
+
+ // Stream the response?
+ Stream bool
+
+ // Attach to stdin, and use InputStream.
+ Stdin bool
+
+ // Attach to stdout, and use OutputStream.
+ Stdout bool
+
+ // Attach to stderr, and use ErrorStream.
+ Stderr bool
+
+ // If set, after a successful connect, a sentinel will be sent and then the
+ // client will block on receive before continuing.
+ //
+ // It must be an unbuffered channel. Using a buffered channel can lead
+ // to unexpected behavior.
+ Success chan struct{}
+
+ // Use raw terminal? Usually true when the container contains a TTY.
+ RawTerminal bool `qs:"-"`
+}
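+
+// A sketch of the Success handshake described above, assuming the
+// AttachToContainer method defined elsewhere in this package is run in a
+// goroutine (values are illustrative):
+//
+//	success := make(chan struct{}) // must be unbuffered
+//	go client.AttachToContainer(AttachToContainerOptions{
+//		Container:    "e90e34656806",
+//		OutputStream: os.Stdout,
+//		Stdout:       true,
+//		Stream:       true,
+//		Success:      success,
+//	})
+//	<-success             // wait for the sentinel...
+//	success <- struct{}{} // ...then let the client continue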
+
+// LogsOptions represents the set of options used when getting logs from a
+// container.
+//
+// See http://goo.gl/rLhKSU for more details.
+type LogsOptions struct {
+ Container string `qs:"-"`
+ OutputStream io.Writer `qs:"-"`
+ ErrorStream io.Writer `qs:"-"`
+ Follow bool
+ Stdout bool
+ Stderr bool
+ Timestamps bool
+ Tail string
+
+ // Use raw terminal? Usually true when the container contains a TTY.
+ RawTerminal bool `qs:"-"`
+}
+
+// ExportContainerOptions is the set of parameters to the ExportContainer
+// method.
+//
+// See http://goo.gl/hnzE62 for more details.
+type ExportContainerOptions struct {
+ ID string
+ OutputStream io.Writer
+}
+
+// NoSuchContainer is the error returned when a given container does not exist.
+type NoSuchContainer struct {
+ ID string
+ Err error
+}
+
+// ContainerAlreadyRunning is the error returned when a given container is
+// already running.
+type ContainerAlreadyRunning struct {
+ ID string
+}
+
+// ContainerNotRunning is the error returned when a given container is not
+// running.
+type ContainerNotRunning struct {
+ ID string
+}
+
+//****************************************************************//
+//env use type
+//****************************************************************//
+
+// Env represents a list of key-value pairs, each represented in the form KEY=VALUE.
+type Env []string
+
+//****************************************************************//
+//event use type
+//****************************************************************//
+
+// APIEvents represents an event returned by the API.
+type APIEvents struct {
+ Status string `json:"Status,omitempty" yaml:"Status,omitempty"`
+ ID string `json:"ID,omitempty" yaml:"ID,omitempty"`
+ From string `json:"From,omitempty" yaml:"From,omitempty"`
+ Time int64 `json:"Time,omitempty" yaml:"Time,omitempty"`
+}
+
+type eventMonitoringState struct {
+ sync.RWMutex
+ sync.WaitGroup
+ enabled bool
+ lastSeen *int64
+ C chan *APIEvents
+ errC chan error
+ listeners []chan<- *APIEvents
+}
+
+//****************************************************************//
+//exec use type
+//****************************************************************//
+
+// CreateExecOptions specify parameters to the CreateExecContainer function.
+//
+// See http://goo.gl/8izrzI for more details
+type CreateExecOptions struct {
+ AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty"`
+ AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty"`
+ AttachStderr bool `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty"`
+ Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"`
+ Cmd []string `json:"Cmd,omitempty" yaml:"Cmd,omitempty"`
+ Container string `json:"Container,omitempty" yaml:"Container,omitempty"`
+ User string `json:"User,omitempty" yaml:"User,omitempty"`
+}
+
+// StartExecOptions specify parameters to the StartExecContainer function.
+//
+// See http://goo.gl/JW8Lxl for more details
+type StartExecOptions struct {
+ Detach bool `json:"Detach,omitempty" yaml:"Detach,omitempty"`
+
+ Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"`
+
+ InputStream io.Reader `qs:"-"`
+ OutputStream io.Writer `qs:"-"`
+ ErrorStream io.Writer `qs:"-"`
+
+ // Use raw terminal? Usually true when the container contains a TTY.
+ RawTerminal bool `qs:"-"`
+
+ // If set, after a successful connect, a sentinel will be sent and then the
+ // client will block on receive before continuing.
+ //
+ // It must be an unbuffered channel. Using a buffered channel can lead
+ // to unexpected behavior.
+ Success chan struct{} `json:"-"`
+}
+
+// Exec is the type representing a `docker exec` instance and containing the
+// instance ID
+type Exec struct {
+ ID string `json:"Id,omitempty" yaml:"Id,omitempty"`
+}
+
+// ExecProcessConfig is a type describing the command associated with an Exec
+// instance. It's used in the ExecInspect type.
+//
+// See http://goo.gl/ypQULN for more details
+type ExecProcessConfig struct {
+ Privileged bool `json:"privileged,omitempty" yaml:"privileged,omitempty"`
+ User string `json:"user,omitempty" yaml:"user,omitempty"`
+ Tty bool `json:"tty,omitempty" yaml:"tty,omitempty"`
+ EntryPoint string `json:"entrypoint,omitempty" yaml:"entrypoint,omitempty"`
+ Arguments []string `json:"arguments,omitempty" yaml:"arguments,omitempty"`
+}
+
+// ExecInspect is a type with details about an exec instance, including the
+// exit code if the command has finished running. It's returned by an API
+// call to /exec/(id)/json
+//
+// See http://goo.gl/ypQULN for more details
+type ExecInspect struct {
+ ID string `json:"ID,omitempty" yaml:"ID,omitempty"`
+ Running bool `json:"Running,omitempty" yaml:"Running,omitempty"`
+ ExitCode int `json:"ExitCode,omitempty" yaml:"ExitCode,omitempty"`
+ OpenStdin bool `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty"`
+ OpenStderr bool `json:"OpenStderr,omitempty" yaml:"OpenStderr,omitempty"`
+ OpenStdout bool `json:"OpenStdout,omitempty" yaml:"OpenStdout,omitempty"`
+ ProcessConfig ExecProcessConfig `json:"ProcessConfig,omitempty" yaml:"ProcessConfig,omitempty"`
+ Container Container `json:"Container,omitempty" yaml:"Container,omitempty"`
+}
+
+// NoSuchExec is the error returned when a given exec instance does not exist.
+type NoSuchExec struct {
+ ID string
+}
+
+//****************************************************************//
+//image use type
+//****************************************************************//
+
+// APIImages represents an image returned by the ListImages call.
+type APIImages struct {
+ ID string `json:"Id" yaml:"Id"`
+ RepoTags []string `json:"RepoTags,omitempty" yaml:"RepoTags,omitempty"`
+ Created int64 `json:"Created,omitempty" yaml:"Created,omitempty"`
+ Size int64 `json:"Size,omitempty" yaml:"Size,omitempty"`
+ VirtualSize int64 `json:"VirtualSize,omitempty" yaml:"VirtualSize,omitempty"`
+ ParentID string `json:"ParentId,omitempty" yaml:"ParentId,omitempty"`
+ RepoDigests []string `json:"RepoDigests,omitempty" yaml:"RepoDigests,omitempty"`
+}
+
+// Image is the type representing a docker image and its various properties
+type Image struct {
+ ID string `json:"Id" yaml:"Id"`
+ Parent string `json:"Parent,omitempty" yaml:"Parent,omitempty"`
+ Comment string `json:"Comment,omitempty" yaml:"Comment,omitempty"`
+ Created time.Time `json:"Created,omitempty" yaml:"Created,omitempty"`
+ Container string `json:"Container,omitempty" yaml:"Container,omitempty"`
+ ContainerConfig Config `json:"ContainerConfig,omitempty" yaml:"ContainerConfig,omitempty"`
+ DockerVersion string `json:"DockerVersion,omitempty" yaml:"DockerVersion,omitempty"`
+ Author string `json:"Author,omitempty" yaml:"Author,omitempty"`
+ Config *Config `json:"Config,omitempty" yaml:"Config,omitempty"`
+ Architecture string `json:"Architecture,omitempty" yaml:"Architecture,omitempty"`
+ Size int64 `json:"Size,omitempty" yaml:"Size,omitempty"`
+ VirtualSize int64 `json:"VirtualSize,omitempty" yaml:"VirtualSize,omitempty"`
+}
+
+// ImageHistory represents a layer in an image's history returned by the
+// ImageHistory call.
+type ImageHistory struct {
+ ID string `json:"Id" yaml:"Id"`
+ Tags []string `json:"Tags,omitempty" yaml:"Tags,omitempty"`
+ Created int64 `json:"Created,omitempty" yaml:"Created,omitempty"`
+ CreatedBy string `json:"CreatedBy,omitempty" yaml:"CreatedBy,omitempty"`
+ Size int64 `json:"Size,omitempty" yaml:"Size,omitempty"`
+}
+
+// ImagePre012 serves the same purpose as the Image type except that it is for
+// earlier versions of the Docker API (pre-012 to be specific)
+type ImagePre012 struct {
+ ID string `json:"id"`
+ Parent string `json:"parent,omitempty"`
+ Comment string `json:"comment,omitempty"`
+ Created time.Time `json:"created"`
+ Container string `json:"container,omitempty"`
+ ContainerConfig Config `json:"container_config,omitempty"`
+ DockerVersion string `json:"docker_version,omitempty"`
+ Author string `json:"author,omitempty"`
+ Config *Config `json:"config,omitempty"`
+ Architecture string `json:"architecture,omitempty"`
+ Size int64 `json:"size,omitempty"`
+}
+
+// ListImagesOptions specify parameters to the ListImages function.
+//
+// See http://goo.gl/HRVN1Z for more details.
+type ListImagesOptions struct {
+ All bool
+ Filters map[string][]string
+ Digests bool
+}
+
+// RemoveImageOptions presents the set of options available for removing an image
+// from a registry.
+//
+// See http://goo.gl/6V48bF for more details.
+type RemoveImageOptions struct {
+ Force bool `qs:"force"`
+ NoPrune bool `qs:"noprune"`
+}
+
+// PushImageOptions represents options to use in the PushImage method.
+//
+// See http://goo.gl/pN8A3P for more details.
+type PushImageOptions struct {
+ // Name of the image
+ Name string
+
+ // Tag of the image
+ Tag string
+
+ // Registry server to push the image
+ Registry string
+
+ OutputStream io.Writer `qs:"-"`
+ RawJSONStream bool `qs:"-"`
+}
+
+// PullImageOptions presents the set of options available for pulling an image
+// from a registry.
+//
+// See http://goo.gl/ACyYNS for more details.
+type PullImageOptions struct {
+ Repository string `qs:"fromImage"`
+ Registry string
+ Tag string
+ OutputStream io.Writer `qs:"-"`
+ RawJSONStream bool `qs:"-"`
+}
+
+// LoadImageOptions represents the options for the LoadImage Docker API call
+//
+// See http://goo.gl/Y8NNCq for more details.
+type LoadImageOptions struct {
+ InputStream io.Reader
+}
+
+// ExportImageOptions represents the options for the ExportImage Docker API call
+//
+// See http://goo.gl/mi6kvk for more details.
+type ExportImageOptions struct {
+ Name string
+ OutputStream io.Writer
+}
+
+// ExportImagesOptions represents the options for the ExportImages Docker API call
+//
+// See http://goo.gl/YeZzQK for more details.
+type ExportImagesOptions struct {
+ Names []string
+ OutputStream io.Writer `qs:"-"`
+}
+
+// ImportImageOptions presents the set of information available for importing
+// an image from a source file or stdin.
+//
+// See http://goo.gl/PhBKnS for more details.
+type ImportImageOptions struct {
+ Repository string `qs:"repo"`
+ Source string `qs:"fromSrc"`
+ Tag string `qs:"tag"`
+
+ InputStream io.Reader `qs:"-"`
+ OutputStream io.Writer `qs:"-"`
+ RawJSONStream bool `qs:"-"`
+}
+
+// BuildImageOptions presents the set of information available for building an
+// image from a tarfile with a Dockerfile in it.
+//
+// For more details about the Docker building process, see
+// http://goo.gl/tlPXPu.
+type BuildImageOptions struct {
+ Name string `qs:"t"`
+ Dockerfile string `qs:"dockerfile"`
+ NoCache bool `qs:"nocache"`
+ SuppressOutput bool `qs:"q"`
+ Pull bool `qs:"pull"`
+ RmTmpContainer bool `qs:"rm"`
+ ForceRmTmpContainer bool `qs:"forcerm"`
+ Memory int64 `qs:"memory"`
+ Memswap int64 `qs:"memswap"`
+ CPUShares int64 `qs:"cpushares"`
+ CPUSetCPUs string `qs:"cpusetcpus"`
+ InputStream io.Reader `qs:"-"`
+ OutputStream io.Writer `qs:"-"`
+ RawJSONStream bool `qs:"-"`
+ Remote string `qs:"remote"`
+ Auth AuthConfiguration `qs:"-"` // for older docker X-Registry-Auth header
+ AuthConfigs AuthConfigurations `qs:"-"` // for newer docker X-Registry-Config header
+ ContextDir string `qs:"-"`
+}
+
+// TagImageOptions present the set of options to tag an image.
+//
+// See http://goo.gl/5g6qFy for more details.
+type TagImageOptions struct {
+ Repo string
+ Tag string
+ Force bool
+}
+
+// APIImageSearch reflects the result of a search on Docker Hub
+//
+// See http://goo.gl/xI5lLZ for more details.
+type APIImageSearch struct {
+ Description string `json:"description,omitempty" yaml:"description,omitempty"`
+ IsOfficial bool `json:"is_official,omitempty" yaml:"is_official,omitempty"`
+ IsAutomated bool `json:"is_automated,omitempty" yaml:"is_automated,omitempty"`
+ Name string `json:"name,omitempty" yaml:"name,omitempty"`
+ StarCount int `json:"star_count,omitempty" yaml:"star_count,omitempty"`
+}
+
+//****************************************************************//
+//signal use type
+//****************************************************************//
+
+// Signal represents a signal that can be sent to the container in a
+// KillContainer call.
+type Signal int
+
+//****************************************************************//
+//tls use type
+//****************************************************************//
+
+type tlsClientCon struct {
+ *tls.Conn
+ rawConn net.Conn
+}
+
+//****************************************************************//
+//change use const
+//****************************************************************//
+
+const (
+ // ChangeModify is the ChangeType for container modifications
+ ChangeModify ChangeType = iota
+
+ // ChangeAdd is the ChangeType for additions to a container
+ ChangeAdd
+
+ // ChangeDelete is the ChangeType for deletions from a container
+ ChangeDelete
+)
+
+//****************************************************************//
+//client use const
+//****************************************************************//
+
+const generatorAgent = "go-generator-client"
+
+//****************************************************************//
+//event use const
+//****************************************************************//
+
+const (
+ maxMonitorConnRetries = 5
+ retryInitialWaitTime = 10.
+)
+
+//****************************************************************//
+//signal use const
+//****************************************************************//
+
+// These values represent all signals available on Linux, where containers will
+// be running.
+const (
+ SIGABRT = Signal(0x6)
+ SIGALRM = Signal(0xe)
+ SIGBUS = Signal(0x7)
+ SIGCHLD = Signal(0x11)
+ SIGCLD = Signal(0x11)
+ SIGCONT = Signal(0x12)
+ SIGFPE = Signal(0x8)
+ SIGHUP = Signal(0x1)
+ SIGILL = Signal(0x4)
+ SIGINT = Signal(0x2)
+ SIGIO = Signal(0x1d)
+ SIGIOT = Signal(0x6)
+ SIGKILL = Signal(0x9)
+ SIGPIPE = Signal(0xd)
+ SIGPOLL = Signal(0x1d)
+ SIGPROF = Signal(0x1b)
+ SIGPWR = Signal(0x1e)
+ SIGQUIT = Signal(0x3)
+ SIGSEGV = Signal(0xb)
+ SIGSTKFLT = Signal(0x10)
+ SIGSTOP = Signal(0x13)
+ SIGSYS = Signal(0x1f)
+ SIGTERM = Signal(0xf)
+ SIGTRAP = Signal(0x5)
+ SIGTSTP = Signal(0x14)
+ SIGTTIN = Signal(0x15)
+ SIGTTOU = Signal(0x16)
+ SIGUNUSED = Signal(0x1f)
+ SIGURG = Signal(0x17)
+ SIGUSR1 = Signal(0xa)
+ SIGUSR2 = Signal(0xc)
+ SIGVTALRM = Signal(0x1a)
+ SIGWINCH = Signal(0x1c)
+ SIGXCPU = Signal(0x18)
+ SIGXFSZ = Signal(0x19)
+)
+
+//****************************************************************//
+//client use var
+//****************************************************************//
+
+var (
+ // ErrInvalidEndpoint is returned when the endpoint is not a valid HTTP URL.
+ ErrInvalidEndpoint = errors.New("invalid endpoint")
+
+ // ErrConnectionRefused is returned when the client cannot connect to the given endpoint.
+ ErrConnectionRefused = errors.New("cannot connect to Docker endpoint")
+
+ apiVersion112, _ = NewAPIVersion("1.12")
+)
+
+//****************************************************************//
+//Container use var
+//****************************************************************//
+
+// ErrContainerAlreadyExists is the error returned by CreateContainer when the container already exists.
+var ErrContainerAlreadyExists = errors.New("container already exists")
+
+//****************************************************************//
+//event use var
+//****************************************************************//
+
+var (
+ // ErrNoListeners is the error returned when no listeners are available
+ // to receive an event.
+ ErrNoListeners = errors.New("no listeners present to receive event")
+
+ // ErrListenerAlreadyExists is the error returned when the listener already
+ // exists.
+ ErrListenerAlreadyExists = errors.New("listener already exists for docker events")
+
+ // EOFEvent is sent when the event listener receives an EOF error.
+ EOFEvent = &APIEvents{
+ Status: "EOF",
+ }
+)
+
+//****************************************************************//
+//image use var
+//****************************************************************//
+
+var (
+ // ErrNoSuchImage is the error returned when the image does not exist.
+ ErrNoSuchImage = errors.New("no such image")
+
+ // ErrMissingRepo is the error returned when the remote repository is
+ // missing.
+ ErrMissingRepo = errors.New("missing remote repository e.g. 'github.com/user/repo'")
+
+ // ErrMissingOutputStream is the error returned when no output stream
+ // is provided to some calls, like BuildImage.
+ ErrMissingOutputStream = errors.New("missing output stream")
+
+ // ErrMultipleContexts is the error returned when both a ContextDir and
+ // InputStream are provided in BuildImageOptions
+ ErrMultipleContexts = errors.New("image build may not be provided BOTH context dir and input stream")
+
+ // ErrMustSpecifyNames is the error returned when the Names field on
+ // ExportImagesOptions is nil or empty
+ ErrMustSpecifyNames = errors.New("must specify at least one name to export")
+)
+
+//****************************************************************//
+//auth need func
+//****************************************************************//
+
+// NewAuthConfigurationsFromDockerCfg returns AuthConfigurations from the
+// ~/.dockercfg file.
+func NewAuthConfigurationsFromDockerCfg() (*AuthConfigurations, error) {
+ p := path.Join(os.Getenv("HOME"), ".dockercfg")
+ r, err := os.Open(p)
+ if err != nil {
+ return nil, err
+ }
+ return NewAuthConfigurations(r)
+}
+
+// NewAuthConfigurations returns AuthConfigurations from a JSON encoded string in the
+// same format as the .dockercfg file.
+func NewAuthConfigurations(r io.Reader) (*AuthConfigurations, error) {
+ var auth *AuthConfigurations
+ var confs map[string]dockerConfig
+ if err := json.NewDecoder(r).Decode(&confs); err != nil {
+ return nil, err
+ }
+ auth, err := authConfigs(confs)
+ if err != nil {
+ return nil, err
+ }
+ return auth, nil
+}
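+
+// The reader is expected to carry the classic .dockercfg layout; an
+// illustrative document (dummy credentials) looks like:
+//
+//	{
+//		"https://index.docker.io/v1/": {
+//			"auth": "dXNlcjpwYXNz",
+//			"email": "user@example.com"
+//		}
+//	}
+//
+// where "auth" is base64("username:password"), here base64("user:pass").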
+
+// authConfigs converts a dockerConfigs map to a AuthConfigurations object.
+func authConfigs(confs map[string]dockerConfig) (*AuthConfigurations, error) {
+ c := &AuthConfigurations{
+ Configs: make(map[string]AuthConfiguration),
+ }
+ for reg, conf := range confs {
+ data, err := base64.StdEncoding.DecodeString(conf.Auth)
+ if err != nil {
+ return nil, err
+ }
+ userpass := strings.SplitN(string(data), ":", 2)
+ if len(userpass) != 2 {
+ return nil, fmt.Errorf("malformed auth string for %s: expected base64(username:password)", reg)
+ }
+ c.Configs[reg] = AuthConfiguration{
+ Email: conf.Email,
+ Username: userpass[0],
+ Password: userpass[1],
+ ServerAddress: reg,
+ }
+ }
+ return c, nil
+}
+
+// AuthCheck validates the given credentials. It returns nil if successful.
+//
+// See https://goo.gl/vPoEfJ for more details.
+func (c *Client) AuthCheck(conf *AuthConfiguration) error {
+ if conf == nil {
+ return fmt.Errorf("conf is nil")
+ }
+ body, statusCode, err := c.do("POST", "/auth", doOptions{data: conf})
+ if err != nil {
+ return err
+ }
+ if statusCode >= 400 {
+ return fmt.Errorf("auth error (%d): %s", statusCode, body)
+ }
+ return nil
+}
+
+//****************************************************************//
+//change need func
+//****************************************************************//
+
+func (change *Change) String() string {
+ var kind string
+ switch change.Kind {
+ case ChangeModify:
+ kind = "C"
+ case ChangeAdd:
+ kind = "A"
+ case ChangeDelete:
+ kind = "D"
+ }
+ return fmt.Sprintf("%s %s", kind, change.Path)
+}
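+
+// For example, with the Change type declared elsewhere in this package
+// (values are hypothetical):
+//
+//	c := Change{Path: "/etc/hosts", Kind: ChangeModify}
+//	fmt.Println(c.String()) // prints "C /etc/hosts"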
+
+//****************************************************************//
+//client need func
+//****************************************************************//
+
+// NewAPIVersion returns an instance of APIVersion for the given string.
+//
+// The given string must be in the form <major>.<minor>.<patch>, where <major>,
+// <minor> and <patch> are integer numbers.
+func NewAPIVersion(input string) (APIVersion, error) {
+ if !strings.Contains(input, ".") {
+ return nil, fmt.Errorf("Unable to parse version %q", input)
+ }
+ arr := strings.Split(input, ".")
+ ret := make(APIVersion, len(arr))
+ var err error
+ for i, val := range arr {
+ ret[i], err = strconv.Atoi(val)
+ if err != nil {
+ return nil, fmt.Errorf("Unable to parse version %q: %q is not an integer", input, val)
+ }
+ }
+ return ret, nil
+}
+
+func (version APIVersion) String() string {
+ var str string
+ for i, val := range version {
+ str += strconv.Itoa(val)
+ if i < len(version)-1 {
+ str += "."
+ }
+ }
+ return str
+}
+
+// LessThan is a function for comparing APIVersion structs
+func (version APIVersion) LessThan(other APIVersion) bool {
+ return version.compare(other) < 0
+}
+
+// LessThanOrEqualTo is a function for comparing APIVersion structs
+func (version APIVersion) LessThanOrEqualTo(other APIVersion) bool {
+ return version.compare(other) <= 0
+}
+
+// GreaterThan is a function for comparing APIVersion structs
+func (version APIVersion) GreaterThan(other APIVersion) bool {
+ return version.compare(other) > 0
+}
+
+// GreaterThanOrEqualTo is a function for comparing APIVersion structs
+func (version APIVersion) GreaterThanOrEqualTo(other APIVersion) bool {
+ return version.compare(other) >= 0
+}
+
+func (version APIVersion) compare(other APIVersion) int {
+ for i, v := range version {
+ if i <= len(other)-1 {
+ otherVersion := other[i]
+
+ if v < otherVersion {
+ return -1
+ } else if v > otherVersion {
+ return 1
+ }
+ }
+ }
+ if len(version) > len(other) {
+ return 1
+ }
+ if len(version) < len(other) {
+ return -1
+ }
+ return 0
+}
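+
+// A short sketch of the comparison helpers above:
+//
+//	v112, _ := NewAPIVersion("1.12")
+//	v119, _ := NewAPIVersion("1.19")
+//	v112.LessThan(v119)             // true
+//	v119.GreaterThanOrEqualTo(v112) // true
+//	v119.String()                   // "1.19"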
+
+// NewClient returns a Client instance ready for communication with the given
+// server endpoint. It will use the latest remote API version available in the
+// server.
+func NewClient(endpoint string) (*Client, error) {
+ client, err := NewVersionedClient(endpoint, "")
+ if err != nil {
+ return nil, err
+ }
+ client.SkipServerVersionCheck = true
+ return client, nil
+}
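+
+// Minimal usage sketch (the endpoint value is illustrative):
+//
+//	client, err := NewClient("unix:///var/run/docker.sock")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if err := client.Ping(); err != nil {
+//		log.Fatal(err)
+//	}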
+
+// NewTLSClient returns a Client instance ready for TLS communications with the given
+// server endpoint, key and certificates. It will use the latest remote API version
+// available in the server.
+func NewTLSClient(endpoint string, cert, key, ca string) (*Client, error) {
+ client, err := NewVersionedTLSClient(endpoint, cert, key, ca, "")
+ if err != nil {
+ return nil, err
+ }
+ client.SkipServerVersionCheck = true
+ return client, nil
+}
+
+// NewTLSClientFromBytes returns a Client instance ready for TLS communications with the given
+// server endpoint, key and certificates (passed inline to the function as opposed to being
+// read from a local file). It will use the latest remote API version available in the server.
+func NewTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock, caPEMCert []byte) (*Client, error) {
+ client, err := NewVersionedTLSClientFromBytes(endpoint, certPEMBlock, keyPEMBlock, caPEMCert, "")
+ if err != nil {
+ return nil, err
+ }
+ client.SkipServerVersionCheck = true
+ return client, nil
+}
+
+// NewVersionedClient returns a Client instance ready for communication with
+// the given server endpoint, using a specific remote API version.
+func NewVersionedClient(endpoint string, apiVersionString string) (*Client, error) {
+ u, err := parseEndpoint(endpoint, false)
+ if err != nil {
+ return nil, err
+ }
+ var requestedAPIVersion APIVersion
+ if strings.Contains(apiVersionString, ".") {
+ requestedAPIVersion, err = NewAPIVersion(apiVersionString)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ tr := &http.Transport{}
+ return &Client{
+ HTTPClient: http.DefaultClient,
+ transport: tr,
+ endpoint: endpoint,
+ endpointURL: u,
+ eventMonitor: new(eventMonitoringState),
+ requestedAPIVersion: requestedAPIVersion,
+ }, nil
+}
+
+// NewVersionnedTLSClient has been DEPRECATED, please use NewVersionedTLSClient.
+func NewVersionnedTLSClient(endpoint string, cert, key, ca, apiVersionString string) (*Client, error) {
+ return NewVersionedTLSClient(endpoint, cert, key, ca, apiVersionString)
+}
+
+// NewVersionedTLSClient returns a Client instance ready for TLS communications with the given
+// server endpoint, key and certificates, using a specific remote API version.
+func NewVersionedTLSClient(endpoint string, cert, key, ca, apiVersionString string) (*Client, error) {
+ certPEMBlock, err := ioutil.ReadFile(cert)
+ if err != nil {
+ return nil, err
+ }
+ keyPEMBlock, err := ioutil.ReadFile(key)
+ if err != nil {
+ return nil, err
+ }
+ caPEMCert, err := ioutil.ReadFile(ca)
+ if err != nil {
+ return nil, err
+ }
+ return NewVersionedTLSClientFromBytes(endpoint, certPEMBlock, keyPEMBlock, caPEMCert, apiVersionString)
+}
+
+// NewVersionedTLSClientFromBytes returns a Client instance ready for TLS communications with the given
+// server endpoint, key and certificates (passed inline to the function as opposed to being
+// read from a local file), using a specific remote API version.
+func NewVersionedTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock, caPEMCert []byte, apiVersionString string) (*Client, error) {
+ u, err := parseEndpoint(endpoint, true)
+ if err != nil {
+ return nil, err
+ }
+ var requestedAPIVersion APIVersion
+ if strings.Contains(apiVersionString, ".") {
+ requestedAPIVersion, err = NewAPIVersion(apiVersionString)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if certPEMBlock == nil || keyPEMBlock == nil {
+ return nil, errors.New("Both cert and key are required")
+ }
+ tlsCert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock)
+ if err != nil {
+ return nil, err
+ }
+ tlsConfig := &tls.Config{Certificates: []tls.Certificate{tlsCert}}
+ if caPEMCert == nil {
+ tlsConfig.InsecureSkipVerify = true
+ } else {
+ caPool := x509.NewCertPool()
+ if !caPool.AppendCertsFromPEM(caPEMCert) {
+ return nil, errors.New("Could not add RootCA pem")
+ }
+ tlsConfig.RootCAs = caPool
+ }
+ tr := &http.Transport{
+ TLSClientConfig: tlsConfig,
+ }
+ return &Client{
+ HTTPClient: &http.Client{Transport: tr},
+ transport: tr,
+ TLSConfig: tlsConfig,
+ endpoint: endpoint,
+ endpointURL: u,
+ eventMonitor: new(eventMonitoringState),
+ requestedAPIVersion: requestedAPIVersion,
+ }, nil
+}
+
+func (c *Client) checkAPIVersion() error {
+ serverAPIVersionString, err := c.getServerAPIVersionString()
+ if err != nil {
+ return err
+ }
+ c.serverAPIVersion, err = NewAPIVersion(serverAPIVersionString)
+ if err != nil {
+ return err
+ }
+ if c.requestedAPIVersion == nil {
+ c.expectedAPIVersion = c.serverAPIVersion
+ } else {
+ c.expectedAPIVersion = c.requestedAPIVersion
+ }
+ return nil
+}
+
+// Ping pings the docker server
+//
+// See http://goo.gl/stJENm for more details.
+func (c *Client) Ping() error {
+ path := "/_ping"
+ body, status, err := c.do("GET", path, doOptions{})
+ if err != nil {
+ return err
+ }
+ if status != http.StatusOK {
+ return newError(status, body)
+ }
+ return nil
+}
+
+func (c *Client) getServerAPIVersionString() (version string, err error) {
+ body, status, err := c.do("GET", "/version", doOptions{})
+ if err != nil {
+ return "", err
+ }
+ if status != http.StatusOK {
+ return "", fmt.Errorf("Received unexpected status %d while trying to retrieve the server version", status)
+ }
+ var versionResponse map[string]interface{}
+ err = json.Unmarshal(body, &versionResponse)
+ if err != nil {
+ return "", err
+ }
+ if version, ok := (versionResponse["ApiVersion"]).(string); ok {
+ return version, nil
+ }
+ return "", nil
+}
+
+func (c *Client) do(method, path string, doOptions doOptions) ([]byte, int, error) {
+ var params io.Reader
+ if doOptions.data != nil || doOptions.forceJSON {
+ buf, err := json.Marshal(doOptions.data)
+ if err != nil {
+ return nil, -1, err
+ }
+ params = bytes.NewBuffer(buf)
+ }
+ if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
+ err := c.checkAPIVersion()
+ if err != nil {
+ return nil, -1, err
+ }
+ }
+ req, err := http.NewRequest(method, c.getURL(path), params)
+ if err != nil {
+ return nil, -1, err
+ }
+ req.Header.Set("User-Agent", generatorAgent)
+ if doOptions.data != nil {
+ req.Header.Set("Content-Type", "application/json")
+ } else if method == "POST" {
+ req.Header.Set("Content-Type", "plain/text")
+ }
+ var resp *http.Response
+ protocol := c.endpointURL.Scheme
+ address := c.endpointURL.Path
+ if protocol == "unix" {
+ dial, err := net.Dial(protocol, address)
+ if err != nil {
+ return nil, -1, err
+ }
+ defer dial.Close()
+ breader := bufio.NewReader(dial)
+ err = req.Write(dial)
+ if err != nil {
+ return nil, -1, err
+ }
+ resp, err = http.ReadResponse(breader, req)
+ } else {
+ resp, err = c.HTTPClient.Do(req)
+ }
+ if err != nil {
+ if strings.Contains(err.Error(), "connection refused") {
+ return nil, -1, ErrConnectionRefused
+ }
+ return nil, -1, err
+ }
+ defer resp.Body.Close()
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, -1, err
+ }
+ if resp.StatusCode < 200 || resp.StatusCode >= 400 {
+ return nil, resp.StatusCode, newError(resp.StatusCode, body)
+ }
+ return body, resp.StatusCode, nil
+}
+
+func (c *Client) stream(method, path string, streamOptions streamOptions) error {
+ if (method == "POST" || method == "PUT") && streamOptions.in == nil {
+ streamOptions.in = bytes.NewReader(nil)
+ }
+ if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
+ err := c.checkAPIVersion()
+ if err != nil {
+ return err
+ }
+ }
+ req, err := http.NewRequest(method, c.getURL(path), streamOptions.in)
+ if err != nil {
+ return err
+ }
+ req.Header.Set("User-Agent", generatorAgent)
+ if method == "POST" {
+ req.Header.Set("Content-Type", "plain/text")
+ }
+ for key, val := range streamOptions.headers {
+ req.Header.Set(key, val)
+ }
+ var resp *http.Response
+ protocol := c.endpointURL.Scheme
+ address := c.endpointURL.Path
+ if streamOptions.stdout == nil {
+ streamOptions.stdout = ioutil.Discard
+ }
+ if streamOptions.stderr == nil {
+ streamOptions.stderr = ioutil.Discard
+ }
+ if protocol == "unix" {
+ dial, err := net.Dial(protocol, address)
+ if err != nil {
+ return err
+ }
+ defer dial.Close()
+ breader := bufio.NewReader(dial)
+ err = req.Write(dial)
+ if err != nil {
+ return err
+ }
+ if resp, err = http.ReadResponse(breader, req); err != nil {
+ if strings.Contains(err.Error(), "connection refused") {
+ return ErrConnectionRefused
+ }
+ return err
+ }
+ defer resp.Body.Close()
+ } else {
+ if resp, err = c.HTTPClient.Do(req); err != nil {
+ if strings.Contains(err.Error(), "connection refused") {
+ return ErrConnectionRefused
+ }
+ return err
+ }
+ defer resp.Body.Close()
+ defer c.transport.CancelRequest(req)
+ }
+ if resp.StatusCode < 200 || resp.StatusCode >= 400 {
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return err
+ }
+ return newError(resp.StatusCode, body)
+ }
+ if streamOptions.useJSONDecoder || resp.Header.Get("Content-Type") == "application/json" {
+ // if we want to get raw json stream, just copy it back to output
+ // without decoding it
+ if streamOptions.rawJSONStream {
+ _, err = io.Copy(streamOptions.stdout, resp.Body)
+ return err
+ }
+ dec := json.NewDecoder(resp.Body)
+ for {
+ var m jsonMessage
+ if err := dec.Decode(&m); err == io.EOF {
+ break
+ } else if err != nil {
+ return err
+ }
+ if m.Stream != "" {
+ fmt.Fprint(streamOptions.stdout, m.Stream)
+ } else if m.Progress != "" {
+ fmt.Fprintf(streamOptions.stdout, "%s %s\r", m.Status, m.Progress)
+ } else if m.Error != "" {
+ return errors.New(m.Error)
+ }
+ if m.Status != "" {
+ fmt.Fprintln(streamOptions.stdout, m.Status)
+ }
+ }
+ } else {
+ if streamOptions.setRawTerminal {
+ _, err = io.Copy(streamOptions.stdout, resp.Body)
+ } else {
+ _, err = stdcopy.StdCopy(streamOptions.stdout, streamOptions.stderr, resp.Body)
+ }
+ return err
+ }
+ return nil
+}
+
+func (c *Client) hijack(method, path string, hijackOptions hijackOptions) error {
+ if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
+ err := c.checkAPIVersion()
+ if err != nil {
+ return err
+ }
+ }
+
+ var params io.Reader
+ if hijackOptions.data != nil {
+ buf, err := json.Marshal(hijackOptions.data)
+ if err != nil {
+ return err
+ }
+ params = bytes.NewBuffer(buf)
+ }
+
+ if hijackOptions.stdout == nil {
+ hijackOptions.stdout = ioutil.Discard
+ }
+ if hijackOptions.stderr == nil {
+ hijackOptions.stderr = ioutil.Discard
+ }
+ req, err := http.NewRequest(method, c.getURL(path), params)
+ if err != nil {
+ return err
+ }
+ req.Header.Set("Content-Type", "plain/text")
+ protocol := c.endpointURL.Scheme
+ address := c.endpointURL.Path
+ if protocol != "unix" {
+ protocol = "tcp"
+ address = c.endpointURL.Host
+ }
+ var dial net.Conn
+ if c.TLSConfig != nil && protocol != "unix" {
+ dial, err = tlsDial(protocol, address, c.TLSConfig)
+ if err != nil {
+ return err
+ }
+ } else {
+ dial, err = net.Dial(protocol, address)
+ if err != nil {
+ return err
+ }
+ }
+ clientconn := httputil.NewClientConn(dial, nil)
+ defer clientconn.Close()
+ clientconn.Do(req)
+ if hijackOptions.success != nil {
+ hijackOptions.success <- struct{}{}
+ <-hijackOptions.success
+ }
+ rwc, br := clientconn.Hijack()
+ defer rwc.Close()
+ errChanOut := make(chan error, 1)
+ errChanIn := make(chan error, 1)
+ exit := make(chan bool)
+ go func() {
+ defer close(exit)
+ defer close(errChanOut)
+ var err error
+ if hijackOptions.setRawTerminal {
+ // When TTY is ON, use regular copy
+ _, err = io.Copy(hijackOptions.stdout, br)
+ } else {
+ _, err = stdcopy.StdCopy(hijackOptions.stdout, hijackOptions.stderr, br)
+ }
+ errChanOut <- err
+ }()
+ go func() {
+ if hijackOptions.in != nil {
+ _, err := io.Copy(rwc, hijackOptions.in)
+ errChanIn <- err
+ }
+ rwc.(interface {
+ CloseWrite() error
+ }).CloseWrite()
+ }()
+ <-exit
+ select {
+ case err = <-errChanIn:
+ return err
+ case err = <-errChanOut:
+ return err
+ }
+}
+
+func (c *Client) getURL(path string) string {
+ urlStr := strings.TrimRight(c.endpointURL.String(), "/")
+ if c.endpointURL.Scheme == "unix" {
+ urlStr = ""
+ }
+
+ if c.requestedAPIVersion != nil {
+ return fmt.Sprintf("%s/v%s%s", urlStr, c.requestedAPIVersion, path)
+ }
+ return fmt.Sprintf("%s%s", urlStr, path)
+}
+
+func queryString(opts interface{}) string {
+ if opts == nil {
+ return ""
+ }
+ value := reflect.ValueOf(opts)
+ if value.Kind() == reflect.Ptr {
+ value = value.Elem()
+ }
+ if value.Kind() != reflect.Struct {
+ return ""
+ }
+ items := url.Values(map[string][]string{})
+ for i := 0; i < value.NumField(); i++ {
+ field := value.Type().Field(i)
+ if field.PkgPath != "" {
+ continue
+ }
+ key := field.Tag.Get("qs")
+ if key == "" {
+ key = strings.ToLower(field.Name)
+ } else if key == "-" {
+ continue
+ }
+ addQueryStringValue(items, key, value.Field(i))
+ }
+ return items.Encode()
+}
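+
+// To illustrate the tag handling above with a hypothetical options struct:
+//
+//	type opts struct {
+//		All  bool                // untagged: key becomes "all"
+//		Repo string  `qs:"repo"` // explicit key
+//		Conf *Config `qs:"-"`    // skipped entirely
+//	}
+//	// queryString(opts{All: true, Repo: "foo"}) == "all=1&repo=foo"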
+
+func addQueryStringValue(items url.Values, key string, v reflect.Value) {
+ switch v.Kind() {
+ case reflect.Bool:
+ if v.Bool() {
+ items.Add(key, "1")
+ }
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ if v.Int() > 0 {
+ items.Add(key, strconv.FormatInt(v.Int(), 10))
+ }
+ case reflect.Float32, reflect.Float64:
+ if v.Float() > 0 {
+ items.Add(key, strconv.FormatFloat(v.Float(), 'f', -1, 64))
+ }
+ case reflect.String:
+ if v.String() != "" {
+ items.Add(key, v.String())
+ }
+ case reflect.Ptr:
+ if !v.IsNil() {
+ if b, err := json.Marshal(v.Interface()); err == nil {
+ items.Add(key, string(b))
+ }
+ }
+ case reflect.Map:
+ if len(v.MapKeys()) > 0 {
+ if b, err := json.Marshal(v.Interface()); err == nil {
+ items.Add(key, string(b))
+ }
+ }
+ case reflect.Array, reflect.Slice:
+ vLen := v.Len()
+ if vLen > 0 {
+ for i := 0; i < vLen; i++ {
+ addQueryStringValue(items, key, v.Index(i))
+ }
+ }
+ }
+}
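+
+// Illustrative sketch (not part of the upstream file): how queryString maps a
+// struct to a query string via the `qs` tags. The exampleOpts type is
+// hypothetical.
+//
+//	type exampleOpts struct {
+//	    All    bool   `qs:"all"`
+//	    Limit  int    // key defaults to "limit"
+//	    Secret string `qs:"-"` // skipped
+//	}
+//	// queryString(exampleOpts{All: true, Limit: 5}) == "all=1&limit=5"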
+
+func newError(status int, body []byte) *Error {
+ return &Error{Status: status, Message: string(body)}
+}
+
+func (e *Error) Error() string {
+ return fmt.Sprintf("API error (%d): %s", e.Status, e.Message)
+}
+
+func parseEndpoint(endpoint string, tls bool) (*url.URL, error) {
+ u, err := url.Parse(endpoint)
+ if err != nil {
+ return nil, ErrInvalidEndpoint
+ }
+ if tls {
+ u.Scheme = "https"
+ }
+ switch u.Scheme {
+ case "unix":
+ return u, nil
+ case "http", "https", "tcp":
+ _, port, err := net.SplitHostPort(u.Host)
+ if err != nil {
+ if e, ok := err.(*net.AddrError); ok {
+ if e.Err == "missing port in address" {
+ return u, nil
+ }
+ }
+ return nil, ErrInvalidEndpoint
+ }
+ number, err := strconv.ParseInt(port, 10, 64)
+ if err == nil && number > 0 && number < 65536 {
+ if u.Scheme == "tcp" {
+ if number == 2376 {
+ u.Scheme = "https"
+ } else {
+ u.Scheme = "http"
+ }
+ }
+ return u, nil
+ }
+ return nil, ErrInvalidEndpoint
+ default:
+ return nil, ErrInvalidEndpoint
+ }
+}
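+
+// Illustrative sketch: endpoint forms accepted by parseEndpoint.
+//
+//	parseEndpoint("unix:///var/run/docker.sock", false) // unix scheme, kept as-is
+//	parseEndpoint("tcp://10.0.0.1:2376", false)         // scheme rewritten to https
+//	parseEndpoint("tcp://10.0.0.1:2375", false)         // scheme rewritten to http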
+
+//****************************************************************//
+// container-related functions
+//****************************************************************//
+
+// ListContainers returns a slice of containers matching the given criteria.
+//
+// See https://docs.docker.com/reference/api/docker_remote_api_v1.19/#list-containers for more details.
+func (c *Client) ListContainers(opts ListContainersOptions) ([]APIContainers, error) {
+ path := "/containers/json?" + queryString(opts)
+ body, _, err := c.do("GET", path, doOptions{})
+ if err != nil {
+ return nil, err
+ }
+ var containers []APIContainers
+ err = json.Unmarshal(body, &containers)
+ if err != nil {
+ return nil, err
+ }
+ return containers, nil
+}
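+
+// Usage sketch (assumes an initialized *Client named c; the All and ID field
+// names are assumptions):
+//
+//	containers, err := c.ListContainers(ListContainersOptions{All: true})
+//	if err != nil {
+//	    return err
+//	}
+//	for _, ct := range containers {
+//	    fmt.Println(ct.ID)
+//	}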
+
+// Port returns the number of the port.
+func (p Port) Port() string {
+ return strings.Split(string(p), "/")[0]
+}
+
+// Proto returns the name of the protocol.
+func (p Port) Proto() string {
+ parts := strings.Split(string(p), "/")
+ if len(parts) == 1 {
+ return "tcp"
+ }
+ return parts[1]
+}
+
+// String returns the string representation of a state.
+func (s *State) String() string {
+ if s.Running {
+ if s.Paused {
+ return "paused"
+ }
+ return fmt.Sprintf("Up %s", time.Now().UTC().Sub(s.StartedAt))
+ }
+ return fmt.Sprintf("Exit %d", s.ExitCode)
+}
+
+// PortMappingAPI translates the port mappings as contained in NetworkSettings
+// into the format in which they would appear when returned by the API
+func (settings *NetworkSettings) PortMappingAPI() []APIPort {
+ var mapping []APIPort
+ for port, bindings := range settings.Ports {
+ p, _ := parsePort(port.Port())
+ if len(bindings) == 0 {
+ mapping = append(mapping, APIPort{
+ PublicPort: int64(p),
+ Type: port.Proto(),
+ })
+ continue
+ }
+ for _, binding := range bindings {
+ p, _ := parsePort(port.Port())
+ h, _ := parsePort(binding.HostPort)
+ mapping = append(mapping, APIPort{
+ PrivatePort: int64(p),
+ PublicPort: int64(h),
+ Type: port.Proto(),
+ IP: binding.HostIP,
+ })
+ }
+ }
+ return mapping
+}
+
+func parsePort(rawPort string) (int, error) {
+ port, err := strconv.ParseUint(rawPort, 10, 16)
+ if err != nil {
+ return 0, err
+ }
+ return int(port), nil
+}
+
+// RenameContainer updates an existing container's name.
+//
+// See https://docs.docker.com/reference/api/docker_remote_api_v1.19/#rename-a-container for more details.
+func (c *Client) RenameContainer(opts RenameContainerOptions) error {
+ _, _, err := c.do("POST", fmt.Sprintf("/containers/"+opts.ID+"/rename?%s", queryString(opts)), doOptions{})
+ return err
+}
+
+// InspectContainer returns information about a container by its ID.
+//
+// See https://docs.docker.com/reference/api/docker_remote_api_v1.19/#inspect-a-container for more details.
+func (c *Client) InspectContainer(id string) (*Container, error) {
+ path := "/containers/" + id + "/json"
+ body, status, err := c.do("GET", path, doOptions{})
+ if status == http.StatusNotFound {
+ return nil, &NoSuchContainer{ID: id}
+ }
+ if err != nil {
+ return nil, err
+ }
+ var container Container
+ err = json.Unmarshal(body, &container)
+ if err != nil {
+ return nil, err
+ }
+ return &container, nil
+}
+
+// ContainerChanges returns changes in the filesystem of the given container.
+//
+// See https://docs.docker.com/reference/api/docker_remote_api_v1.19/#inspect-changes-on-a-container-s-filesystem for more details.
+func (c *Client) ContainerChanges(id string) ([]Change, error) {
+ path := "/containers/" + id + "/changes"
+ body, status, err := c.do("GET", path, doOptions{})
+ if status == http.StatusNotFound {
+ return nil, &NoSuchContainer{ID: id}
+ }
+ if err != nil {
+ return nil, err
+ }
+ var changes []Change
+ err = json.Unmarshal(body, &changes)
+ if err != nil {
+ return nil, err
+ }
+ return changes, nil
+}
+
+// CreateContainer creates a new container, returning the container instance,
+// or an error in case of failure.
+//
+// See https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container for more details.
+func (c *Client) CreateContainer(opts CreateContainerOptions) (*Container, error) {
+ path := "/containers/create?" + queryString(opts)
+ body, status, err := c.do(
+ "POST",
+ path,
+ doOptions{
+ data: struct {
+ *Config
+ HostConfig *HostConfig `json:"HostConfig,omitempty" yaml:"HostConfig,omitempty"`
+ }{
+ opts.Config,
+ opts.HostConfig,
+ },
+ },
+ )
+
+ if status == http.StatusNotFound {
+ return nil, ErrNoSuchImage
+ }
+ if status == http.StatusConflict {
+ return nil, ErrContainerAlreadyExists
+ }
+ if err != nil {
+ return nil, err
+ }
+ var container Container
+ err = json.Unmarshal(body, &container)
+ if err != nil {
+ return nil, err
+ }
+
+ container.Name = opts.Name
+
+ return &container, nil
+}
+
+// AlwaysRestart returns a restart policy that tells the Docker daemon to
+// always restart the container.
+func AlwaysRestart() RestartPolicy {
+ return RestartPolicy{Name: "always"}
+}
+
+// RestartOnFailure returns a restart policy that tells the Docker daemon to
+// restart the container on failures, trying at most maxRetry times.
+func RestartOnFailure(maxRetry int) RestartPolicy {
+ return RestartPolicy{Name: "on-failure", MaximumRetryCount: maxRetry}
+}
+
+// NeverRestart returns a restart policy that tells the Docker daemon to never
+// restart the container on failures.
+func NeverRestart() RestartPolicy {
+ return RestartPolicy{Name: "no"}
+}
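+
+// Usage sketch: creating and starting a container with a restart policy
+// (the Image and RestartPolicy field names are assumptions; c is an
+// initialized *Client):
+//
+//	container, err := c.CreateContainer(CreateContainerOptions{
+//	    Name:       "web",
+//	    Config:     &Config{Image: "nginx"},
+//	    HostConfig: &HostConfig{RestartPolicy: RestartOnFailure(3)},
+//	})
+//	if err == nil {
+//	    err = c.StartContainer(container.ID, nil)
+//	}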
+
+// StartContainer starts a container, returning an error in case of failure.
+//
+// See https://docs.docker.com/reference/api/docker_remote_api_v1.19/#start-a-container for more details.
+func (c *Client) StartContainer(id string, hostConfig *HostConfig) error {
+ path := "/containers/" + id + "/start"
+ _, status, err := c.do("POST", path, doOptions{data: hostConfig, forceJSON: true})
+ if status == http.StatusNotFound {
+ return &NoSuchContainer{ID: id, Err: err}
+ }
+ if status == http.StatusNotModified {
+ return &ContainerAlreadyRunning{ID: id}
+ }
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// StopContainer stops a container, killing it after the given timeout (in
+// seconds).
+//
+// See https://docs.docker.com/reference/api/docker_remote_api_v1.19/#stop-a-container for more details.
+func (c *Client) StopContainer(id string, timeout uint) error {
+ path := fmt.Sprintf("/containers/%s/stop?t=%d", id, timeout)
+ _, status, err := c.do("POST", path, doOptions{})
+ if status == http.StatusNotFound {
+ return &NoSuchContainer{ID: id}
+ }
+ if status == http.StatusNotModified {
+ return &ContainerNotRunning{ID: id}
+ }
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// RestartContainer stops a container, killing it after the given timeout (in
+// seconds) during the stop process, and then starts it again.
+//
+// See https://docs.docker.com/reference/api/docker_remote_api_v1.19/#restart-a-container for more details.
+func (c *Client) RestartContainer(id string, timeout uint) error {
+ path := fmt.Sprintf("/containers/%s/restart?t=%d", id, timeout)
+ _, status, err := c.do("POST", path, doOptions{})
+ if status == http.StatusNotFound {
+ return &NoSuchContainer{ID: id}
+ }
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// PauseContainer pauses the given container.
+//
+// See https://docs.docker.com/reference/api/docker_remote_api_v1.19/#pause-a-container for more details.
+func (c *Client) PauseContainer(id string) error {
+ path := fmt.Sprintf("/containers/%s/pause", id)
+ _, status, err := c.do("POST", path, doOptions{})
+ if status == http.StatusNotFound {
+ return &NoSuchContainer{ID: id}
+ }
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// UnpauseContainer unpauses the given container.
+//
+// See https://docs.docker.com/reference/api/docker_remote_api_v1.19/#unpause-a-container for more details.
+func (c *Client) UnpauseContainer(id string) error {
+ path := fmt.Sprintf("/containers/%s/unpause", id)
+ _, status, err := c.do("POST", path, doOptions{})
+ if status == http.StatusNotFound {
+ return &NoSuchContainer{ID: id}
+ }
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// TopContainer returns processes running inside a container.
+//
+// See https://docs.docker.com/reference/api/docker_remote_api_v1.19/#list-processes-running-inside-a-container for more details.
+func (c *Client) TopContainer(id string, psArgs string) (TopResult, error) {
+ var args string
+ var result TopResult
+ if psArgs != "" {
+ args = fmt.Sprintf("?ps_args=%s", psArgs)
+ }
+ path := fmt.Sprintf("/containers/%s/top%s", id, args)
+ body, status, err := c.do("GET", path, doOptions{})
+ if status == http.StatusNotFound {
+ return result, &NoSuchContainer{ID: id}
+ }
+ if err != nil {
+ return result, err
+ }
+ err = json.Unmarshal(body, &result)
+ if err != nil {
+ return result, err
+ }
+ return result, nil
+}
+
+// Stats sends statistics for the given container to the given channel.
+//
+// This function is blocking, similar to a streaming call for logs, and should be run
+// on a separate goroutine from the caller. Note that this function will block until
+// the given container is removed, not just exited. When finished, this function
+// will close the given channel.
+//
+// See https://docs.docker.com/reference/api/docker_remote_api_v1.19/#get-container-stats-based-on-resource-usage for more details.
+func (c *Client) Stats(opts StatsOptions) (retErr error) {
+ errC := make(chan error, 1)
+ readCloser, writeCloser := io.Pipe()
+
+ defer func() {
+ close(opts.Stats)
+ if err := <-errC; err != nil && retErr == nil {
+ retErr = err
+ }
+ if err := readCloser.Close(); err != nil && retErr == nil {
+ retErr = err
+ }
+ }()
+
+ go func() {
+ err := c.stream("GET", fmt.Sprintf("/containers/%s/stats?stream=%v", opts.ID, opts.Stream), streamOptions{
+ rawJSONStream: true,
+ useJSONDecoder: true,
+ stdout: writeCloser,
+ })
+ if err != nil {
+ dockerError, ok := err.(*Error)
+ if ok {
+ if dockerError.Status == http.StatusNotFound {
+ err = &NoSuchContainer{ID: opts.ID}
+ }
+ }
+ }
+ if closeErr := writeCloser.Close(); closeErr != nil && err == nil {
+ err = closeErr
+ }
+ errC <- err
+ close(errC)
+ }()
+
+ decoder := json.NewDecoder(readCloser)
+ stats := new(Stats)
+ for err := decoder.Decode(stats); err != io.EOF; err = decoder.Decode(stats) {
+ if err != nil {
+ return err
+ }
+ opts.Stats <- stats
+ stats = new(Stats)
+ }
+ return nil
+}
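+
+// Usage sketch: because Stats blocks until the container is removed, run it
+// on its own goroutine and drain the channel (the container ID is
+// hypothetical):
+//
+//	statsC := make(chan *Stats)
+//	go func() {
+//	    _ = c.Stats(StatsOptions{ID: "abc123", Stats: statsC, Stream: true})
+//	}()
+//	for s := range statsC {
+//	    fmt.Printf("%+v\n", s)
+//	}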
+
+// KillContainer kills a container, returning an error in case of failure.
+//
+// See https://docs.docker.com/reference/api/docker_remote_api_v1.19/#kill-a-container for more details.
+func (c *Client) KillContainer(opts KillContainerOptions) error {
+ path := "/containers/" + opts.ID + "/kill" + "?" + queryString(opts)
+ _, status, err := c.do("POST", path, doOptions{})
+ if status == http.StatusNotFound {
+ return &NoSuchContainer{ID: opts.ID}
+ }
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// RemoveContainer removes a container, returning an error in case of failure.
+//
+// See https://docs.docker.com/reference/api/docker_remote_api_v1.19/#remove-a-container for more details.
+func (c *Client) RemoveContainer(opts RemoveContainerOptions) error {
+ path := "/containers/" + opts.ID + "?" + queryString(opts)
+ _, status, err := c.do("DELETE", path, doOptions{})
+ if status == http.StatusNotFound {
+ return &NoSuchContainer{ID: opts.ID}
+ }
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// CopyFromContainer copies files or folders from a container, using a given
+// resource.
+//
+// See https://docs.docker.com/reference/api/docker_remote_api_v1.19/#copy-files-or-folders-from-a-container for more details.
+func (c *Client) CopyFromContainer(opts CopyFromContainerOptions) error {
+ if opts.Container == "" {
+ return &NoSuchContainer{ID: opts.Container}
+ }
+ url := fmt.Sprintf("/containers/%s/copy", opts.Container)
+ body, status, err := c.do("POST", url, doOptions{data: opts})
+ if status == http.StatusNotFound {
+ return &NoSuchContainer{ID: opts.Container}
+ }
+ if err != nil {
+ return err
+ }
+ _, err = io.Copy(opts.OutputStream, bytes.NewBuffer(body))
+ return err
+}
+
+// WaitContainer blocks until the given container stops, returning the exit
+// code of the container's status.
+//
+// See http://goo.gl/J88DHU for more details.
+func (c *Client) WaitContainer(id string) (int, error) {
+ body, status, err := c.do("POST", "/containers/"+id+"/wait", doOptions{})
+ if status == http.StatusNotFound {
+ return 0, &NoSuchContainer{ID: id}
+ }
+ if err != nil {
+ return 0, err
+ }
+ var r struct{ StatusCode int }
+ err = json.Unmarshal(body, &r)
+ if err != nil {
+ return 0, err
+ }
+ return r.StatusCode, nil
+}
+
+// CommitContainer creates a new image from a container's changes.
+//
+// See http://goo.gl/Jn8pe8 for more details.
+func (c *Client) CommitContainer(opts CommitContainerOptions) (*Image, error) {
+ path := "/commit?" + queryString(opts)
+ body, status, err := c.do("POST", path, doOptions{data: opts.Run})
+ if status == http.StatusNotFound {
+ return nil, &NoSuchContainer{ID: opts.Container}
+ }
+ if err != nil {
+ return nil, err
+ }
+ var image Image
+ err = json.Unmarshal(body, &image)
+ if err != nil {
+ return nil, err
+ }
+ return &image, nil
+}
+
+// AttachToContainer attaches to a container, using the given options.
+//
+// See http://goo.gl/RRAhws for more details.
+func (c *Client) AttachToContainer(opts AttachToContainerOptions) error {
+ if opts.Container == "" {
+ return &NoSuchContainer{ID: opts.Container}
+ }
+ path := "/containers/" + opts.Container + "/attach?" + queryString(opts)
+ return c.hijack("POST", path, hijackOptions{
+ success: opts.Success,
+ setRawTerminal: opts.RawTerminal,
+ in: opts.InputStream,
+ stdout: opts.OutputStream,
+ stderr: opts.ErrorStream,
+ })
+}
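+
+// Usage sketch: capturing a container's output via attach. The Stream,
+// Stdout and Stderr query options are assumptions; the other fields follow
+// the hijackOptions mapping above.
+//
+//	var stdout, stderr bytes.Buffer
+//	err := c.AttachToContainer(AttachToContainerOptions{
+//	    Container:    "abc123",
+//	    OutputStream: &stdout,
+//	    ErrorStream:  &stderr,
+//	    Stream:       true,
+//	    Stdout:       true,
+//	    Stderr:       true,
+//	})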
+
+// Logs gets stdout and stderr logs from the specified container.
+//
+// See http://goo.gl/rLhKSU for more details.
+func (c *Client) Logs(opts LogsOptions) error {
+ if opts.Container == "" {
+ return &NoSuchContainer{ID: opts.Container}
+ }
+ if opts.Tail == "" {
+ opts.Tail = "all"
+ }
+ path := "/containers/" + opts.Container + "/logs?" + queryString(opts)
+ return c.stream("GET", path, streamOptions{
+ setRawTerminal: opts.RawTerminal,
+ stdout: opts.OutputStream,
+ stderr: opts.ErrorStream,
+ })
+}
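+
+// Usage sketch: fetching recent logs into a buffer. Tail is set explicitly
+// (it defaults to "all" above); the Stdout query option is an assumption.
+//
+//	var buf bytes.Buffer
+//	err := c.Logs(LogsOptions{
+//	    Container:    "abc123",
+//	    OutputStream: &buf,
+//	    Stdout:       true,
+//	    Tail:         "10",
+//	})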
+
+// ResizeContainerTTY resizes the terminal to the given height and width.
+func (c *Client) ResizeContainerTTY(id string, height, width int) error {
+ params := make(url.Values)
+ params.Set("h", strconv.Itoa(height))
+ params.Set("w", strconv.Itoa(width))
+ _, _, err := c.do("POST", "/containers/"+id+"/resize?"+params.Encode(), doOptions{})
+ return err
+}
+
+// ExportContainer exports the contents of container id as a tar archive and
+// writes it to opts.OutputStream.
+//
+// See http://goo.gl/hnzE62 for more details.
+func (c *Client) ExportContainer(opts ExportContainerOptions) error {
+ if opts.ID == "" {
+ return &NoSuchContainer{ID: opts.ID}
+ }
+ url := fmt.Sprintf("/containers/%s/export", opts.ID)
+ return c.stream("GET", url, streamOptions{
+ setRawTerminal: true,
+ stdout: opts.OutputStream,
+ })
+}
+
+func (err *NoSuchContainer) Error() string {
+ if err.Err != nil {
+ return err.Err.Error()
+ }
+ return "No such container: " + err.ID
+}
+
+func (err *ContainerAlreadyRunning) Error() string {
+ return "Container already running: " + err.ID
+}
+func (err *ContainerNotRunning) Error() string {
+ return "Container not running: " + err.ID
+}
+
+//****************************************************************//
+// env-related functions
+//****************************************************************//
+
+// Get returns the string value of the given key.
+func (env *Env) Get(key string) (value string) {
+ return env.Map()[key]
+}
+
+// Exists checks whether the given key is defined in the internal Env
+// representation.
+func (env *Env) Exists(key string) bool {
+ _, exists := env.Map()[key]
+ return exists
+}
+
+// GetBool returns a boolean representation of the given key. The key is false
+// whenever its value is 0, no, false, none or an empty string. Any other value
+// will be interpreted as true.
+func (env *Env) GetBool(key string) (value bool) {
+ s := strings.ToLower(strings.Trim(env.Get(key), " \t"))
+ if s == "" || s == "0" || s == "no" || s == "false" || s == "none" {
+ return false
+ }
+ return true
+}
+
+// SetBool defines a boolean value to the given key.
+func (env *Env) SetBool(key string, value bool) {
+ if value {
+ env.Set(key, "1")
+ } else {
+ env.Set(key, "0")
+ }
+}
+
+// GetInt returns the value of the provided key, converted to int.
+//
+// If the value cannot be represented as an integer, it returns -1.
+func (env *Env) GetInt(key string) int {
+ return int(env.GetInt64(key))
+}
+
+// SetInt defines an integer value to the given key.
+func (env *Env) SetInt(key string, value int) {
+ env.Set(key, strconv.Itoa(value))
+}
+
+// GetInt64 returns the value of the provided key, converted to int64.
+//
+// If the value cannot be represented as an integer, it returns -1.
+func (env *Env) GetInt64(key string) int64 {
+ s := strings.Trim(env.Get(key), " \t")
+ val, err := strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ return -1
+ }
+ return val
+}
+
+// SetInt64 defines an integer (64-bit wide) value to the given key.
+func (env *Env) SetInt64(key string, value int64) {
+ env.Set(key, strconv.FormatInt(value, 10))
+}
+
+// GetJSON unmarshals the value of the provided key in the provided iface.
+//
+// iface is a value that can be provided to the json.Unmarshal function.
+func (env *Env) GetJSON(key string, iface interface{}) error {
+ sval := env.Get(key)
+ if sval == "" {
+ return nil
+ }
+ return json.Unmarshal([]byte(sval), iface)
+}
+
+// SetJSON marshals the given value to JSON format and stores it using the
+// provided key.
+func (env *Env) SetJSON(key string, value interface{}) error {
+ sval, err := json.Marshal(value)
+ if err != nil {
+ return err
+ }
+ env.Set(key, string(sval))
+ return nil
+}
+
+// GetList returns a list of strings matching the provided key. It handles the
+// list as a JSON representation of a list of strings.
+//
+// If the given key matches a single string, it will return a list
+// containing only the value that matches the key.
+func (env *Env) GetList(key string) []string {
+ sval := env.Get(key)
+ if sval == "" {
+ return nil
+ }
+ var l []string
+ if err := json.Unmarshal([]byte(sval), &l); err != nil {
+ l = append(l, sval)
+ }
+ return l
+}
+
+// SetList stores the given list in the provided key, after serializing it to
+// JSON format.
+func (env *Env) SetList(key string, value []string) error {
+ return env.SetJSON(key, value)
+}
+
+// Set defines the value of a key to the given string.
+func (env *Env) Set(key, value string) {
+ *env = append(*env, key+"="+value)
+}
+
+// Decode decodes `src` as a json dictionary, and adds each decoded key-value
+// pair to the environment.
+//
+// If `src` cannot be decoded as a json dictionary, an error is returned.
+func (env *Env) Decode(src io.Reader) error {
+ m := make(map[string]interface{})
+ if err := json.NewDecoder(src).Decode(&m); err != nil {
+ return err
+ }
+ for k, v := range m {
+ env.SetAuto(k, v)
+ }
+ return nil
+}
+
+// SetAuto chooses which Set* method to call based on the type of the given value.
+func (env *Env) SetAuto(key string, value interface{}) {
+ if fval, ok := value.(float64); ok {
+ env.SetInt64(key, int64(fval))
+ } else if sval, ok := value.(string); ok {
+ env.Set(key, sval)
+ } else if val, err := json.Marshal(value); err == nil {
+ env.Set(key, string(val))
+ } else {
+ env.Set(key, fmt.Sprintf("%v", value))
+ }
+}
+
+// Map returns the map representation of the env.
+func (env *Env) Map() map[string]string {
+ if len(*env) == 0 {
+ return nil
+ }
+ m := make(map[string]string)
+ for _, kv := range *env {
+ parts := strings.SplitN(kv, "=", 2)
+ m[parts[0]] = parts[1]
+ }
+ return m
+}
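+
+// Usage sketch: Env is a slice of KEY=value strings, so the typed setters
+// and getters round-trip through that representation:
+//
+//	var env Env
+//	env.Set("PATH", "/usr/bin")
+//	env.SetInt64("TIMEOUT", 30)
+//	env.Get("PATH")         // "/usr/bin"
+//	env.GetInt64("TIMEOUT") // 30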
+
+//****************************************************************//
+// event-related functions
+//****************************************************************//
+
+// AddEventListener adds a new listener to container events in the Docker API.
+//
+// The parameter is a channel through which events will be sent.
+func (c *Client) AddEventListener(listener chan<- *APIEvents) error {
+ var err error
+ if !c.eventMonitor.isEnabled() {
+ err = c.eventMonitor.enableEventMonitoring(c)
+ if err != nil {
+ return err
+ }
+ }
+ err = c.eventMonitor.addListener(listener)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// RemoveEventListener removes a listener from the monitor.
+func (c *Client) RemoveEventListener(listener chan *APIEvents) error {
+ err := c.eventMonitor.removeListener(listener)
+ if err != nil {
+ return err
+ }
+ if len(c.eventMonitor.listeners) == 0 {
+ err = c.eventMonitor.disableEventMonitoring()
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
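+
+// Usage sketch: subscribing to daemon events through the monitor:
+//
+//	events := make(chan *APIEvents, 10)
+//	if err := c.AddEventListener(events); err != nil {
+//	    return err
+//	}
+//	defer c.RemoveEventListener(events)
+//	for ev := range events {
+//	    fmt.Println(ev.Time)
+//	}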
+
+func (eventState *eventMonitoringState) addListener(listener chan<- *APIEvents) error {
+ eventState.Lock()
+ defer eventState.Unlock()
+ if listenerExists(listener, &eventState.listeners) {
+ return ErrListenerAlreadyExists
+ }
+ eventState.Add(1)
+ eventState.listeners = append(eventState.listeners, listener)
+ return nil
+}
+
+func (eventState *eventMonitoringState) removeListener(listener chan<- *APIEvents) error {
+ eventState.Lock()
+ defer eventState.Unlock()
+ if listenerExists(listener, &eventState.listeners) {
+ var newListeners []chan<- *APIEvents
+ for _, l := range eventState.listeners {
+ if l != listener {
+ newListeners = append(newListeners, l)
+ }
+ }
+ eventState.listeners = newListeners
+ eventState.Add(-1)
+ }
+ return nil
+}
+
+func (eventState *eventMonitoringState) closeListeners() {
+ eventState.Lock()
+ defer eventState.Unlock()
+ for _, l := range eventState.listeners {
+ close(l)
+ eventState.Add(-1)
+ }
+ eventState.listeners = nil
+}
+
+func listenerExists(a chan<- *APIEvents, list *[]chan<- *APIEvents) bool {
+ for _, b := range *list {
+ if b == a {
+ return true
+ }
+ }
+ return false
+}
+
+func (eventState *eventMonitoringState) enableEventMonitoring(c *Client) error {
+ eventState.Lock()
+ defer eventState.Unlock()
+ if !eventState.enabled {
+ eventState.enabled = true
+ var lastSeenDefault = int64(0)
+ eventState.lastSeen = &lastSeenDefault
+ eventState.C = make(chan *APIEvents, 100)
+ eventState.errC = make(chan error, 1)
+ go eventState.monitorEvents(c)
+ }
+ return nil
+}
+
+func (eventState *eventMonitoringState) disableEventMonitoring() error {
+ eventState.Wait()
+ eventState.Lock()
+ defer eventState.Unlock()
+ if eventState.enabled {
+ eventState.enabled = false
+ close(eventState.C)
+ close(eventState.errC)
+ }
+ return nil
+}
+
+func (eventState *eventMonitoringState) monitorEvents(c *Client) {
+ var err error
+ for eventState.noListeners() {
+ time.Sleep(10 * time.Millisecond)
+ }
+ if err = eventState.connectWithRetry(c); err != nil {
+ eventState.terminate()
+ }
+ for eventState.isEnabled() {
+ timeout := time.After(100 * time.Millisecond)
+ select {
+ case ev, ok := <-eventState.C:
+ if !ok {
+ return
+ }
+ if ev == EOFEvent {
+ eventState.closeListeners()
+ eventState.terminate()
+ return
+ }
+ eventState.updateLastSeen(ev)
+ go eventState.sendEvent(ev)
+ case err = <-eventState.errC:
+ if err == ErrNoListeners {
+ eventState.terminate()
+ return
+ } else if err != nil {
+ defer func() { go eventState.monitorEvents(c) }()
+ return
+ }
+ case <-timeout:
+ continue
+ }
+ }
+}
+
+func (eventState *eventMonitoringState) connectWithRetry(c *Client) error {
+ var retries int
+ var err error
+ for err = c.eventHijack(atomic.LoadInt64(eventState.lastSeen), eventState.C, eventState.errC); err != nil && retries < maxMonitorConnRetries; retries++ {
+ waitTime := int64(retryInitialWaitTime * math.Pow(2, float64(retries)))
+ time.Sleep(time.Duration(waitTime) * time.Millisecond)
+ err = c.eventHijack(atomic.LoadInt64(eventState.lastSeen), eventState.C, eventState.errC)
+ }
+ return err
+}
+
+func (eventState *eventMonitoringState) noListeners() bool {
+ eventState.RLock()
+ defer eventState.RUnlock()
+ return len(eventState.listeners) == 0
+}
+
+func (eventState *eventMonitoringState) isEnabled() bool {
+ eventState.RLock()
+ defer eventState.RUnlock()
+ return eventState.enabled
+}
+
+func (eventState *eventMonitoringState) sendEvent(event *APIEvents) {
+ eventState.RLock()
+ defer eventState.RUnlock()
+ eventState.Add(1)
+ defer eventState.Done()
+ if eventState.enabled {
+ if len(eventState.listeners) == 0 {
+ eventState.errC <- ErrNoListeners
+ return
+ }
+
+ for _, listener := range eventState.listeners {
+ listener <- event
+ }
+ }
+}
+
+func (eventState *eventMonitoringState) updateLastSeen(e *APIEvents) {
+ eventState.Lock()
+ defer eventState.Unlock()
+ if atomic.LoadInt64(eventState.lastSeen) < e.Time {
+ atomic.StoreInt64(eventState.lastSeen, e.Time)
+ }
+}
+
+func (eventState *eventMonitoringState) terminate() {
+ eventState.disableEventMonitoring()
+}
+
+func (c *Client) eventHijack(startTime int64, eventChan chan *APIEvents, errChan chan error) error {
+ uri := "/events"
+ if startTime != 0 {
+ uri += fmt.Sprintf("?since=%d", startTime)
+ }
+ protocol := c.endpointURL.Scheme
+ address := c.endpointURL.Path
+ if protocol != "unix" {
+ protocol = "tcp"
+ address = c.endpointURL.Host
+ }
+ var dial net.Conn
+ var err error
+ if c.TLSConfig == nil {
+ dial, err = net.Dial(protocol, address)
+ } else {
+ dial, err = tls.Dial(protocol, address, c.TLSConfig)
+ }
+ if err != nil {
+ return err
+ }
+ conn := httputil.NewClientConn(dial, nil)
+ req, err := http.NewRequest("GET", uri, nil)
+ if err != nil {
+ return err
+ }
+ res, err := conn.Do(req)
+ if err != nil {
+ return err
+ }
+ go func(res *http.Response, conn *httputil.ClientConn) {
+ defer conn.Close()
+ defer res.Body.Close()
+ decoder := json.NewDecoder(res.Body)
+ for {
+ var event APIEvents
+ if err = decoder.Decode(&event); err != nil {
+ if err == io.EOF || err == io.ErrUnexpectedEOF {
+ if c.eventMonitor.isEnabled() {
+ // Signal that we're exiting.
+ eventChan <- EOFEvent
+ }
+ break
+ }
+ errChan <- err
+ }
+ if event.Time == 0 {
+ continue
+ }
+ if !c.eventMonitor.isEnabled() {
+ return
+ }
+ eventChan <- &event
+ }
+ }(res, conn)
+ return nil
+}
+
+//****************************************************************//
+// exec-related functions
+//****************************************************************//
+
+// CreateExec sets up an exec instance in the running container
+// opts.Container, returning the exec instance, or an error in case of failure.
+//
+// See http://goo.gl/8izrzI for more details.
+func (c *Client) CreateExec(opts CreateExecOptions) (*Exec, error) {
+ path := fmt.Sprintf("/containers/%s/exec", opts.Container)
+ body, status, err := c.do("POST", path, doOptions{data: opts})
+ if status == http.StatusNotFound {
+ return nil, &NoSuchContainer{ID: opts.Container}
+ }
+ if err != nil {
+ return nil, err
+ }
+ var exec Exec
+ err = json.Unmarshal(body, &exec)
+ if err != nil {
+ return nil, err
+ }
+
+ return &exec, nil
+}
+
+// StartExec starts a previously set up exec instance id. If opts.Detach is
+// true, it returns after starting the exec command. Otherwise, it sets up an
+// interactive session with the exec command.
+//
+// See http://goo.gl/JW8Lxl for more details.
+func (c *Client) StartExec(id string, opts StartExecOptions) error {
+ if id == "" {
+ return &NoSuchExec{ID: id}
+ }
+
+ path := fmt.Sprintf("/exec/%s/start", id)
+
+ if opts.Detach {
+ _, status, err := c.do("POST", path, doOptions{data: opts})
+ if status == http.StatusNotFound {
+ return &NoSuchExec{ID: id}
+ }
+ if err != nil {
+ return err
+ }
+ return nil
+ }
+
+ return c.hijack("POST", path, hijackOptions{
+ success: opts.Success,
+ setRawTerminal: opts.RawTerminal,
+ in: opts.InputStream,
+ stdout: opts.OutputStream,
+ stderr: opts.ErrorStream,
+ data: opts,
+ })
+}
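+
+// Usage sketch: running a one-off command in a container. The Cmd and
+// AttachStdout fields on CreateExecOptions, and Exec's ID field, are
+// assumptions.
+//
+//	exec, err := c.CreateExec(CreateExecOptions{
+//	    Container:    "abc123",
+//	    Cmd:          []string{"ls", "-l"},
+//	    AttachStdout: true,
+//	})
+//	if err != nil {
+//	    return err
+//	}
+//	var out bytes.Buffer
+//	err = c.StartExec(exec.ID, StartExecOptions{OutputStream: &out})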
+
+// ResizeExecTTY resizes the tty session used by the exec command id. This API
+// is valid only if Tty was specified as part of creating and starting the exec
+// command.
+//
+// See http://goo.gl/YDSx1f for more details.
+func (c *Client) ResizeExecTTY(id string, height, width int) error {
+ params := make(url.Values)
+ params.Set("h", strconv.Itoa(height))
+ params.Set("w", strconv.Itoa(width))
+
+ path := fmt.Sprintf("/exec/%s/resize?%s", id, params.Encode())
+ _, _, err := c.do("POST", path, doOptions{})
+ return err
+}
+
+// InspectExec returns low-level information about the exec command id.
+//
+// See http://goo.gl/ypQULN for more details.
+func (c *Client) InspectExec(id string) (*ExecInspect, error) {
+ path := fmt.Sprintf("/exec/%s/json", id)
+ body, status, err := c.do("GET", path, doOptions{})
+ if status == http.StatusNotFound {
+ return nil, &NoSuchExec{ID: id}
+ }
+ if err != nil {
+ return nil, err
+ }
+ var exec ExecInspect
+ err = json.Unmarshal(body, &exec)
+ if err != nil {
+ return nil, err
+ }
+ return &exec, nil
+}
+
+func (err *NoSuchExec) Error() string {
+ return "No such exec instance: " + err.ID
+}
+
+//****************************************************************//
+// image-related functions
+//****************************************************************//
+
+// ListImages returns the list of available images in the server.
+//
+// See http://goo.gl/HRVN1Z for more details.
+func (c *Client) ListImages(opts ListImagesOptions) ([]APIImages, error) {
+ path := "/images/json?" + queryString(opts)
+ body, _, err := c.do("GET", path, doOptions{})
+ if err != nil {
+ return nil, err
+ }
+ var images []APIImages
+ err = json.Unmarshal(body, &images)
+ if err != nil {
+ return nil, err
+ }
+ return images, nil
+}
+
+// ImageHistory returns the history of the image by its name or ID.
+//
+// See http://goo.gl/2oJmNs for more details.
+func (c *Client) ImageHistory(name string) ([]ImageHistory, error) {
+ body, status, err := c.do("GET", "/images/"+name+"/history", doOptions{})
+ if status == http.StatusNotFound {
+ return nil, ErrNoSuchImage
+ }
+ if err != nil {
+ return nil, err
+ }
+ var history []ImageHistory
+ err = json.Unmarshal(body, &history)
+ if err != nil {
+ return nil, err
+ }
+ return history, nil
+}
+
+// RemoveImage removes an image by its name or ID.
+//
+// See http://goo.gl/znj0wM for more details.
+func (c *Client) RemoveImage(name string) error {
+ _, status, err := c.do("DELETE", "/images/"+name, doOptions{})
+ if status == http.StatusNotFound {
+ return ErrNoSuchImage
+ }
+ return err
+}
+
+// RemoveImageExtended removes an image by its name or ID.
+// Extra params can be passed, see RemoveImageOptions
+//
+// See http://goo.gl/znj0wM for more details.
+func (c *Client) RemoveImageExtended(name string, opts RemoveImageOptions) error {
+ uri := fmt.Sprintf("/images/%s?%s", name, queryString(&opts))
+ _, status, err := c.do("DELETE", uri, doOptions{})
+ if status == http.StatusNotFound {
+ return ErrNoSuchImage
+ }
+ return err
+}
+
+// InspectImage returns an image by its name or ID.
+//
+// See http://goo.gl/Q112NY for more details.
+func (c *Client) InspectImage(name string) (*Image, error) {
+ body, status, err := c.do("GET", "/images/"+name+"/json", doOptions{})
+ if status == http.StatusNotFound {
+ return nil, ErrNoSuchImage
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ var image Image
+
+ // if the caller elected to skip checking the server's version, assume it's the latest
+ if c.SkipServerVersionCheck || c.expectedAPIVersion.GreaterThanOrEqualTo(apiVersion112) {
+ err = json.Unmarshal(body, &image)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ var imagePre012 ImagePre012
+ err = json.Unmarshal(body, &imagePre012)
+ if err != nil {
+ return nil, err
+ }
+
+ image.ID = imagePre012.ID
+ image.Parent = imagePre012.Parent
+ image.Comment = imagePre012.Comment
+ image.Created = imagePre012.Created
+ image.Container = imagePre012.Container
+ image.ContainerConfig = imagePre012.ContainerConfig
+ image.DockerVersion = imagePre012.DockerVersion
+ image.Author = imagePre012.Author
+ image.Config = imagePre012.Config
+ image.Architecture = imagePre012.Architecture
+ image.Size = imagePre012.Size
+ }
+
+ return &image, nil
+}
+
+// PushImage pushes an image to a remote registry, logging progress to w.
+//
+// An empty instance of AuthConfiguration may be used for unauthenticated
+// pushes.
+//
+// See http://goo.gl/pN8A3P for more details.
+func (c *Client) PushImage(opts PushImageOptions, auth AuthConfiguration) error {
+ if opts.Name == "" {
+ return ErrNoSuchImage
+ }
+ headers, err := headersWithAuth(auth)
+ if err != nil {
+ return err
+ }
+ name := opts.Name
+ opts.Name = ""
+ path := "/images/" + name + "/push?" + queryString(&opts)
+ return c.stream("POST", path, streamOptions{
+ setRawTerminal: true,
+ rawJSONStream: opts.RawJSONStream,
+ headers: headers,
+ stdout: opts.OutputStream,
+ })
+}
+
+// PullImage pulls an image from a remote registry, logging progress to opts.OutputStream.
+//
+// See http://goo.gl/ACyYNS for more details.
+func (c *Client) PullImage(opts PullImageOptions, auth AuthConfiguration) error {
+ if opts.Repository == "" {
+ return ErrNoSuchImage
+ }
+
+ headers, err := headersWithAuth(auth)
+ if err != nil {
+ return err
+ }
+ return c.createImage(queryString(&opts), headers, nil, opts.OutputStream, opts.RawJSONStream)
+}
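+
+// Usage sketch: an unauthenticated pull with progress streamed to a buffer
+// (the Repository field follows the check above; OutputStream is assumed to
+// mirror the push options):
+//
+//	var buf bytes.Buffer
+//	err := c.PullImage(
+//	    PullImageOptions{Repository: "busybox", OutputStream: &buf},
+//	    AuthConfiguration{},
+//	)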
+
+func (c *Client) createImage(qs string, headers map[string]string, in io.Reader, w io.Writer, rawJSONStream bool) error {
+ path := "/images/create?" + qs
+ return c.stream("POST", path, streamOptions{
+ setRawTerminal: true,
+ rawJSONStream: rawJSONStream,
+ headers: headers,
+ in: in,
+ stdout: w,
+ })
+}
+
+// LoadImage imports a Docker image from a tarball.
+//
+// See http://goo.gl/Y8NNCq for more details.
+func (c *Client) LoadImage(opts LoadImageOptions) error {
+ return c.stream("POST", "/images/load", streamOptions{
+ setRawTerminal: true,
+ in: opts.InputStream,
+ })
+}
+
+// ExportImage exports an image (as a tar file) into the stream
+//
+// See http://goo.gl/mi6kvk for more details.
+func (c *Client) ExportImage(opts ExportImageOptions) error {
+ return c.stream("GET", fmt.Sprintf("/images/%s/get", opts.Name), streamOptions{
+ setRawTerminal: true,
+ stdout: opts.OutputStream,
+ })
+}
+
+// ExportImages exports one or more images (as a tar file) into the stream
+//
+// See http://goo.gl/YeZzQK for more details.
+func (c *Client) ExportImages(opts ExportImagesOptions) error {
+ if len(opts.Names) == 0 {
+ return ErrMustSpecifyNames
+ }
+ return c.stream("GET", "/images/get?"+queryString(&opts), streamOptions{
+ setRawTerminal: true,
+ stdout: opts.OutputStream,
+ })
+}
+
+// ImportImage imports an image from a url, a file or stdin
+//
+// See http://goo.gl/PhBKnS for more details.
+func (c *Client) ImportImage(opts ImportImageOptions) error {
+ if opts.Repository == "" {
+ return ErrNoSuchImage
+ }
+ if opts.Source != "-" {
+ opts.InputStream = nil
+ }
+ if opts.Source != "-" && !isURL(opts.Source) {
+ f, err := os.Open(opts.Source)
+ if err != nil {
+ return err
+ }
+ b, err := ioutil.ReadAll(f)
+ f.Close()
+ if err != nil {
+ return err
+ }
+ opts.InputStream = bytes.NewBuffer(b)
+ opts.Source = "-"
+ }
+ return c.createImage(queryString(&opts), nil, opts.InputStream, opts.OutputStream, opts.RawJSONStream)
+}
+
+// BuildImage builds an image from a tarball's url or a Dockerfile in the input
+// stream.
+//
+// See http://goo.gl/7nuGXa for more details.
+func (c *Client) BuildImage(opts BuildImageOptions) error {
+ if opts.OutputStream == nil {
+ return ErrMissingOutputStream
+ }
+ headers, err := headersWithAuth(opts.Auth, opts.AuthConfigs)
+ if err != nil {
+ return err
+ }
+
+ if opts.Remote != "" && opts.Name == "" {
+ opts.Name = opts.Remote
+ }
+ if opts.InputStream != nil || opts.ContextDir != "" {
+ headers["Content-Type"] = "application/tar"
+ } else if opts.Remote == "" {
+ return ErrMissingRepo
+ }
+ if opts.ContextDir != "" {
+ if opts.InputStream != nil {
+ return ErrMultipleContexts
+ }
+ var err error
+ if opts.InputStream, err = createTarStream(opts.ContextDir, opts.Dockerfile); err != nil {
+ return err
+ }
+ }
+
+ return c.stream("POST", fmt.Sprintf("/build?%s", queryString(&opts)), streamOptions{
+ setRawTerminal: true,
+ rawJSONStream: opts.RawJSONStream,
+ headers: headers,
+ in: opts.InputStream,
+ stdout: opts.OutputStream,
+ })
+}
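+
+// Usage sketch: building from a local context directory (Name, ContextDir
+// and OutputStream follow the checks above):
+//
+//	var buf bytes.Buffer
+//	err := c.BuildImage(BuildImageOptions{
+//	    Name:         "myimage:latest",
+//	    ContextDir:   "./app",
+//	    OutputStream: &buf,
+//	})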
+
+// TagImage adds a tag to the image identified by the given name.
+//
+// See http://goo.gl/5g6qFy for more details.
+func (c *Client) TagImage(name string, opts TagImageOptions) error {
+ if name == "" {
+ return ErrNoSuchImage
+ }
+ _, status, err := c.do("POST", fmt.Sprintf("/images/"+name+"/tag?%s",
+ queryString(&opts)), doOptions{})
+
+ if status == http.StatusNotFound {
+ return ErrNoSuchImage
+ }
+
+ return err
+}
+
+func isURL(u string) bool {
+ p, err := url.Parse(u)
+ if err != nil {
+ return false
+ }
+ return p.Scheme == "http" || p.Scheme == "https"
+}
+
+func headersWithAuth(auths ...interface{}) (map[string]string, error) {
+ var headers = make(map[string]string)
+
+ for _, auth := range auths {
+ switch auth.(type) {
+ case AuthConfiguration:
+ var buf bytes.Buffer
+ if err := json.NewEncoder(&buf).Encode(auth); err != nil {
+ return nil, err
+ }
+ headers["X-Registry-Auth"] = base64.URLEncoding.EncodeToString(buf.Bytes())
+ case AuthConfigurations:
+ var buf bytes.Buffer
+ if err := json.NewEncoder(&buf).Encode(auth); err != nil {
+ return nil, err
+ }
+ headers["X-Registry-Config"] = base64.URLEncoding.EncodeToString(buf.Bytes())
+ }
+ }
+
+ return headers, nil
+}
+
+// SearchImages searches Docker Hub for the given term.
+//
+// See http://goo.gl/xI5lLZ for more details.
+func (c *Client) SearchImages(term string) ([]APIImageSearch, error) {
+ body, _, err := c.do("GET", "/images/search?term="+term, doOptions{})
+ if err != nil {
+ return nil, err
+ }
+ var searchResult []APIImageSearch
+ err = json.Unmarshal(body, &searchResult)
+ if err != nil {
+ return nil, err
+ }
+ return searchResult, nil
+}
+
+//****************************************************************//
+// misc functions
+//****************************************************************//
+
+// Version returns version information about the docker server.
+//
+// See http://goo.gl/BOZrF5 for more details.
+func (c *Client) Version() (*Env, error) {
+ body, _, err := c.do("GET", "/version", doOptions{})
+ if err != nil {
+ return nil, err
+ }
+ var env Env
+ if err := env.Decode(bytes.NewReader(body)); err != nil {
+ return nil, err
+ }
+ return &env, nil
+}
+
+// Info returns system-wide information about the Docker server.
+//
+// See http://goo.gl/wmqZsW for more details.
+func (c *Client) Info() (*Env, error) {
+ body, _, err := c.do("GET", "/info", doOptions{})
+ if err != nil {
+ return nil, err
+ }
+ var info Env
+ err = info.Decode(bytes.NewReader(body))
+ if err != nil {
+ return nil, err
+ }
+ return &info, nil
+}
+
+// ParseRepositoryTag takes a repository reference and returns it split into
+// two parts: the repository and the tag.
+//
+// Some examples:
+//
+// localhost.localdomain:5000/samalba/hipache:latest -> localhost.localdomain:5000/samalba/hipache, latest
+// localhost.localdomain:5000/samalba/hipache -> localhost.localdomain:5000/samalba/hipache, ""
+func ParseRepositoryTag(repoTag string) (repository string, tag string) {
+ n := strings.LastIndex(repoTag, ":")
+ if n < 0 {
+ return repoTag, ""
+ }
+ if tag := repoTag[n+1:]; !strings.Contains(tag, "/") {
+ return repoTag[:n], tag
+ }
+ return repoTag, ""
+}
+
+//****************************************************************//
+// tar-related functions
+//****************************************************************//
+
+func createTarStream(srcPath, dockerfilePath string) (io.ReadCloser, error) {
+ excludes, err := parseDockerignore(srcPath)
+ if err != nil {
+ return nil, err
+ }
+
+ includes := []string{"."}
+
+ // If .dockerignore mentions .dockerignore or the Dockerfile
+ // then make sure we send both files over to the daemon
+ // because Dockerfile is, obviously, needed no matter what, and
+ // .dockerignore is needed to know if either one needs to be
+// removed. The daemon will remove them for us, if needed, after it
+ // parses the Dockerfile.
+ //
+ // https://github.com/docker/docker/issues/8330
+ //
+ forceIncludeFiles := []string{".dockerignore", dockerfilePath}
+
+ for _, includeFile := range forceIncludeFiles {
+ if includeFile == "" {
+ continue
+ }
+ keepThem, err := fileutils.Matches(includeFile, excludes)
+ if err != nil {
+ return nil, fmt.Errorf("cannot match .dockerfile: '%s', error: %s", includeFile, err)
+ }
+ if keepThem {
+ includes = append(includes, includeFile)
+ }
+ }
+
+ if err := validateContextDirectory(srcPath, excludes); err != nil {
+ return nil, err
+ }
+ tarOpts := &archive.TarOptions{
+ ExcludePatterns: excludes,
+ IncludeFiles: includes,
+ Compression: archive.Uncompressed,
+ NoLchown: true,
+ }
+ return archive.TarWithOptions(srcPath, tarOpts)
+}
+
+// validateContextDirectory checks if all the contents of the directory
+// can be read and returns an error if some files can't be read.
+// Symlinks which point to non-existing files don't trigger an error
+func validateContextDirectory(srcPath string, excludes []string) error {
+ return filepath.Walk(filepath.Join(srcPath, "."), func(filePath string, f os.FileInfo, err error) error {
+ // skip this directory/file if it's not in the path, it won't get added to the context
+ if relFilePath, err := filepath.Rel(srcPath, filePath); err != nil {
+ return err
+ } else if skip, err := fileutils.Matches(relFilePath, excludes); err != nil {
+ return err
+ } else if skip {
+ if f.IsDir() {
+ return filepath.SkipDir
+ }
+ return nil
+ }
+
+ if err != nil {
+ if os.IsPermission(err) {
+ return fmt.Errorf("can't stat '%s'", filePath)
+ }
+ if os.IsNotExist(err) {
+ return nil
+ }
+ return err
+ }
+
+ // skip checking if symlinks point to non-existing files, such symlinks can be useful
+ // also skip named pipes, because they can hang on open
+ if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 {
+ return nil
+ }
+
+ if !f.IsDir() {
+ currentFile, err := os.Open(filePath)
+ if err != nil && os.IsPermission(err) {
+ return fmt.Errorf("no permission to read from '%s'", filePath)
+ }
+ currentFile.Close()
+ }
+ return nil
+ })
+}
+
+func parseDockerignore(root string) ([]string, error) {
+ var excludes []string
+ ignore, err := ioutil.ReadFile(path.Join(root, ".dockerignore"))
+ if err != nil && !os.IsNotExist(err) {
+ return excludes, fmt.Errorf("error reading .dockerignore: '%s'", err)
+ }
+ excludes = strings.Split(string(ignore), "\n")
+
+ return excludes, nil
+}
+
+//****************************************************************//
+// tls-related functions
+//****************************************************************//
+
+func (c *tlsClientCon) CloseWrite() error {
+ // Go standard tls.Conn doesn't provide the CloseWrite() method so we do it
+ // on its underlying connection.
+ if cwc, ok := c.rawConn.(interface {
+ CloseWrite() error
+ }); ok {
+ return cwc.CloseWrite()
+ }
+ return nil
+}
+
+func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) {
+ // We want the Timeout and Deadline values from dialer to cover the
+ // whole process: TCP connection and TLS handshake. This means that we
+ // also need to start our own timers now.
+ timeout := dialer.Timeout
+
+ if !dialer.Deadline.IsZero() {
+ deadlineTimeout := dialer.Deadline.Sub(time.Now())
+ if timeout == 0 || deadlineTimeout < timeout {
+ timeout = deadlineTimeout
+ }
+ }
+
+ var errChannel chan error
+
+ if timeout != 0 {
+ errChannel = make(chan error, 2)
+ time.AfterFunc(timeout, func() {
+ errChannel <- errors.New("")
+ })
+ }
+
+ rawConn, err := dialer.Dial(network, addr)
+ if err != nil {
+ return nil, err
+ }
+
+ colonPos := strings.LastIndex(addr, ":")
+ if colonPos == -1 {
+ colonPos = len(addr)
+ }
+ hostname := addr[:colonPos]
+
+ // If no ServerName is set, infer the ServerName
+ // from the hostname we're connecting to.
+ if config.ServerName == "" {
+ // Make a copy to avoid polluting argument or default.
+ c := *config
+ c.ServerName = hostname
+ config = &c
+ }
+
+ conn := tls.Client(rawConn, config)
+
+ if timeout == 0 {
+ err = conn.Handshake()
+ } else {
+ go func() {
+ errChannel <- conn.Handshake()
+ }()
+
+ err = <-errChannel
+ }
+
+ if err != nil {
+ rawConn.Close()
+ return nil, err
+ }
+
+ // This is where we differ from the standard crypto/tls package: we return a
+ // wrapper which holds both the TLS and the raw connections.
+ return &tlsClientCon{conn, rawConn}, nil
+}
+
+func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) {
+ return tlsDialWithDialer(new(net.Dialer), network, addr, config)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/README.md b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/README.md
new file mode 100644
index 0000000..7307d96
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/README.md
@@ -0,0 +1 @@
+This code provides helper functions for dealing with archive files.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive.go
new file mode 100644
index 0000000..04e40a9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive.go
@@ -0,0 +1,884 @@
+package archive
+
+import (
+ "archive/tar"
+ "bufio"
+ "bytes"
+ "compress/bzip2"
+ "compress/gzip"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "syscall"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/pkg/fileutils"
+ "github.com/docker/docker/pkg/pools"
+ "github.com/docker/docker/pkg/promise"
+ "github.com/docker/docker/pkg/system"
+)
+
+type (
+ Archive io.ReadCloser
+ ArchiveReader io.Reader
+ Compression int
+ TarChownOptions struct {
+ UID, GID int
+ }
+ TarOptions struct {
+ IncludeFiles []string
+ ExcludePatterns []string
+ Compression Compression
+ NoLchown bool
+ ChownOpts *TarChownOptions
+ Name string
+ IncludeSourceDir bool
+ // When unpacking, specifies whether overwriting a directory with a
+ // non-directory is allowed and vice versa.
+ NoOverwriteDirNonDir bool
+ }
+
+ // Archiver allows the reuse of most utility functions of this package
+ // with a pluggable Untar function.
+ Archiver struct {
+ Untar func(io.Reader, string, *TarOptions) error
+ }
+
+ // breakoutError is used to differentiate errors related to breaking out of
+ // the destination directory. When testing archive breakout in the unit
+ // tests, this error is expected in order for the test to pass.
+ breakoutError error
+)
+
+var (
+ ErrNotImplemented = errors.New("Function not implemented")
+ defaultArchiver = &Archiver{Untar}
+)
+
+const (
+ Uncompressed Compression = iota
+ Bzip2
+ Gzip
+ Xz
+)
+
+func IsArchive(header []byte) bool {
+ compression := DetectCompression(header)
+ if compression != Uncompressed {
+ return true
+ }
+ r := tar.NewReader(bytes.NewBuffer(header))
+ _, err := r.Next()
+ return err == nil
+}
+
+func DetectCompression(source []byte) Compression {
+ for compression, m := range map[Compression][]byte{
+ Bzip2: {0x42, 0x5A, 0x68},
+ Gzip: {0x1F, 0x8B, 0x08},
+ Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
+ } {
+ if len(source) < len(m) {
+ logrus.Debugf("Len too short")
+ continue
+ }
+ if bytes.Equal(m, source[:len(m)]) {
+ return compression
+ }
+ }
+ return Uncompressed
+}
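+
+// Illustrative sketch: magic-byte sniffing in action:
+//
+//	DetectCompression([]byte{0x1F, 0x8B, 0x08, 0x00}) // Gzip
+//	DetectCompression([]byte("plain text"))           // Uncompressed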
+
+func xzDecompress(archive io.Reader) (io.ReadCloser, error) {
+ args := []string{"xz", "-d", "-c", "-q"}
+
+ return CmdStream(exec.Command(args[0], args[1:]...), archive)
+}
+
+func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
+ p := pools.BufioReader32KPool
+ buf := p.Get(archive)
+ bs, err := buf.Peek(10)
+ if err != nil {
+ return nil, err
+ }
+
+ compression := DetectCompression(bs)
+ switch compression {
+ case Uncompressed:
+ readBufWrapper := p.NewReadCloserWrapper(buf, buf)
+ return readBufWrapper, nil
+ case Gzip:
+ gzReader, err := gzip.NewReader(buf)
+ if err != nil {
+ return nil, err
+ }
+ readBufWrapper := p.NewReadCloserWrapper(buf, gzReader)
+ return readBufWrapper, nil
+ case Bzip2:
+ bz2Reader := bzip2.NewReader(buf)
+ readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader)
+ return readBufWrapper, nil
+ case Xz:
+ xzReader, err := xzDecompress(buf)
+ if err != nil {
+ return nil, err
+ }
+ readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
+ return readBufWrapper, nil
+ default:
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ }
+}
+
+func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) {
+ p := pools.BufioWriter32KPool
+ buf := p.Get(dest)
+ switch compression {
+ case Uncompressed:
+ writeBufWrapper := p.NewWriteCloserWrapper(buf, buf)
+ return writeBufWrapper, nil
+ case Gzip:
+ gzWriter := gzip.NewWriter(dest)
+ writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter)
+ return writeBufWrapper, nil
+ case Bzip2, Xz:
+ // compress/bzip2 does not support writing, and there is no xz support at all.
+ // However, this is not a problem as docker currently only generates gzipped tars.
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ default:
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ }
+}
+
+func (compression *Compression) Extension() string {
+ switch *compression {
+ case Uncompressed:
+ return "tar"
+ case Bzip2:
+ return "tar.bz2"
+ case Gzip:
+ return "tar.gz"
+ case Xz:
+ return "tar.xz"
+ }
+ return ""
+}
+
+type tarAppender struct {
+ TarWriter *tar.Writer
+ Buffer *bufio.Writer
+
+ // for hardlink mapping
+ SeenFiles map[uint64]string
+}
+
+// canonicalTarName provides a platform-independent and consistent POSIX-style
+// path for files and directories to be archived regardless of the platform.
+func canonicalTarName(name string, isDir bool) (string, error) {
+ name, err := CanonicalTarNameForPath(name)
+ if err != nil {
+ return "", err
+ }
+
+ // suffix with '/' for directories
+ if isDir && !strings.HasSuffix(name, "/") {
+ name += "/"
+ }
+ return name, nil
+}
+
+func (ta *tarAppender) addTarFile(path, name string) error {
+ fi, err := os.Lstat(path)
+ if err != nil {
+ return err
+ }
+
+ link := ""
+ if fi.Mode()&os.ModeSymlink != 0 {
+ if link, err = os.Readlink(path); err != nil {
+ return err
+ }
+ }
+
+ hdr, err := tar.FileInfoHeader(fi, link)
+ if err != nil {
+ return err
+ }
+ hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
+
+ name, err = canonicalTarName(name, fi.IsDir())
+ if err != nil {
+ return fmt.Errorf("tar: cannot canonicalize path: %v", err)
+ }
+ hdr.Name = name
+
+ nlink, inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys())
+ if err != nil {
+ return err
+ }
+
+ // if it's a regular file and has more than 1 link,
+ // it's hardlinked, so set the type flag accordingly
+ if fi.Mode().IsRegular() && nlink > 1 {
+ // a link should have a name that it links to
+ // and that linked name should be first in the tar archive
+ if oldpath, ok := ta.SeenFiles[inode]; ok {
+ hdr.Typeflag = tar.TypeLink
+ hdr.Linkname = oldpath
+ hdr.Size = 0 // This Must be here for the writer math to add up!
+ } else {
+ ta.SeenFiles[inode] = name
+ }
+ }
+
+ capability, _ := system.Lgetxattr(path, "security.capability")
+ if capability != nil {
+ hdr.Xattrs = make(map[string]string)
+ hdr.Xattrs["security.capability"] = string(capability)
+ }
+
+ if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+ return err
+ }
+
+ if hdr.Typeflag == tar.TypeReg {
+ file, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+
+ ta.Buffer.Reset(ta.TarWriter)
+ defer ta.Buffer.Reset(nil)
+ _, err = io.Copy(ta.Buffer, file)
+ file.Close()
+ if err != nil {
+ return err
+ }
+ err = ta.Buffer.Flush()
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *TarChownOptions) error {
+ // hdr.Mode is in linux format, which we can use for syscalls,
+ // but for os.Foo() calls we need the mode converted to os.FileMode,
+ // so use hdrInfo.Mode() (they differ for e.g. setuid bits)
+ hdrInfo := hdr.FileInfo()
+
+ switch hdr.Typeflag {
+ case tar.TypeDir:
+ // Create directory unless it exists as a directory already.
+ // In that case we just want to merge the two
+ if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
+ if err := os.Mkdir(path, hdrInfo.Mode()); err != nil {
+ return err
+ }
+ }
+
+ case tar.TypeReg, tar.TypeRegA:
+ // Source is regular file
+ file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
+ if err != nil {
+ return err
+ }
+ if _, err := io.Copy(file, reader); err != nil {
+ file.Close()
+ return err
+ }
+ file.Close()
+
+ case tar.TypeBlock, tar.TypeChar, tar.TypeFifo:
+ // Handle this in an OS-specific way
+ if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
+ return err
+ }
+
+ case tar.TypeLink:
+ targetPath := filepath.Join(extractDir, hdr.Linkname)
+ // check for hardlink breakout
+ if !strings.HasPrefix(targetPath, extractDir) {
+ return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
+ }
+ if err := os.Link(targetPath, path); err != nil {
+ return err
+ }
+
+ case tar.TypeSymlink:
+ // path -> hdr.Linkname = targetPath
+ // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file
+ targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname)
+
+ // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
+ // that symlink would first have to be created, which would be caught earlier, at this very check:
+ if !strings.HasPrefix(targetPath, extractDir) {
+ return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname))
+ }
+ if err := os.Symlink(hdr.Linkname, path); err != nil {
+ return err
+ }
+
+ case tar.TypeXGlobalHeader:
+ logrus.Debugf("PAX Global Extended Headers found and ignored")
+ return nil
+
+ default:
+ return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag)
+ }
+
+ // Lchown is not supported on Windows.
+ if Lchown && runtime.GOOS != "windows" {
+ if chownOpts == nil {
+ chownOpts = &TarChownOptions{UID: hdr.Uid, GID: hdr.Gid}
+ }
+ if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil {
+ return err
+ }
+ }
+
+ for key, value := range hdr.Xattrs {
+ if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
+ return err
+ }
+ }
+
+ // There is no LChmod, so ignore mode for symlink. Also, this
+ // must happen after chown, as that can modify the file mode
+ if err := handleLChmod(hdr, path, hdrInfo); err != nil {
+ return err
+ }
+
+ ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
+ // syscall.UtimesNano doesn't support a NOFOLLOW flag atm
+ if hdr.Typeflag == tar.TypeLink {
+ if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
+ if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
+ return err
+ }
+ }
+ } else if hdr.Typeflag != tar.TypeSymlink {
+ if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
+ return err
+ }
+ } else {
+ if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
+ return err
+ }
+ }
+ return nil
+}
+
+// Tar creates an archive from the directory at `path`, and returns it as a
+// stream of bytes.
+func Tar(path string, compression Compression) (io.ReadCloser, error) {
+ return TarWithOptions(path, &TarOptions{Compression: compression})
+}
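+
+// For illustration, a minimal sketch of producing a gzip-compressed tar
+// stream and draining it ("/some/dir" is an assumed example path):
+//
+//	rc, err := Tar("/some/dir", Gzip)
+//	if err != nil {
+//		return err
+//	}
+//	defer rc.Close()
+//	_, err = io.Copy(ioutil.Discard, rc) // consume the stream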
+
+// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
+// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
+func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
+
+ patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns)
+
+ if err != nil {
+ return nil, err
+ }
+
+ pipeReader, pipeWriter := io.Pipe()
+
+ compressWriter, err := CompressStream(pipeWriter, options.Compression)
+ if err != nil {
+ return nil, err
+ }
+
+ go func() {
+ ta := &tarAppender{
+ TarWriter: tar.NewWriter(compressWriter),
+ Buffer: pools.BufioWriter32KPool.Get(nil),
+ SeenFiles: make(map[uint64]string),
+ }
+
+ defer func() {
+ // Make sure to check the error on Close.
+ if err := ta.TarWriter.Close(); err != nil {
+ logrus.Debugf("Can't close tar writer: %s", err)
+ }
+ if err := compressWriter.Close(); err != nil {
+ logrus.Debugf("Can't close compress writer: %s", err)
+ }
+ if err := pipeWriter.Close(); err != nil {
+ logrus.Debugf("Can't close pipe writer: %s", err)
+ }
+ }()
+
+ // this buffer is needed for the duration of this piped stream
+ defer pools.BufioWriter32KPool.Put(ta.Buffer)
+
+ // In general we log errors here but ignore them because
+ // during e.g. a diff operation the container can continue
+ // mutating the filesystem and we can see transient errors
+ // from this
+
+ stat, err := os.Lstat(srcPath)
+ if err != nil {
+ return
+ }
+
+ if !stat.IsDir() {
+ // We can't later join a non-dir with any includes because the
+ // 'walk' will error if "file/." is stat-ed and "file" is not a
+ // directory. So, we must split the source path and use the
+ // basename as the include.
+ if len(options.IncludeFiles) > 0 {
+ logrus.Warn("Tar: Can't archive a file with includes")
+ }
+
+ dir, base := SplitPathDirEntry(srcPath)
+ srcPath = dir
+ options.IncludeFiles = []string{base}
+ }
+
+ if len(options.IncludeFiles) == 0 {
+ options.IncludeFiles = []string{"."}
+ }
+
+ seen := make(map[string]bool)
+
+ var renamedRelFilePath string // For when tar.Options.Name is set
+ for _, include := range options.IncludeFiles {
+ // We can't use filepath.Join(srcPath, include) because this will
+ // clean away a trailing "." or "/" which may be important.
+ walkRoot := strings.Join([]string{srcPath, include}, string(filepath.Separator))
+ filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
+ if err != nil {
+ logrus.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err)
+ return nil
+ }
+
+ relFilePath, err := filepath.Rel(srcPath, filePath)
+ if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) {
+ // Error getting relative path OR we are looking
+ // at the source directory path. Skip in both situations.
+ return nil
+ }
+
+ if options.IncludeSourceDir && include == "." && relFilePath != "." {
+ relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator))
+ }
+
+ skip := false
+
+ // If "include" is an exact match for the current file
+ // then even if there's an "excludePatterns" pattern that
+ // matches it, don't skip it. IOW, assume an explicit 'include'
+ // is asking for that file no matter what - which is true
+ // for some files, like .dockerignore and Dockerfile (sometimes)
+ if include != relFilePath {
+ skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs)
+ if err != nil {
+ logrus.Debugf("Error matching %s: %v", relFilePath, err)
+ return err
+ }
+ }
+
+ if skip {
+ if !exceptions && f.IsDir() {
+ return filepath.SkipDir
+ }
+ return nil
+ }
+
+ if seen[relFilePath] {
+ return nil
+ }
+ seen[relFilePath] = true
+
+ // TODO Windows: Verify if this needs to be os.Pathseparator
+ // Rename the base resource
+ if options.Name != "" && filePath == srcPath+"/"+filepath.Base(relFilePath) {
+ renamedRelFilePath = relFilePath
+ }
+ // Set this to make sure the items underneath also get renamed
+ if options.Name != "" {
+ relFilePath = strings.Replace(relFilePath, renamedRelFilePath, options.Name, 1)
+ }
+
+ if err := ta.addTarFile(filePath, relFilePath); err != nil {
+ logrus.Debugf("Can't add file %s to tar: %s", filePath, err)
+ }
+ return nil
+ })
+ }
+ }()
+
+ return pipeReader, nil
+}
+
+func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
+ tr := tar.NewReader(decompressedArchive)
+ trBuf := pools.BufioReader32KPool.Get(nil)
+ defer pools.BufioReader32KPool.Put(trBuf)
+
+ var dirs []*tar.Header
+
+ // Iterate through the files in the archive.
+loop:
+ for {
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ // end of tar archive
+ break
+ }
+ if err != nil {
+ return err
+ }
+
+ // Normalize name, for safety and for a simple is-root check
+ // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows:
+ // This keeps "..\" as-is, but normalizes "\..\" to "\".
+ hdr.Name = filepath.Clean(hdr.Name)
+
+ for _, exclude := range options.ExcludePatterns {
+ if strings.HasPrefix(hdr.Name, exclude) {
+ continue loop
+ }
+ }
+
+ // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in
+ // the filepath format for the OS on which the daemon is running. Hence
+ // the check for a slash-suffix MUST be done in an OS-agnostic way.
+ if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+ // Not the root directory, ensure that the parent directory exists
+ parent := filepath.Dir(hdr.Name)
+ parentPath := filepath.Join(dest, parent)
+ if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+ err = system.MkdirAll(parentPath, 0777)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ path := filepath.Join(dest, hdr.Name)
+ rel, err := filepath.Rel(dest, path)
+ if err != nil {
+ return err
+ }
+ if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+ return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+ }
+
+ // If path exists we almost always just want to remove and replace it.
+ // The only exception is when it is a directory *and* the file from
+ // the layer is also a directory. Then we want to merge them (i.e.
+ // just apply the metadata from the layer).
+ if fi, err := os.Lstat(path); err == nil {
+ if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir {
+ // If NoOverwriteDirNonDir is true then we cannot replace
+ // an existing directory with a non-directory from the archive.
+ return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest)
+ }
+
+ if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir {
+ // If NoOverwriteDirNonDir is true then we cannot replace
+ // an existing non-directory with a directory from the archive.
+ return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest)
+ }
+
+ if fi.IsDir() && hdr.Name == "." {
+ continue
+ }
+
+ if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
+ if err := os.RemoveAll(path); err != nil {
+ return err
+ }
+ }
+ }
+ trBuf.Reset(tr)
+
+ if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts); err != nil {
+ return err
+ }
+
+ // Directory mtimes must be handled at the end to avoid further
+ // file creation in them to modify the directory mtime
+ if hdr.Typeflag == tar.TypeDir {
+ dirs = append(dirs, hdr)
+ }
+ }
+
+ for _, hdr := range dirs {
+ path := filepath.Join(dest, hdr.Name)
+ ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
+ if err := syscall.UtimesNano(path, ts); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive may be compressed with one of the following algorithms:
+// identity (uncompressed), gzip, bzip2, xz.
+// FIXME: specify behavior when target path exists vs. doesn't exist.
+func Untar(archive io.Reader, dest string, options *TarOptions) error {
+ if archive == nil {
+ return fmt.Errorf("Empty archive")
+ }
+ dest = filepath.Clean(dest)
+ if options == nil {
+ options = &TarOptions{}
+ }
+ if options.ExcludePatterns == nil {
+ options.ExcludePatterns = []string{}
+ }
+ decompressedArchive, err := DecompressStream(archive)
+ if err != nil {
+ return err
+ }
+ defer decompressedArchive.Close()
+ return Unpack(decompressedArchive, dest, options)
+}
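+
+// For illustration, a minimal sketch of a tar/untar round trip built from
+// the functions above ("/src" and "/dst" are assumed example paths):
+//
+//	archive, err := TarWithOptions("/src", &TarOptions{
+//		Compression:     Uncompressed,
+//		ExcludePatterns: []string{"tmp"},
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	defer archive.Close()
+//	err = Untar(archive, "/dst", nil)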
+
+func (archiver *Archiver) TarUntar(src, dst string) error {
+ logrus.Debugf("TarUntar(%s %s)", src, dst)
+ archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
+ if err != nil {
+ return err
+ }
+ defer archive.Close()
+ return archiver.Untar(archive, dst, nil)
+}
+
+// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
+// If either Tar or Untar fails, TarUntar aborts and returns the error.
+func TarUntar(src, dst string) error {
+ return defaultArchiver.TarUntar(src, dst)
+}
+
+func (archiver *Archiver) UntarPath(src, dst string) error {
+ archive, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer archive.Close()
+ if err := archiver.Untar(archive, dst, nil); err != nil {
+ return err
+ }
+ return nil
+}
+
+// UntarPath is a convenience function which looks for an archive
+// at filesystem path `src`, and unpacks it at `dst`.
+func UntarPath(src, dst string) error {
+ return defaultArchiver.UntarPath(src, dst)
+}
+
+func (archiver *Archiver) CopyWithTar(src, dst string) error {
+ srcSt, err := os.Stat(src)
+ if err != nil {
+ return err
+ }
+ if !srcSt.IsDir() {
+ return archiver.CopyFileWithTar(src, dst)
+ }
+ // Create dst, copy src's content into it
+ logrus.Debugf("Creating dest directory: %s", dst)
+ if err := system.MkdirAll(dst, 0755); err != nil && !os.IsExist(err) {
+ return err
+ }
+ logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
+ return archiver.TarUntar(src, dst)
+}
+
+// CopyWithTar creates a tar archive of filesystem path `src`, and
+// unpacks it at filesystem path `dst`.
+// The archive is streamed directly with fixed buffering and no
+// intermediary disk IO.
+func CopyWithTar(src, dst string) error {
+ return defaultArchiver.CopyWithTar(src, dst)
+}
+
+func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
+ logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst)
+ srcSt, err := os.Stat(src)
+ if err != nil {
+ return err
+ }
+
+ if srcSt.IsDir() {
+ return fmt.Errorf("Can't copy a directory")
+ }
+
+ // Clean up the trailing slash. This must be done in an operating
+ // system specific manner.
+ if dst[len(dst)-1] == os.PathSeparator {
+ dst = filepath.Join(dst, filepath.Base(src))
+ }
+ // Create the holding directory if necessary
+ if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil && !os.IsExist(err) {
+ return err
+ }
+
+ r, w := io.Pipe()
+ errC := promise.Go(func() error {
+ defer w.Close()
+
+ srcF, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer srcF.Close()
+
+ hdr, err := tar.FileInfoHeader(srcSt, "")
+ if err != nil {
+ return err
+ }
+ hdr.Name = filepath.Base(dst)
+ hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
+
+ tw := tar.NewWriter(w)
+ defer tw.Close()
+ if err := tw.WriteHeader(hdr); err != nil {
+ return err
+ }
+ if _, err := io.Copy(tw, srcF); err != nil {
+ return err
+ }
+ return nil
+ })
+ defer func() {
+ if er := <-errC; err != nil {
+ err = er
+ }
+ }()
+ return archiver.Untar(r, filepath.Dir(dst), nil)
+}
+
+// CopyFileWithTar emulates the behavior of the 'cp' command-line
+// for a single file. It copies a regular file from path `src` to
+// path `dst`, and preserves all its metadata.
+//
+// Destination handling is operating-system specific, depending on where
+// the daemon is running. If `dst` ends with a trailing slash
+// the final destination path will be `dst/base(src)` (Linux) or
+// `dst\base(src)` (Windows).
+func CopyFileWithTar(src, dst string) (err error) {
+ return defaultArchiver.CopyFileWithTar(src, dst)
+}
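+
+// For illustration (assumed example paths): with a trailing slash on dst,
+// the base name of src is appended to it, so on Linux
+//
+//	err := CopyFileWithTar("/etc/hosts", "/tmp/backup/")
+//
+// copies the file to /tmp/backup/hosts.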
+
+// CmdStream executes a command, and returns its stdout as a stream.
+// If the command fails to run or doesn't complete successfully, an error
+// will be returned, including anything written on stderr.
+func CmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) {
+ if input != nil {
+ stdin, err := cmd.StdinPipe()
+ if err != nil {
+ return nil, err
+ }
+ // Write stdin if any
+ go func() {
+ io.Copy(stdin, input)
+ stdin.Close()
+ }()
+ }
+ stdout, err := cmd.StdoutPipe()
+ if err != nil {
+ return nil, err
+ }
+ stderr, err := cmd.StderrPipe()
+ if err != nil {
+ return nil, err
+ }
+ pipeR, pipeW := io.Pipe()
+ errChan := make(chan []byte)
+ // Collect stderr, we will use it in case of an error
+ go func() {
+ errText, e := ioutil.ReadAll(stderr)
+ if e != nil {
+ errText = []byte("(...couldn't fetch stderr: " + e.Error() + ")")
+ }
+ errChan <- errText
+ }()
+ // Copy stdout to the returned pipe
+ go func() {
+ _, err := io.Copy(pipeW, stdout)
+ if err != nil {
+ pipeW.CloseWithError(err)
+ }
+ errText := <-errChan
+ if err := cmd.Wait(); err != nil {
+ pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errText))
+ } else {
+ pipeW.Close()
+ }
+ }()
+ // Run the command and return the pipe
+ if err := cmd.Start(); err != nil {
+ return nil, err
+ }
+ return pipeR, nil
+}
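+
+// For illustration, a minimal sketch of streaming a command's output (the
+// command itself is an assumed example):
+//
+//	out, err := CmdStream(exec.Command("tar", "-tf", "/tmp/a.tar"), nil)
+//	if err != nil {
+//		return err
+//	}
+//	data, err := ioutil.ReadAll(out) // a non-zero exit surfaces here, with stderr attached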
+
+// NewTempArchive reads the content of src into a temporary file, and returns the contents
+// of that file as an archive. The archive can only be read once - as soon as reading completes,
+// the file will be deleted.
+func NewTempArchive(src Archive, dir string) (*TempArchive, error) {
+ f, err := ioutil.TempFile(dir, "")
+ if err != nil {
+ return nil, err
+ }
+ if _, err := io.Copy(f, src); err != nil {
+ return nil, err
+ }
+ if _, err := f.Seek(0, 0); err != nil {
+ return nil, err
+ }
+ st, err := f.Stat()
+ if err != nil {
+ return nil, err
+ }
+ size := st.Size()
+ return &TempArchive{File: f, Size: size}, nil
+}
+
+type TempArchive struct {
+ *os.File
+ Size int64 // Pre-computed from Stat().Size() as a convenience
+ read int64
+ closed bool
+}
+
+// Close closes the underlying file if it's still open, or does a no-op
+// to allow callers to try to close the TempArchive multiple times safely.
+func (archive *TempArchive) Close() error {
+ if archive.closed {
+ return nil
+ }
+
+ archive.closed = true
+
+ return archive.File.Close()
+}
+
+func (archive *TempArchive) Read(data []byte) (int, error) {
+ n, err := archive.File.Read(data)
+ archive.read += int64(n)
+ if err != nil || archive.read == archive.Size {
+ archive.Close()
+ os.Remove(archive.File.Name())
+ }
+ return n, err
+}
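+
+// Note that a TempArchive is single-use: once the last byte has been read
+// (or a read fails), the backing file is closed and removed, so the stream
+// cannot be rewound or read a second time.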
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_test.go
new file mode 100644
index 0000000..b93c76c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_test.go
@@ -0,0 +1,1204 @@
+package archive
+
+import (
+ "archive/tar"
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "strings"
+ "syscall"
+ "testing"
+ "time"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+func TestIsArchiveNilHeader(t *testing.T) {
+ out := IsArchive(nil)
+ if out {
+ t.Fatalf("isArchive should return false as nil is not a valid archive header")
+ }
+}
+
+func TestIsArchiveInvalidHeader(t *testing.T) {
+ header := []byte{0x00, 0x01, 0x02}
+ out := IsArchive(header)
+ if out {
+ t.Fatalf("isArchive should return false as %s is not a valid archive header", header)
+ }
+}
+
+func TestIsArchiveBzip2(t *testing.T) {
+ header := []byte{0x42, 0x5A, 0x68}
+ out := IsArchive(header)
+ if !out {
+ t.Fatalf("isArchive should return true as %s is a bz2 header", header)
+ }
+}
+
+func TestIsArchive7zip(t *testing.T) {
+ header := []byte{0x50, 0x4b, 0x03, 0x04}
+ out := IsArchive(header)
+ if out {
+ t.Fatalf("isArchive should return false as %s is a 7z header and it is not supported", header)
+ }
+}
+
+func TestDecompressStreamGzip(t *testing.T) {
+ cmd := exec.Command("/bin/sh", "-c", "touch /tmp/archive && gzip -f /tmp/archive")
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("Fail to create an archive file for test : %s.", output)
+ }
+ archive, err := os.Open("/tmp/archive.gz")
+ _, err = DecompressStream(archive)
+ if err != nil {
+ t.Fatalf("Failed to decompress a gzip file.")
+ }
+}
+
+func TestDecompressStreamBzip2(t *testing.T) {
+ cmd := exec.Command("/bin/sh", "-c", "touch /tmp/archive && bzip2 -f /tmp/archive")
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("Fail to create an archive file for test : %s.", output)
+ }
+ archive, err := os.Open("/tmp/archive.bz2")
+ _, err = DecompressStream(archive)
+ if err != nil {
+ t.Fatalf("Failed to decompress a bzip2 file.")
+ }
+}
+
+func TestDecompressStreamXz(t *testing.T) {
+ cmd := exec.Command("/bin/sh", "-c", "touch /tmp/archive && xz -f /tmp/archive")
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("Fail to create an archive file for test : %s.", output)
+ }
+ archive, err := os.Open("/tmp/archive.xz")
+ _, err = DecompressStream(archive)
+ if err != nil {
+ t.Fatalf("Failed to decompress a xz file.")
+ }
+}
+
+func TestCompressStreamXzUnsupported(t *testing.T) {
+ dest, err := os.Create("/tmp/dest")
+ if err != nil {
+ t.Fatalf("Failed to create the destination file")
+ }
+ _, err = CompressStream(dest, Xz)
+ if err == nil {
+ t.Fatalf("Should fail as xz is an unsupported compression format.")
+ }
+}
+
+func TestCompressStreamBzip2Unsupported(t *testing.T) {
+ dest, err := os.Create("/tmp/dest")
+ if err != nil {
+ t.Fatalf("Fail to create the destination file")
+ }
+ _, err = CompressStream(dest, Xz)
+ if err == nil {
+ t.Fatalf("Should fail as xz is unsupported for compression format.")
+ }
+}
+
+func TestCompressStreamInvalid(t *testing.T) {
+ dest, err := os.Create("/tmp/dest")
+ if err != nil {
+ t.Fatalf("Fail to create the destination file")
+ }
+ _, err = CompressStream(dest, -1)
+ if err == nil {
+ t.Fatalf("Should fail as xz is unsupported for compression format.")
+ }
+}
+
+func TestExtensionInvalid(t *testing.T) {
+ compression := Compression(-1)
+ output := compression.Extension()
+ if output != "" {
+ t.Fatalf("The extension of an invalid compression should be an empty string.")
+ }
+}
+
+func TestExtensionUncompressed(t *testing.T) {
+ compression := Uncompressed
+ output := compression.Extension()
+ if output != "tar" {
+ t.Fatalf("The extension of a uncompressed archive should be 'tar'.")
+ }
+}
+func TestExtensionBzip2(t *testing.T) {
+ compression := Bzip2
+ output := compression.Extension()
+ if output != "tar.bz2" {
+ t.Fatalf("The extension of a bzip2 archive should be 'tar.bz2'")
+ }
+}
+func TestExtensionGzip(t *testing.T) {
+ compression := Gzip
+ output := compression.Extension()
+ if output != "tar.gz" {
+ t.Fatalf("The extension of a bzip2 archive should be 'tar.gz'")
+ }
+}
+func TestExtensionXz(t *testing.T) {
+ compression := Xz
+ output := compression.Extension()
+ if output != "tar.xz" {
+ t.Fatalf("The extension of a bzip2 archive should be 'tar.xz'")
+ }
+}
+
+func TestCmdStreamLargeStderr(t *testing.T) {
+ cmd := exec.Command("/bin/sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello")
+ out, err := CmdStream(cmd, nil)
+ if err != nil {
+ t.Fatalf("Failed to start command: %s", err)
+ }
+ errCh := make(chan error)
+ go func() {
+ _, err := io.Copy(ioutil.Discard, out)
+ errCh <- err
+ }()
+ select {
+ case err := <-errCh:
+ if err != nil {
+ t.Fatalf("Command should not have failed (err=%.100s...)", err)
+ }
+ case <-time.After(5 * time.Second):
+ t.Fatalf("Command did not complete in 5 seconds; probable deadlock")
+ }
+}
+
+func TestCmdStreamBad(t *testing.T) {
+ badCmd := exec.Command("/bin/sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1")
+ out, err := CmdStream(badCmd, nil)
+ if err != nil {
+ t.Fatalf("Failed to start command: %s", err)
+ }
+ if output, err := ioutil.ReadAll(out); err == nil {
+ t.Fatalf("Command should have failed")
+ } else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" {
+ t.Fatalf("Wrong error value (%s)", err)
+ } else if s := string(output); s != "hello\n" {
+ t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output)
+ }
+}
+
+func TestCmdStreamGood(t *testing.T) {
+ cmd := exec.Command("/bin/sh", "-c", "echo hello; exit 0")
+ out, err := CmdStream(cmd, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if output, err := ioutil.ReadAll(out); err != nil {
+ t.Fatalf("Command should not have failed (err=%s)", err)
+ } else if s := string(output); s != "hello\n" {
+ t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output)
+ }
+}
+
+func TestUntarPathWithInvalidDest(t *testing.T) {
+ tempFolder, err := ioutil.TempDir("", "docker-archive-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tempFolder)
+ invalidDestFolder := path.Join(tempFolder, "invalidDest")
+ // Create a src file
+ srcFile := path.Join(tempFolder, "src")
+ _, err = os.Create(srcFile)
+ if err != nil {
+ t.Fatalf("Fail to create the source file")
+ }
+ err = UntarPath(srcFile, invalidDestFolder)
+ if err == nil {
+ t.Fatalf("UntarPath with invalid destination path should throw an error.")
+ }
+}
+
+func TestUntarPathWithInvalidSrc(t *testing.T) {
+ dest, err := ioutil.TempDir("", "docker-archive-test")
+ if err != nil {
+ t.Fatalf("Fail to create the destination file")
+ }
+ defer os.RemoveAll(dest)
+ err = UntarPath("/invalid/path", dest)
+ if err == nil {
+ t.Fatalf("UntarPath with invalid src path should throw an error.")
+ }
+}
+
+func TestUntarPath(t *testing.T) {
+ tmpFolder, err := ioutil.TempDir("", "docker-archive-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpFolder)
+ srcFile := path.Join(tmpFolder, "src")
+ tarFile := path.Join(tmpFolder, "src.tar")
+ os.Create(path.Join(tmpFolder, "src"))
+ cmd := exec.Command("/bin/sh", "-c", "tar cf "+tarFile+" "+srcFile)
+ _, err = cmd.CombinedOutput()
+ if err != nil {
+ t.Fatal(err)
+ }
+ destFolder := path.Join(tmpFolder, "dest")
+ err = os.MkdirAll(destFolder, 0740)
+ if err != nil {
+ t.Fatalf("Fail to create the destination file")
+ }
+ err = UntarPath(tarFile, destFolder)
+ if err != nil {
+ t.Fatalf("UntarPath shouldn't throw an error, %s.", err)
+ }
+ expectedFile := path.Join(destFolder, srcFile)
+ _, err = os.Stat(expectedFile)
+ if err != nil {
+ t.Fatalf("Destination folder should contain the source file but did not.")
+ }
+}
+
+// Do the same test as above but with the destination as file, it should fail
+func TestUntarPathWithDestinationFile(t *testing.T) {
+ tmpFolder, err := ioutil.TempDir("", "docker-archive-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpFolder)
+ srcFile := path.Join(tmpFolder, "src")
+ tarFile := path.Join(tmpFolder, "src.tar")
+ os.Create(path.Join(tmpFolder, "src"))
+ cmd := exec.Command("/bin/sh", "-c", "tar cf "+tarFile+" "+srcFile)
+ _, err = cmd.CombinedOutput()
+ if err != nil {
+ t.Fatal(err)
+ }
+ destFile := path.Join(tmpFolder, "dest")
+ _, err = os.Create(destFile)
+ if err != nil {
+ t.Fatalf("Fail to create the destination file")
+ }
+ err = UntarPath(tarFile, destFile)
+ if err == nil {
+ t.Fatalf("UntarPath should throw an error if the destination if a file")
+ }
+}
+
+// Do the same test as above but with the destination folder already exists
+// and the destination file is a directory
+// It's working, see https://github.com/docker/docker/issues/10040
+func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) {
+ tmpFolder, err := ioutil.TempDir("", "docker-archive-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpFolder)
+ srcFile := path.Join(tmpFolder, "src")
+ tarFile := path.Join(tmpFolder, "src.tar")
+ os.Create(srcFile)
+ cmd := exec.Command("/bin/sh", "-c", "tar cf "+tarFile+" "+srcFile)
+ _, err = cmd.CombinedOutput()
+ if err != nil {
+ t.Fatal(err)
+ }
+ destFolder := path.Join(tmpFolder, "dest")
+ err = os.MkdirAll(destFolder, 0740)
+ if err != nil {
+ t.Fatalf("Fail to create the destination folder")
+ }
+ // Let's create a folder that has the same path as the extracted file (from tar)
+ destSrcFileAsFolder := path.Join(destFolder, srcFile)
+ err = os.MkdirAll(destSrcFileAsFolder, 0740)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = UntarPath(tarFile, destFolder)
+ if err != nil {
+ t.Fatalf("UntarPath should throw not throw an error if the extracted file already exists and is a folder")
+ }
+}
+
+func TestCopyWithTarInvalidSrc(t *testing.T) {
+ tempFolder, err := ioutil.TempDir("", "docker-archive-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tempFolder)
+ destFolder := path.Join(tempFolder, "dest")
+ invalidSrc := path.Join(tempFolder, "doesnotexists")
+ err = os.MkdirAll(destFolder, 0740)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = CopyWithTar(invalidSrc, destFolder)
+ if err == nil {
+ t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.")
+ }
+}
+
+func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) {
+ tempFolder, err := ioutil.TempDir("", "docker-archive-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tempFolder)
+ srcFolder := path.Join(tempFolder, "src")
+ inexistentDestFolder := path.Join(tempFolder, "doesnotexists")
+ err = os.MkdirAll(srcFolder, 0740)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = CopyWithTar(srcFolder, inexistentDestFolder)
+ if err != nil {
+ t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.")
+ }
+ _, err = os.Stat(inexistentDestFolder)
+ if err != nil {
+ t.Fatalf("CopyWithTar with an inexistent folder should create it.")
+ }
+}
+
+// Test CopyWithTar with a file as src
+func TestCopyWithTarSrcFile(t *testing.T) {
+ folder, err := ioutil.TempDir("", "docker-archive-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(folder)
+ dest := path.Join(folder, "dest")
+ srcFolder := path.Join(folder, "src")
+ src := path.Join(folder, path.Join("src", "src"))
+ err = os.MkdirAll(srcFolder, 0740)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = os.MkdirAll(dest, 0740)
+ if err != nil {
+ t.Fatal(err)
+ }
+ ioutil.WriteFile(src, []byte("content"), 0777)
+ err = CopyWithTar(src, dest)
+ if err != nil {
+ t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err)
+ }
+ _, err = os.Stat(dest)
+ // FIXME Check the content
+ if err != nil {
+ t.Fatalf("Destination file should be the same as the source.")
+ }
+}
+
+// Test CopyWithTar with a folder as src
+func TestCopyWithTarSrcFolder(t *testing.T) {
+ folder, err := ioutil.TempDir("", "docker-archive-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(folder)
+ dest := path.Join(folder, "dest")
+ src := path.Join(folder, path.Join("src", "folder"))
+ err = os.MkdirAll(src, 0740)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = os.MkdirAll(dest, 0740)
+ if err != nil {
+ t.Fatal(err)
+ }
+ ioutil.WriteFile(path.Join(src, "file"), []byte("content"), 0777)
+ err = CopyWithTar(src, dest)
+ if err != nil {
+ t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err)
+ }
+ _, err = os.Stat(dest)
+ // FIXME Check the content (the file inside)
+ if err != nil {
+ t.Fatalf("Destination folder should contain the source file but did not.")
+ }
+}
+
+func TestCopyFileWithTarInvalidSrc(t *testing.T) {
+ tempFolder, err := ioutil.TempDir("", "docker-archive-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tempFolder)
+ destFolder := path.Join(tempFolder, "dest")
+ err = os.MkdirAll(destFolder, 0740)
+ if err != nil {
+ t.Fatal(err)
+ }
+ invalidFile := path.Join(tempFolder, "doesnotexists")
+ err = CopyFileWithTar(invalidFile, destFolder)
+ if err == nil {
+ t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.")
+ }
+}
+
+func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) {
+ tempFolder, err := ioutil.TempDir("", "docker-archive-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tempFolder)
+ srcFile := path.Join(tempFolder, "src")
+ inexistentDestFolder := path.Join(tempFolder, "doesnotexists")
+ _, err = os.Create(srcFile)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = CopyFileWithTar(srcFile, inexistentDestFolder)
+ if err != nil {
+ t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.")
+ }
+ _, err = os.Stat(inexistentDestFolder)
+ if err != nil {
+ t.Fatalf("CopyWithTar with an inexistent folder should create it.")
+ }
+ // FIXME Test the src file and content
+}
+
+func TestCopyFileWithTarSrcFolder(t *testing.T) {
+ folder, err := ioutil.TempDir("", "docker-archive-copyfilewithtar-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(folder)
+ dest := path.Join(folder, "dest")
+ src := path.Join(folder, "srcfolder")
+ err = os.MkdirAll(src, 0740)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = os.MkdirAll(dest, 0740)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = CopyFileWithTar(src, dest)
+ if err == nil {
+ t.Fatalf("CopyFileWithTar should throw an error with a folder.")
+ }
+}
+
+func TestCopyFileWithTarSrcFile(t *testing.T) {
+ folder, err := ioutil.TempDir("", "docker-archive-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(folder)
+ dest := path.Join(folder, "dest")
+ srcFolder := path.Join(folder, "src")
+ src := path.Join(folder, path.Join("src", "src"))
+ err = os.MkdirAll(srcFolder, 0740)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = os.MkdirAll(dest, 0740)
+ if err != nil {
+ t.Fatal(err)
+ }
+ ioutil.WriteFile(src, []byte("content"), 0777)
+ err = CopyWithTar(src, dest+"/")
+ if err != nil {
+ t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err)
+ }
+ _, err = os.Stat(dest)
+ if err != nil {
+ t.Fatalf("Destination folder should contain the source file but did not.")
+ }
+}
+
+func TestTarFiles(t *testing.T) {
+ // try without hardlinks
+ if err := checkNoChanges(1000, false); err != nil {
+ t.Fatal(err)
+ }
+ // try with hardlinks
+ if err := checkNoChanges(1000, true); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func checkNoChanges(fileNum int, hardlinks bool) error {
+ srcDir, err := ioutil.TempDir("", "docker-test-srcDir")
+ if err != nil {
+ return err
+ }
+ defer os.RemoveAll(srcDir)
+
+ destDir, err := ioutil.TempDir("", "docker-test-destDir")
+ if err != nil {
+ return err
+ }
+ defer os.RemoveAll(destDir)
+
+ _, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks)
+ if err != nil {
+ return err
+ }
+
+ err = TarUntar(srcDir, destDir)
+ if err != nil {
+ return err
+ }
+
+ changes, err := ChangesDirs(destDir, srcDir)
+ if err != nil {
+ return err
+ }
+ if len(changes) > 0 {
+ return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes))
+ }
+ return nil
+}
+
+func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) {
+ archive, err := TarWithOptions(origin, options)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer archive.Close()
+
+ buf := make([]byte, 10)
+ if _, err := archive.Read(buf); err != nil {
+ return nil, err
+ }
+ wrap := io.MultiReader(bytes.NewReader(buf), archive)
+
+ detectedCompression := DetectCompression(buf)
+ compression := options.Compression
+ if detectedCompression.Extension() != compression.Extension() {
+ return nil, fmt.Errorf("Wrong compression detected. Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension())
+ }
+
+ tmp, err := ioutil.TempDir("", "docker-test-untar")
+ if err != nil {
+ return nil, err
+ }
+ defer os.RemoveAll(tmp)
+ if err := Untar(wrap, tmp, nil); err != nil {
+ return nil, err
+ }
+ if _, err := os.Stat(tmp); err != nil {
+ return nil, err
+ }
+
+ return ChangesDirs(origin, tmp)
+}
+
+func TestTarUntar(t *testing.T) {
+ origin, err := ioutil.TempDir("", "docker-test-untar-origin")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(origin)
+ if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
+ t.Fatal(err)
+ }
+ if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil {
+ t.Fatal(err)
+ }
+ if err := ioutil.WriteFile(path.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil {
+ t.Fatal(err)
+ }
+
+ for _, c := range []Compression{
+ Uncompressed,
+ Gzip,
+ } {
+ changes, err := tarUntar(t, origin, &TarOptions{
+ Compression: c,
+ ExcludePatterns: []string{"3"},
+ })
+
+ if err != nil {
+ t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err)
+ }
+
+ if len(changes) != 1 || changes[0].Path != "/3" {
+ t.Fatalf("Unexpected differences after tarUntar: %v", changes)
+ }
+ }
+}
+
+func TestTarUntarWithXattr(t *testing.T) {
+ origin, err := ioutil.TempDir("", "docker-test-untar-origin")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(origin)
+ if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
+ t.Fatal(err)
+ }
+ if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil {
+ t.Fatal(err)
+ }
+ if err := ioutil.WriteFile(path.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil {
+ t.Fatal(err)
+ }
+ if err := system.Lsetxattr(path.Join(origin, "2"), "security.capability", []byte{0x00}, 0); err != nil {
+ t.Fatal(err)
+ }
+
+ for _, c := range []Compression{
+ Uncompressed,
+ Gzip,
+ } {
+ changes, err := tarUntar(t, origin, &TarOptions{
+ Compression: c,
+ ExcludePatterns: []string{"3"},
+ })
+
+ if err != nil {
+ t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err)
+ }
+
+ if len(changes) != 1 || changes[0].Path != "/3" {
+ t.Fatalf("Unexpected differences after tarUntar: %v", changes)
+ }
+ capability, _ := system.Lgetxattr(path.Join(origin, "2"), "security.capability")
+ if capability == nil && capability[0] != 0x00 {
+ t.Fatalf("Untar should have kept the 'security.capability' xattr.")
+ }
+ }
+}
+
+func TestTarWithOptions(t *testing.T) {
+ origin, err := ioutil.TempDir("", "docker-test-untar-origin")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := ioutil.TempDir(origin, "folder"); err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(origin)
+ if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
+ t.Fatal(err)
+ }
+ if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil {
+ t.Fatal(err)
+ }
+
+ cases := []struct {
+ opts *TarOptions
+ numChanges int
+ }{
+ {&TarOptions{IncludeFiles: []string{"1"}}, 2},
+ {&TarOptions{ExcludePatterns: []string{"2"}}, 1},
+ {&TarOptions{ExcludePatterns: []string{"1", "folder*"}}, 2},
+ {&TarOptions{IncludeFiles: []string{"1", "1"}}, 2},
+ {&TarOptions{Name: "test", IncludeFiles: []string{"1"}}, 4},
+ }
+ for _, testCase := range cases {
+ changes, err := tarUntar(t, origin, testCase.opts)
+ if err != nil {
+ t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err)
+ }
+ if len(changes) != testCase.numChanges {
+ t.Errorf("Expected %d changes, got %d for %+v:",
+ testCase.numChanges, len(changes), testCase.opts)
+ }
+ }
+}
+
+// Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz
+// use PAX Global Extended Headers.
+// Failing to handle them prevents the archives from being unpacked during ADD
+func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) {
+ hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader}
+ tmpDir, err := ioutil.TempDir("", "docker-test-archive-pax-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpDir)
+ err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+// Some tar have both GNU specific (huge uid) and Ustar specific (long name) things.
+// Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work.
+func TestUntarUstarGnuConflict(t *testing.T) {
+ f, err := os.Open("testdata/broken.tar")
+ if err != nil {
+ t.Fatal(err)
+ }
+ found := false
+ tr := tar.NewReader(f)
+ // Iterate through the files in the archive.
+ for {
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ // end of tar archive
+ break
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" {
+ found = true
+ break
+ }
+ }
+ if !found {
+ t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm")
+ }
+}
+
+func TestTarWithBlockCharFifo(t *testing.T) {
+ origin, err := ioutil.TempDir("", "docker-test-tar-hardlink")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(origin)
+ if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
+ t.Fatal(err)
+ }
+ if err := system.Mknod(path.Join(origin, "2"), syscall.S_IFBLK, int(system.Mkdev(int64(12), int64(5)))); err != nil {
+ t.Fatal(err)
+ }
+ if err := system.Mknod(path.Join(origin, "3"), syscall.S_IFCHR, int(system.Mkdev(int64(12), int64(5)))); err != nil {
+ t.Fatal(err)
+ }
+ if err := system.Mknod(path.Join(origin, "4"), syscall.S_IFIFO, int(system.Mkdev(int64(12), int64(5)))); err != nil {
+ t.Fatal(err)
+ }
+
+ dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dest)
+
+ // we'll do this in two steps to isolate failures
+ fh, err := Tar(origin, Uncompressed)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // ensure we can read the whole thing with no error, before writing back out
+ buf, err := ioutil.ReadAll(fh)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ bRdr := bytes.NewReader(buf)
+ err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ changes, err := ChangesDirs(origin, dest)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(changes) > 0 {
+ t.Fatalf("Tar with special device (block, char, fifo) should keep them (recreate them when untar) : %v", changes)
+ }
+}
+
+func TestTarWithHardLink(t *testing.T) {
+ origin, err := ioutil.TempDir("", "docker-test-tar-hardlink")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(origin)
+ if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
+ t.Fatal(err)
+ }
+ if err := os.Link(path.Join(origin, "1"), path.Join(origin, "2")); err != nil {
+ t.Fatal(err)
+ }
+
+ var i1, i2 uint64
+ if i1, err = getNlink(path.Join(origin, "1")); err != nil {
+ t.Fatal(err)
+ }
+ // sanity check that we can hardlink
+ if i1 != 2 {
+ t.Skipf("skipping since hardlinks don't work here; expected 2 links, got %d", i1)
+ }
+
+ dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dest)
+
+ // we'll do this in two steps to isolate failures
+ fh, err := Tar(origin, Uncompressed)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // ensure we can read the whole thing with no error, before writing back out
+ buf, err := ioutil.ReadAll(fh)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ bRdr := bytes.NewReader(buf)
+ err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if i1, err = getInode(path.Join(dest, "1")); err != nil {
+ t.Fatal(err)
+ }
+ if i2, err = getInode(path.Join(dest, "2")); err != nil {
+ t.Fatal(err)
+ }
+
+ if i1 != i2 {
+ t.Errorf("expected matching inodes, but got %d and %d", i1, i2)
+ }
+}
+
+func getNlink(path string) (uint64, error) {
+ stat, err := os.Stat(path)
+ if err != nil {
+ return 0, err
+ }
+ statT, ok := stat.Sys().(*syscall.Stat_t)
+ if !ok {
+ return 0, fmt.Errorf("expected type *syscall.Stat_t, got %t", stat.Sys())
+ }
+ // We need this conversion on ARM64
+ return uint64(statT.Nlink), nil
+}
+
+func getInode(path string) (uint64, error) {
+ stat, err := os.Stat(path)
+ if err != nil {
+ return 0, err
+ }
+ statT, ok := stat.Sys().(*syscall.Stat_t)
+ if !ok {
+ return 0, fmt.Errorf("expected type *syscall.Stat_t, got %t", stat.Sys())
+ }
+ return statT.Ino, nil
+}
+
+func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
+ fileData := []byte("fooo")
+ for n := 0; n < numberOfFiles; n++ {
+ fileName := fmt.Sprintf("file-%d", n)
+ if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
+ return 0, err
+ }
+ if makeLinks {
+ if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
+ return 0, err
+ }
+ }
+ }
+ totalSize := numberOfFiles * len(fileData)
+ return totalSize, nil
+}
+
+func BenchmarkTarUntar(b *testing.B) {
+ origin, err := ioutil.TempDir("", "docker-test-untar-origin")
+ if err != nil {
+ b.Fatal(err)
+ }
+ tempDir, err := ioutil.TempDir("", "docker-test-untar-destination")
+ if err != nil {
+ b.Fatal(err)
+ }
+ target := path.Join(tempDir, "dest")
+ n, err := prepareUntarSourceDirectory(100, origin, false)
+ if err != nil {
+ b.Fatal(err)
+ }
+ defer os.RemoveAll(origin)
+ defer os.RemoveAll(tempDir)
+
+ b.ResetTimer()
+ b.SetBytes(int64(n))
+ for n := 0; n < b.N; n++ {
+ err := TarUntar(origin, target)
+ if err != nil {
+ b.Fatal(err)
+ }
+ os.RemoveAll(target)
+ }
+}
+
+func BenchmarkTarUntarWithLinks(b *testing.B) {
+ origin, err := ioutil.TempDir("", "docker-test-untar-origin")
+ if err != nil {
+ b.Fatal(err)
+ }
+ tempDir, err := ioutil.TempDir("", "docker-test-untar-destination")
+ if err != nil {
+ b.Fatal(err)
+ }
+ target := path.Join(tempDir, "dest")
+ n, err := prepareUntarSourceDirectory(100, origin, true)
+ if err != nil {
+ b.Fatal(err)
+ }
+ defer os.RemoveAll(origin)
+ defer os.RemoveAll(tempDir)
+
+ b.ResetTimer()
+ b.SetBytes(int64(n))
+ for n := 0; n < b.N; n++ {
+ err := TarUntar(origin, target)
+ if err != nil {
+ b.Fatal(err)
+ }
+ os.RemoveAll(target)
+ }
+}
+
+func TestUntarInvalidFilenames(t *testing.T) {
+ for i, headers := range [][]*tar.Header{
+ {
+ {
+ Name: "../victim/dotdot",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ {
+ {
+ // Note the leading slash
+ Name: "/../victim/slash-dotdot",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ } {
+ if err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil {
+ t.Fatalf("i=%d. %v", i, err)
+ }
+ }
+}
+
+func TestUntarHardlinkToSymlink(t *testing.T) {
+ for i, headers := range [][]*tar.Header{
+ {
+ {
+ Name: "symlink1",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "regfile",
+ Mode: 0644,
+ },
+ {
+ Name: "symlink2",
+ Typeflag: tar.TypeLink,
+ Linkname: "symlink1",
+ Mode: 0644,
+ },
+ {
+ Name: "regfile",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ } {
+ if err := testBreakout("untar", "docker-TestUntarHardlinkToSymlink", headers); err != nil {
+ t.Fatalf("i=%d. %v", i, err)
+ }
+ }
+}
+
+func TestUntarInvalidHardlink(t *testing.T) {
+ for i, headers := range [][]*tar.Header{
+ { // try reading victim/hello (../)
+ {
+ Name: "dotdot",
+ Typeflag: tar.TypeLink,
+ Linkname: "../victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // try reading victim/hello (/../)
+ {
+ Name: "slash-dotdot",
+ Typeflag: tar.TypeLink,
+ // Note the leading slash
+ Linkname: "/../victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // try writing victim/file
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeLink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "loophole-victim/file",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ { // try reading victim/hello (hardlink, symlink)
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeLink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "symlink",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "loophole-victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // Try reading victim/hello (hardlink, hardlink)
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeLink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "hardlink",
+ Typeflag: tar.TypeLink,
+ Linkname: "loophole-victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // Try removing victim directory (hardlink)
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeLink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ } {
+ if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil {
+ t.Fatalf("i=%d. %v", i, err)
+ }
+ }
+}
+
+func TestUntarInvalidSymlink(t *testing.T) {
+ for i, headers := range [][]*tar.Header{
+ { // try reading victim/hello (../)
+ {
+ Name: "dotdot",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "../victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // try reading victim/hello (/../)
+ {
+ Name: "slash-dotdot",
+ Typeflag: tar.TypeSymlink,
+ // Note the leading slash
+ Linkname: "/../victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // try writing victim/file
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "loophole-victim/file",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ { // try reading victim/hello (symlink, symlink)
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "symlink",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "loophole-victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // try reading victim/hello (symlink, hardlink)
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "hardlink",
+ Typeflag: tar.TypeLink,
+ Linkname: "loophole-victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // try removing victim directory (symlink)
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ { // try writing to victim/newdir/newfile with a symlink in the path
+ {
+ // this header needs to be before the next one, or else there is an error
+ Name: "dir/loophole",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "../../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "dir/loophole/newdir/newfile",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ } {
+ if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil {
+ t.Fatalf("i=%d. %v", i, err)
+ }
+ }
+}
+
+func TestTempArchiveCloseMultipleTimes(t *testing.T) {
+ reader := ioutil.NopCloser(strings.NewReader("hello"))
+ tempArchive, err := NewTempArchive(reader, "")
+ buf := make([]byte, 10)
+ n, err := tempArchive.Read(buf)
+ if n != 5 {
+ t.Fatalf("Expected to read 5 bytes. Read %d instead", n)
+ }
+ for i := 0; i < 3; i++ {
+ if err = tempArchive.Close(); err != nil {
+ t.Fatalf("i=%d. Unexpected error closing temp archive: %v", i, err)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_unix.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_unix.go
new file mode 100644
index 0000000..9e1dfad
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_unix.go
@@ -0,0 +1,89 @@
+// +build !windows
+
+package archive
+
+import (
+ "archive/tar"
+ "errors"
+ "os"
+ "syscall"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+// CanonicalTarNameForPath converts the platform-specific relative path p
+// to the canonical POSIX-style path used for tar archival.
+func CanonicalTarNameForPath(p string) (string, error) {
+ return p, nil // already unix-style
+}
+
+// chmodTarEntry is used to adjust the file permissions used in tar header based
+// on the platform on which the archival is done.
+func chmodTarEntry(perm os.FileMode) os.FileMode {
+ return perm // noop for unix as golang APIs provide perm bits correctly
+}
+
+func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) {
+ s, ok := stat.(*syscall.Stat_t)
+
+ if !ok {
+ err = errors.New("cannot convert stat value to syscall.Stat_t")
+ return
+ }
+
+ nlink = uint32(s.Nlink)
+ inode = uint64(s.Ino)
+
+ // Currently go does not fill in the major/minor numbers
+ if s.Mode&syscall.S_IFBLK != 0 ||
+ s.Mode&syscall.S_IFCHR != 0 {
+ hdr.Devmajor = int64(major(uint64(s.Rdev)))
+ hdr.Devminor = int64(minor(uint64(s.Rdev)))
+ }
+
+ return
+}
+
+func major(device uint64) uint64 {
+ return (device >> 8) & 0xfff
+}
+
+func minor(device uint64) uint64 {
+ return (device & 0xff) | ((device >> 12) & 0xfff00)
+}
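+
+// For illustration, with the dev_t encoding assumed above, a device with
+// major 12 and minor 5 is packed as 0xc05:
+//
+//	major(0xc05) == 12 // (0xc05 >> 8) & 0xfff
+//	minor(0xc05) == 5  // (0xc05 & 0xff) | ((0xc05 >> 12) & 0xfff00)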
+
+// handleTarTypeBlockCharFifo is an OS-specific helper function used by
+// createTarFile to handle the following types of header: Block; Char; Fifo
+func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
+ mode := uint32(hdr.Mode & 07777)
+ switch hdr.Typeflag {
+ case tar.TypeBlock:
+ mode |= syscall.S_IFBLK
+ case tar.TypeChar:
+ mode |= syscall.S_IFCHR
+ case tar.TypeFifo:
+ mode |= syscall.S_IFIFO
+ }
+
+ if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil {
+ return err
+ }
+ return nil
+}
+
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+ if hdr.Typeflag == tar.TypeLink {
+ if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
+ if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
+ return err
+ }
+ }
+ } else if hdr.Typeflag != tar.TypeSymlink {
+ if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_unix_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_unix_test.go
new file mode 100644
index 0000000..18f45c4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_unix_test.go
@@ -0,0 +1,60 @@
+// +build !windows
+
+package archive
+
+import (
+ "os"
+ "testing"
+)
+
+func TestCanonicalTarNameForPath(t *testing.T) {
+ cases := []struct{ in, expected string }{
+ {"foo", "foo"},
+ {"foo/bar", "foo/bar"},
+ {"foo/dir/", "foo/dir/"},
+ }
+ for _, v := range cases {
+ if out, err := CanonicalTarNameForPath(v.in); err != nil {
+ t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err)
+ } else if out != v.expected {
+ t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out)
+ }
+ }
+}
+
+func TestCanonicalTarName(t *testing.T) {
+ cases := []struct {
+ in string
+ isDir bool
+ expected string
+ }{
+ {"foo", false, "foo"},
+ {"foo", true, "foo/"},
+ {"foo/bar", false, "foo/bar"},
+ {"foo/bar", true, "foo/bar/"},
+ }
+ for _, v := range cases {
+ if out, err := canonicalTarName(v.in, v.isDir); err != nil {
+ t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err)
+ } else if out != v.expected {
+ t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out)
+ }
+ }
+}
+
+func TestChmodTarEntry(t *testing.T) {
+ cases := []struct {
+ in, expected os.FileMode
+ }{
+ {0000, 0000},
+ {0777, 0777},
+ {0644, 0644},
+ {0755, 0755},
+ {0444, 0444},
+ }
+ for _, v := range cases {
+ if out := chmodTarEntry(v.in); out != v.expected {
+ t.Fatalf("wrong chmod. expected:%v got:%v", v.expected, out)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_windows.go
new file mode 100644
index 0000000..10db4bd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_windows.go
@@ -0,0 +1,50 @@
+// +build windows
+
+package archive
+
+import (
+ "archive/tar"
+ "fmt"
+ "os"
+ "strings"
+)
+
+// CanonicalTarNameForPath converts the platform-specific relative path p
+// to the canonical POSIX-style path used for tar archival.
+func CanonicalTarNameForPath(p string) (string, error) {
+ // windows: convert windows style relative path with backslashes
+ // into forward slashes. Since windows does not allow '/' or '\'
+ // in file names, it is mostly safe to replace however we must
+ // check just in case
+ if strings.Contains(p, "/") {
+ return "", fmt.Errorf("Windows path contains forward slash: %s", p)
+ }
+ return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
+}
+
+// chmodTarEntry is used to adjust the file permissions used in tar header based
+// on the platform the archival is done.
+func chmodTarEntry(perm os.FileMode) os.FileMode {
+ perm &= 0755
+ // Add the x bit: make everything +x from windows
+ perm |= 0111
+
+ return perm
+}
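+
+// For illustration: perm &= 0755 drops the group/other write bits and
+// perm |= 0111 adds execute for everyone, so 0644 becomes 0755 and
+// 0444 becomes 0555.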
+
+func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) {
+ // do nothing. no notion of Rdev, Inode, Nlink in stat on Windows
+ return
+}
+
+// handleTarTypeBlockCharFifo is an OS-specific helper function used by
+// createTarFile to handle the following types of header: Block; Char; Fifo
+func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
+ return nil
+}
+
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_windows_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_windows_test.go
new file mode 100644
index 0000000..72bc71e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_windows_test.go
@@ -0,0 +1,65 @@
+// +build windows
+
+package archive
+
+import (
+ "os"
+ "testing"
+)
+
+func TestCanonicalTarNameForPath(t *testing.T) {
+ cases := []struct {
+ in, expected string
+ shouldFail bool
+ }{
+ {"foo", "foo", false},
+ {"foo/bar", "___", true}, // unix-styled windows path must fail
+ {`foo\bar`, "foo/bar", false},
+ }
+ for _, v := range cases {
+ if out, err := CanonicalTarNameForPath(v.in); err != nil && !v.shouldFail {
+ t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err)
+ } else if v.shouldFail && err == nil {
+ t.Fatalf("canonical path call should have failed with error. in=%s out=%s", v.in, out)
+ } else if !v.shouldFail && out != v.expected {
+ t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out)
+ }
+ }
+}
+
+func TestCanonicalTarName(t *testing.T) {
+ cases := []struct {
+ in string
+ isDir bool
+ expected string
+ }{
+ {"foo", false, "foo"},
+ {"foo", true, "foo/"},
+ {`foo\bar`, false, "foo/bar"},
+ {`foo\bar`, true, "foo/bar/"},
+ }
+ for _, v := range cases {
+ if out, err := canonicalTarName(v.in, v.isDir); err != nil {
+ t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err)
+ } else if out != v.expected {
+ t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out)
+ }
+ }
+}
+
+func TestChmodTarEntry(t *testing.T) {
+ cases := []struct {
+ in, expected os.FileMode
+ }{
+ {0000, 0111},
+ {0777, 0755},
+ {0644, 0755},
+ {0755, 0755},
+ {0444, 0555},
+ }
+ for _, v := range cases {
+ if out := chmodTarEntry(v.in); out != v.expected {
+ t.Fatalf("wrong chmod. expected:%v got:%v", v.expected, out)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes.go
new file mode 100644
index 0000000..689d9a2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes.go
@@ -0,0 +1,383 @@
+package archive
+
+import (
+ "archive/tar"
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+ "syscall"
+ "time"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/pkg/pools"
+ "github.com/docker/docker/pkg/system"
+)
+
+type ChangeType int
+
+const (
+ ChangeModify = iota
+ ChangeAdd
+ ChangeDelete
+)
+
+type Change struct {
+ Path string
+ Kind ChangeType
+}
+
+func (change *Change) String() string {
+ var kind string
+ switch change.Kind {
+ case ChangeModify:
+ kind = "C"
+ case ChangeAdd:
+ kind = "A"
+ case ChangeDelete:
+ kind = "D"
+ }
+ return fmt.Sprintf("%s %s", kind, change.Path)
+}
+
+// for sort.Sort
+type changesByPath []Change
+
+func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
+func (c changesByPath) Len() int { return len(c) }
+func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] }
+
+// Gnu tar and the go tar writer don't have sub-second mtime precision,
+// which is problematic when we apply changes via tar files. We handle
+// this by comparing for exact times, *or* the same second count with
+// either a or b having exactly 0 nanoseconds.
+func sameFsTime(a, b time.Time) bool {
+ return a == b ||
+ (a.Unix() == b.Unix() &&
+ (a.Nanosecond() == 0 || b.Nanosecond() == 0))
+}
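+
+// For illustration: 12:00:00.000000000 and 12:00:00.999999999 compare as
+// the same fs time here, because one side has zero nanoseconds (the
+// precision a tar round trip can lose), while 12:00:00.5 and 12:00:00.7
+// do not.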
+
+func sameFsTimeSpec(a, b syscall.Timespec) bool {
+ return a.Sec == b.Sec &&
+ (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
+}
+
+// Changes walks the path rw and determines changes for the files in the path,
+// with respect to the parent layers
+func Changes(layers []string, rw string) ([]Change, error) {
+ var (
+ changes []Change
+ changedDirs = make(map[string]struct{})
+ )
+
+ err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Rebase path
+ path, err = filepath.Rel(rw, path)
+ if err != nil {
+ return err
+ }
+
+ // As this runs on the daemon side, file paths are OS specific.
+ path = filepath.Join(string(os.PathSeparator), path)
+
+ // Skip root
+ if path == string(os.PathSeparator) {
+ return nil
+ }
+
+ // Skip AUFS metadata
+ if matched, err := filepath.Match(string(os.PathSeparator)+".wh..wh.*", path); err != nil || matched {
+ return err
+ }
+
+ change := Change{
+ Path: path,
+ }
+
+ // Find out what kind of modification happened
+ file := filepath.Base(path)
+ // If there is a whiteout, then the file was removed
+ if strings.HasPrefix(file, ".wh.") {
+ originalFile := file[len(".wh."):]
+ change.Path = filepath.Join(filepath.Dir(path), originalFile)
+ change.Kind = ChangeDelete
+ } else {
+ // Otherwise, the file was added
+ change.Kind = ChangeAdd
+
+ // ...Unless it already existed in a top layer, in which case, it's a modification
+ for _, layer := range layers {
+ stat, err := os.Stat(filepath.Join(layer, path))
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ if err == nil {
+ // The file existed in the top layer, so that's a modification
+
+ // However, if it's a directory, maybe it wasn't actually modified.
+ // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
+ if stat.IsDir() && f.IsDir() {
+ if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
+ // Both directories are the same, don't record the change
+ return nil
+ }
+ }
+ change.Kind = ChangeModify
+ break
+ }
+ }
+ }
+
+ // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
+ // This block is here to ensure the change is recorded even if the
+ // modify time, mode and size of the parent directory in the rw and ro layers are all equal.
+ // Check https://github.com/docker/docker/pull/13590 for details.
+ if f.IsDir() {
+ changedDirs[path] = struct{}{}
+ }
+ if change.Kind == ChangeAdd || change.Kind == ChangeDelete {
+ parent := filepath.Dir(path)
+ if _, ok := changedDirs[parent]; !ok && parent != "/" {
+ changes = append(changes, Change{Path: parent, Kind: ChangeModify})
+ changedDirs[parent] = struct{}{}
+ }
+ }
+
+ // Record change
+ changes = append(changes, change)
+ return nil
+ })
+ if err != nil && !os.IsNotExist(err) {
+ return nil, err
+ }
+ return changes, nil
+}
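+
+// Illustrative usage (editor's sketch; the layer paths are hypothetical):
+// list what changed in a read-write layer relative to its parent layer:
+//
+//    changes, err := Changes([]string{"/var/lib/layers/parent"}, "/var/lib/layers/rw")
+//    if err != nil {
+//        return err
+//    }
+//    for _, c := range changes {
+//        fmt.Println(c.String()) // e.g. "A /dir1/newfile"
+//    }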
+
+type FileInfo struct {
+ parent *FileInfo
+ name string
+ stat *system.Stat_t
+ children map[string]*FileInfo
+ capability []byte
+ added bool
+}
+
+func (root *FileInfo) LookUp(path string) *FileInfo {
+ // As this runs on the daemon side, file paths are OS specific.
+ parent := root
+ if path == string(os.PathSeparator) {
+ return root
+ }
+
+ pathElements := strings.Split(path, string(os.PathSeparator))
+ for _, elem := range pathElements {
+ if elem != "" {
+ child := parent.children[elem]
+ if child == nil {
+ return nil
+ }
+ parent = child
+ }
+ }
+ return parent
+}
+
+func (info *FileInfo) path() string {
+ if info.parent == nil {
+ // As this runs on the daemon side, file paths are OS specific.
+ return string(os.PathSeparator)
+ }
+ return filepath.Join(info.parent.path(), info.name)
+}
+
+func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
+
+ sizeAtEntry := len(*changes)
+
+ if oldInfo == nil {
+ // add
+ change := Change{
+ Path: info.path(),
+ Kind: ChangeAdd,
+ }
+ *changes = append(*changes, change)
+ info.added = true
+ }
+
+ // We make a copy so we can modify it to detect additions
+ // also, we only recurse on the old dir if the new info is a directory
+ // otherwise any previous delete/change is considered recursive
+ oldChildren := make(map[string]*FileInfo)
+ if oldInfo != nil && info.isDir() {
+ for k, v := range oldInfo.children {
+ oldChildren[k] = v
+ }
+ }
+
+ for name, newChild := range info.children {
+ oldChild := oldChildren[name]
+ if oldChild != nil {
+ // change?
+ oldStat := oldChild.stat
+ newStat := newChild.stat
+ // Note: We can't compare inode or ctime or blocksize here, because these change
+ // when copying a file into a container. However, that is not generally a problem
+ // because any content change will change mtime, and any status change should
+ // be visible when actually comparing the stat fields. The only time this
+ // breaks down is if some code intentionally hides a change by setting
+ // back mtime
+ if statDifferent(oldStat, newStat) ||
+ !bytes.Equal(oldChild.capability, newChild.capability) {
+ change := Change{
+ Path: newChild.path(),
+ Kind: ChangeModify,
+ }
+ *changes = append(*changes, change)
+ newChild.added = true
+ }
+
+ // Remove from copy so we can detect deletions
+ delete(oldChildren, name)
+ }
+
+ newChild.addChanges(oldChild, changes)
+ }
+ for _, oldChild := range oldChildren {
+ // delete
+ change := Change{
+ Path: oldChild.path(),
+ Kind: ChangeDelete,
+ }
+ *changes = append(*changes, change)
+ }
+
+ // If there were changes inside this directory, we need to add it, even if the directory
+ // itself wasn't changed. This is needed to properly save and restore filesystem permissions.
+ // As this runs on the daemon side, file paths are OS specific.
+ if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) {
+ change := Change{
+ Path: info.path(),
+ Kind: ChangeModify,
+ }
+ // Let's insert the directory entry before the recently added entries located inside this dir
+ *changes = append(*changes, change) // just to resize the slice, will be overwritten
+ copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
+ (*changes)[sizeAtEntry] = change
+ }
+
+}
+
+func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
+ var changes []Change
+
+ info.addChanges(oldInfo, &changes)
+
+ return changes
+}
+
+func newRootFileInfo() *FileInfo {
+ // As this runs on the daemon side, file paths are OS specific.
+ root := &FileInfo{
+ name: string(os.PathSeparator),
+ children: make(map[string]*FileInfo),
+ }
+ return root
+}
+
+// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
+// If oldDir is "", then all files in newDir will be Add-Changes.
+func ChangesDirs(newDir, oldDir string) ([]Change, error) {
+ var (
+ oldRoot, newRoot *FileInfo
+ )
+ if oldDir == "" {
+ emptyDir, err := ioutil.TempDir("", "empty")
+ if err != nil {
+ return nil, err
+ }
+ defer os.Remove(emptyDir)
+ oldDir = emptyDir
+ }
+ oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir)
+ if err != nil {
+ return nil, err
+ }
+
+ return newRoot.Changes(oldRoot), nil
+}
+
+// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
+func ChangesSize(newDir string, changes []Change) int64 {
+ var size int64
+ for _, change := range changes {
+ if change.Kind == ChangeModify || change.Kind == ChangeAdd {
+ file := filepath.Join(newDir, change.Path)
+ fileInfo, _ := os.Lstat(file)
+ if fileInfo != nil && !fileInfo.IsDir() {
+ size += fileInfo.Size()
+ }
+ }
+ }
+ return size
+}
+
+// ExportChanges produces an Archive from the provided changes, relative to dir.
+func ExportChanges(dir string, changes []Change) (Archive, error) {
+ reader, writer := io.Pipe()
+ go func() {
+ ta := &tarAppender{
+ TarWriter: tar.NewWriter(writer),
+ Buffer: pools.BufioWriter32KPool.Get(nil),
+ SeenFiles: make(map[uint64]string),
+ }
+ // this buffer is needed for the duration of this piped stream
+ defer pools.BufioWriter32KPool.Put(ta.Buffer)
+
+ sort.Sort(changesByPath(changes))
+
+ // In general we log errors here but ignore them, because during
+ // e.g. a diff operation the container can continue mutating the
+ // filesystem, and we can see transient errors from this.
+ for _, change := range changes {
+ if change.Kind == ChangeDelete {
+ whiteOutDir := filepath.Dir(change.Path)
+ whiteOutBase := filepath.Base(change.Path)
+ whiteOut := filepath.Join(whiteOutDir, ".wh."+whiteOutBase)
+ timestamp := time.Now()
+ hdr := &tar.Header{
+ Name: whiteOut[1:],
+ Size: 0,
+ ModTime: timestamp,
+ AccessTime: timestamp,
+ ChangeTime: timestamp,
+ }
+ if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+ logrus.Debugf("Can't write whiteout header: %s", err)
+ }
+ } else {
+ path := filepath.Join(dir, change.Path)
+ if err := ta.addTarFile(path, change.Path[1:]); err != nil {
+ logrus.Debugf("Can't add file %s to tar: %s", path, err)
+ }
+ }
+ }
+
+ // Make sure to check the error on Close.
+ if err := ta.TarWriter.Close(); err != nil {
+ logrus.Debugf("Can't close layer: %s", err)
+ }
+ if err := writer.Close(); err != nil {
+ logrus.Debugf("failed close Changes writer: %s", err)
+ }
+ }()
+ return reader, nil
+}
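+
+// Illustrative usage (sketch; directory paths are hypothetical): diff two
+// directory trees and stream the result as a layer tar, where w is any
+// io.Writer:
+//
+//    changes, err := ChangesDirs("/data/new", "/data/old")
+//    if err != nil {
+//        return err
+//    }
+//    layer, err := ExportChanges("/data/new", changes)
+//    if err != nil {
+//        return err
+//    }
+//    defer layer.Close()
+//    _, err = io.Copy(w, layer)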
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_linux.go
new file mode 100644
index 0000000..dee8b7c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_linux.go
@@ -0,0 +1,285 @@
+package archive
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "path/filepath"
+ "sort"
+ "syscall"
+ "unsafe"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+// walker is used to implement collectFileInfoForChanges on linux. While this
+// method in general returns the entire contents of two directory trees, we
+// optimize some FS calls out on linux. In particular, we take advantage of the
+// fact that getdents(2) returns the inode of each file in the directory being
+// walked, which, when walking two trees in parallel to generate a list of
+// changes, can be used to prune subtrees without ever having to lstat(2) them
+// directly. Eliminating stat calls in this way can save seconds on large
+// images.
+type walker struct {
+ dir1 string
+ dir2 string
+ root1 *FileInfo
+ root2 *FileInfo
+}
+
+// collectFileInfoForChanges returns a complete representation of the trees
+// rooted at dir1 and dir2, with one important exception: any subtree or
+// leaf where the inode and device numbers are an exact match between dir1
+// and dir2 will be pruned from the results. This method is *only* to be used
+// for generating a list of changes between the two directories, as it does not
+// reflect the full contents.
+func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
+ w := &walker{
+ dir1: dir1,
+ dir2: dir2,
+ root1: newRootFileInfo(),
+ root2: newRootFileInfo(),
+ }
+
+ i1, err := os.Lstat(w.dir1)
+ if err != nil {
+ return nil, nil, err
+ }
+ i2, err := os.Lstat(w.dir2)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if err := w.walk("/", i1, i2); err != nil {
+ return nil, nil, err
+ }
+
+ return w.root1, w.root2, nil
+}
+
+// Given a FileInfo, its path info, and a reference to the root of the tree
+// being constructed, register this file with the tree.
+func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
+ if fi == nil {
+ return nil
+ }
+ parent := root.LookUp(filepath.Dir(path))
+ if parent == nil {
+ return fmt.Errorf("collectFileInfoForChanges: Unexpectedly no parent for %s", path)
+ }
+ info := &FileInfo{
+ name: filepath.Base(path),
+ children: make(map[string]*FileInfo),
+ parent: parent,
+ }
+ cpath := filepath.Join(dir, path)
+ stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t))
+ if err != nil {
+ return err
+ }
+ info.stat = stat
+ info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
+ parent.children[info.name] = info
+ return nil
+}
+
+// Walk a subtree rooted at the same path in both trees being iterated. For
+// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d
+func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
+ // Register these nodes with the return trees, unless we're still at the
+ // (already-created) roots:
+ if path != "/" {
+ if err := walkchunk(path, i1, w.dir1, w.root1); err != nil {
+ return err
+ }
+ if err := walkchunk(path, i2, w.dir2, w.root2); err != nil {
+ return err
+ }
+ }
+
+ is1Dir := i1 != nil && i1.IsDir()
+ is2Dir := i2 != nil && i2.IsDir()
+
+ sameDevice := false
+ if i1 != nil && i2 != nil {
+ si1 := i1.Sys().(*syscall.Stat_t)
+ si2 := i2.Sys().(*syscall.Stat_t)
+ if si1.Dev == si2.Dev {
+ sameDevice = true
+ }
+ }
+
+ // If these files are both non-existent, or leaves (non-dirs), we are done.
+ if !is1Dir && !is2Dir {
+ return nil
+ }
+
+ // Fetch the names of all the files contained in both directories being walked:
+ var names1, names2 []nameIno
+ if is1Dir {
+ names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access
+ if err != nil {
+ return err
+ }
+ }
+ if is2Dir {
+ names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access
+ if err != nil {
+ return err
+ }
+ }
+
+ // We have lists of the files contained in both parallel directories, sorted
+ // in the same order. Walk them in parallel, generating a unique merged list
+ // of all items present in either or both directories.
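+ // Illustration (sketch): with names1 = [a, b, d] and names2 = [b, c], the
+ // merged list is [a, b, c, d] -- except that "b" is omitted when its inode
+ // matches in both trees on the same device, since an identical inode means
+ // the entry is shared between the layers and the subtree can be pruned.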
+ var names []string
+ ix1 := 0
+ ix2 := 0
+
+ for {
+ if ix1 >= len(names1) {
+ break
+ }
+ if ix2 >= len(names2) {
+ break
+ }
+
+ ni1 := names1[ix1]
+ ni2 := names2[ix2]
+
+ switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) {
+ case -1: // ni1 < ni2 -- advance ni1
+ // we will not encounter ni1 in names2
+ names = append(names, ni1.name)
+ ix1++
+ case 0: // ni1 == ni2
+ if ni1.ino != ni2.ino || !sameDevice {
+ names = append(names, ni1.name)
+ }
+ ix1++
+ ix2++
+ case 1: // ni1 > ni2 -- advance ni2
+ // we will not encounter ni2 in names1
+ names = append(names, ni2.name)
+ ix2++
+ }
+ }
+ for ix1 < len(names1) {
+ names = append(names, names1[ix1].name)
+ ix1++
+ }
+ for ix2 < len(names2) {
+ names = append(names, names2[ix2].name)
+ ix2++
+ }
+
+ // For each of the names present in either or both of the directories being
+ // iterated, stat the name under each root, and recurse the pair of them:
+ for _, name := range names {
+ fname := filepath.Join(path, name)
+ var cInfo1, cInfo2 os.FileInfo
+ if is1Dir {
+ cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ }
+ if is2Dir {
+ cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ }
+ if err = w.walk(fname, cInfo1, cInfo2); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// {name,inode} pairs used to support the early-pruning logic of the walker type
+type nameIno struct {
+ name string
+ ino uint64
+}
+
+type nameInoSlice []nameIno
+
+func (s nameInoSlice) Len() int { return len(s) }
+func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name }
+
+// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode
+// numbers further up the stack when reading directory contents. Unlike
+// os.Readdirnames, which returns a list of filenames, this function returns a
+// list of {filename,inode} pairs.
+func readdirnames(dirname string) (names []nameIno, err error) {
+ var (
+ size = 100
+ buf = make([]byte, 4096)
+ nbuf int
+ bufp int
+ nb int
+ )
+
+ f, err := os.Open(dirname)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ names = make([]nameIno, 0, size) // Empty with room to grow.
+ for {
+ // Refill the buffer if necessary
+ if bufp >= nbuf {
+ bufp = 0
+ nbuf, err = syscall.ReadDirent(int(f.Fd()), buf) // getdents on linux
+ if nbuf < 0 {
+ nbuf = 0
+ }
+ if err != nil {
+ return nil, os.NewSyscallError("readdirent", err)
+ }
+ if nbuf <= 0 {
+ break // EOF
+ }
+ }
+
+ // Drain the buffer
+ nb, names = parseDirent(buf[bufp:nbuf], names)
+ bufp += nb
+ }
+
+ sl := nameInoSlice(names)
+ sort.Sort(sl)
+ return sl, nil
+}
+
+// parseDirent is a minor modification of syscall.ParseDirent (linux version)
+// which returns {name,inode} pairs instead of just names.
+func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) {
+ origlen := len(buf)
+ for len(buf) > 0 {
+ dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0]))
+ buf = buf[dirent.Reclen:]
+ if dirent.Ino == 0 { // File absent in directory.
+ continue
+ }
+ bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
+ var name = string(bytes[0:clen(bytes[:])])
+ if name == "." || name == ".." { // Useless names
+ continue
+ }
+ names = append(names, nameIno{name, dirent.Ino})
+ }
+ return origlen - len(buf), names
+}
+
+func clen(n []byte) int {
+ for i := 0; i < len(n); i++ {
+ if n[i] == 0 {
+ return i
+ }
+ }
+ return len(n)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_other.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_other.go
new file mode 100644
index 0000000..da70ed3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_other.go
@@ -0,0 +1,97 @@
+// +build !linux
+
+package archive
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) {
+ var (
+ oldRoot, newRoot *FileInfo
+ err1, err2 error
+ errs = make(chan error, 2)
+ )
+ go func() {
+ oldRoot, err1 = collectFileInfo(oldDir)
+ errs <- err1
+ }()
+ go func() {
+ newRoot, err2 = collectFileInfo(newDir)
+ errs <- err2
+ }()
+
+ // block until both routines have returned
+ for i := 0; i < 2; i++ {
+ if err := <-errs; err != nil {
+ return nil, nil, err
+ }
+ }
+
+ return oldRoot, newRoot, nil
+}
+
+func collectFileInfo(sourceDir string) (*FileInfo, error) {
+ root := newRootFileInfo()
+
+ err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Rebase path
+ relPath, err := filepath.Rel(sourceDir, path)
+ if err != nil {
+ return err
+ }
+
+ // As this runs on the daemon side, file paths are OS specific.
+ relPath = filepath.Join(string(os.PathSeparator), relPath)
+
+ // See https://github.com/golang/go/issues/9168 - bug in filepath.Join.
+ // Temporary workaround. If the returned path starts with two backslashes,
+ // trim it down to a single backslash. Only relevant on Windows.
+ if runtime.GOOS == "windows" {
+ if strings.HasPrefix(relPath, `\\`) {
+ relPath = relPath[1:]
+ }
+ }
+
+ if relPath == string(os.PathSeparator) {
+ return nil
+ }
+
+ parent := root.LookUp(filepath.Dir(relPath))
+ if parent == nil {
+ return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
+ }
+
+ info := &FileInfo{
+ name: filepath.Base(relPath),
+ children: make(map[string]*FileInfo),
+ parent: parent,
+ }
+
+ s, err := system.Lstat(path)
+ if err != nil {
+ return err
+ }
+ info.stat = s
+
+ info.capability, _ = system.Lgetxattr(path, "security.capability")
+
+ parent.children[info.name] = info
+
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return root, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_posix_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_posix_test.go
new file mode 100644
index 0000000..9d528e6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_posix_test.go
@@ -0,0 +1,127 @@
+package archive
+
+import (
+ "archive/tar"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+ "sort"
+ "testing"
+)
+
+func TestHardLinkOrder(t *testing.T) {
+ names := []string{"file1.txt", "file2.txt", "file3.txt"}
+ msg := []byte("Hey y'all")
+
+ // Create dir
+ src, err := ioutil.TempDir("", "docker-hardlink-test-src-")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(src)
+ for _, name := range names {
+ func() {
+ fh, err := os.Create(path.Join(src, name))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer fh.Close()
+ if _, err = fh.Write(msg); err != nil {
+ t.Fatal(err)
+ }
+ }()
+ }
+ // Create dest, with changes that includes hardlinks
+ dest, err := ioutil.TempDir("", "docker-hardlink-test-dest-")
+ if err != nil {
+ t.Fatal(err)
+ }
+ os.RemoveAll(dest) // we just want the name, at first
+ if err := copyDir(src, dest); err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dest)
+ for _, name := range names {
+ for i := 0; i < 5; i++ {
+ if err := os.Link(path.Join(dest, name), path.Join(dest, fmt.Sprintf("%s.link%d", name, i))); err != nil {
+ t.Fatal(err)
+ }
+ }
+ }
+
+ // get changes
+ changes, err := ChangesDirs(dest, src)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // sort
+ sort.Sort(changesByPath(changes))
+
+ // ExportChanges
+ ar, err := ExportChanges(dest, changes)
+ if err != nil {
+ t.Fatal(err)
+ }
+ hdrs, err := walkHeaders(ar)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // reverse sort
+ sort.Sort(sort.Reverse(changesByPath(changes)))
+ // ExportChanges
+ arRev, err := ExportChanges(dest, changes)
+ if err != nil {
+ t.Fatal(err)
+ }
+ hdrsRev, err := walkHeaders(arRev)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // line up the two sets
+ sort.Sort(tarHeaders(hdrs))
+ sort.Sort(tarHeaders(hdrsRev))
+
+ // compare Name, Size, Typeflag and Linkname
+ for i := range hdrs {
+ if hdrs[i].Name != hdrsRev[i].Name {
+ t.Errorf("headers - expected name %q; but got %q", hdrs[i].Name, hdrsRev[i].Name)
+ }
+ if hdrs[i].Size != hdrsRev[i].Size {
+ t.Errorf("headers - %q expected size %d; but got %d", hdrs[i].Name, hdrs[i].Size, hdrsRev[i].Size)
+ }
+ if hdrs[i].Typeflag != hdrsRev[i].Typeflag {
+ t.Errorf("headers - %q expected type %d; but got %d", hdrs[i].Name, hdrs[i].Typeflag, hdrsRev[i].Typeflag)
+ }
+ if hdrs[i].Linkname != hdrsRev[i].Linkname {
+ t.Errorf("headers - %q expected linkname %q; but got %q", hdrs[i].Name, hdrs[i].Linkname, hdrsRev[i].Linkname)
+ }
+ }
+
+}
+
+type tarHeaders []tar.Header
+
+func (th tarHeaders) Len() int { return len(th) }
+func (th tarHeaders) Swap(i, j int) { th[j], th[i] = th[i], th[j] }
+func (th tarHeaders) Less(i, j int) bool { return th[i].Name < th[j].Name }
+
+func walkHeaders(r io.Reader) ([]tar.Header, error) {
+ t := tar.NewReader(r)
+ headers := []tar.Header{}
+ for {
+ hdr, err := t.Next()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ return headers, err
+ }
+ headers = append(headers, *hdr)
+ }
+ return headers, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_test.go
new file mode 100644
index 0000000..509bdb2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_test.go
@@ -0,0 +1,495 @@
+package archive
+
+import (
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path"
+ "sort"
+ "testing"
+ "time"
+)
+
+func max(x, y int) int {
+ if x >= y {
+ return x
+ }
+ return y
+}
+
+func copyDir(src, dst string) error {
+ cmd := exec.Command("cp", "-a", src, dst)
+ if err := cmd.Run(); err != nil {
+ return err
+ }
+ return nil
+}
+
+type FileType uint32
+
+const (
+ Regular FileType = iota
+ Dir
+ Symlink
+)
+
+type FileData struct {
+ filetype FileType
+ path string
+ contents string
+ permissions os.FileMode
+}
+
+func createSampleDir(t *testing.T, root string) {
+ files := []FileData{
+ {Regular, "file1", "file1\n", 0600},
+ {Regular, "file2", "file2\n", 0666},
+ {Regular, "file3", "file3\n", 0404},
+ {Regular, "file4", "file4\n", 0600},
+ {Regular, "file5", "file5\n", 0600},
+ {Regular, "file6", "file6\n", 0600},
+ {Regular, "file7", "file7\n", 0600},
+ {Dir, "dir1", "", 0740},
+ {Regular, "dir1/file1-1", "file1-1\n", 01444},
+ {Regular, "dir1/file1-2", "file1-2\n", 0666},
+ {Dir, "dir2", "", 0700},
+ {Regular, "dir2/file2-1", "file2-1\n", 0666},
+ {Regular, "dir2/file2-2", "file2-2\n", 0666},
+ {Dir, "dir3", "", 0700},
+ {Regular, "dir3/file3-1", "file3-1\n", 0666},
+ {Regular, "dir3/file3-2", "file3-2\n", 0666},
+ {Dir, "dir4", "", 0700},
+ {Regular, "dir4/file3-1", "file4-1\n", 0666},
+ {Regular, "dir4/file3-2", "file4-2\n", 0666},
+ {Symlink, "symlink1", "target1", 0666},
+ {Symlink, "symlink2", "target2", 0666},
+ }
+
+ now := time.Now()
+ for _, info := range files {
+ p := path.Join(root, info.path)
+ if info.filetype == Dir {
+ if err := os.MkdirAll(p, info.permissions); err != nil {
+ t.Fatal(err)
+ }
+ } else if info.filetype == Regular {
+ if err := ioutil.WriteFile(p, []byte(info.contents), info.permissions); err != nil {
+ t.Fatal(err)
+ }
+ } else if info.filetype == Symlink {
+ if err := os.Symlink(info.contents, p); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ if info.filetype != Symlink {
+ // Set a consistent ctime, atime for all files and dirs
+ if err := os.Chtimes(p, now, now); err != nil {
+ t.Fatal(err)
+ }
+ }
+ }
+}
+
+func TestChangeString(t *testing.T) {
+ modifyChange := Change{"change", ChangeModify}
+ toString := modifyChange.String()
+ if toString != "C change" {
+ t.Fatalf("String() of a change with ChangeModify Kind should have been %s but was %s", "C change", toString)
+ }
+ addChange := Change{"change", ChangeAdd}
+ toString = addChange.String()
+ if toString != "A change" {
+ t.Fatalf("String() of a change with ChangeAdd Kind should have been %s but was %s", "A change", toString)
+ }
+ deleteChange := Change{"change", ChangeDelete}
+ toString = deleteChange.String()
+ if toString != "D change" {
+ t.Fatalf("String() of a change with ChangeDelete Kind should have been %s but was %s", "D change", toString)
+ }
+}
+
+func TestChangesWithNoChanges(t *testing.T) {
+ rwLayer, err := ioutil.TempDir("", "docker-changes-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(rwLayer)
+ layer, err := ioutil.TempDir("", "docker-changes-test-layer")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(layer)
+ createSampleDir(t, layer)
+ changes, err := Changes([]string{layer}, rwLayer)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(changes) != 0 {
+ t.Fatalf("Changes with no difference should have detect no changes, but detected %d", len(changes))
+ }
+}
+
+func TestChangesWithChanges(t *testing.T) {
+ // Mock the readonly layer
+ layer, err := ioutil.TempDir("", "docker-changes-test-layer")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(layer)
+ createSampleDir(t, layer)
+ os.MkdirAll(path.Join(layer, "dir1/subfolder"), 0740)
+
+ // Mock the RW layer
+ rwLayer, err := ioutil.TempDir("", "docker-changes-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(rwLayer)
+
+ // Create a folder in RW layer
+ dir1 := path.Join(rwLayer, "dir1")
+ os.MkdirAll(dir1, 0740)
+ deletedFile := path.Join(dir1, ".wh.file1-2")
+ ioutil.WriteFile(deletedFile, []byte{}, 0600)
+ modifiedFile := path.Join(dir1, "file1-1")
+ ioutil.WriteFile(modifiedFile, []byte{0x00}, 01444)
+ // Let's add a subfolder for a newFile
+ subfolder := path.Join(dir1, "subfolder")
+ os.MkdirAll(subfolder, 0740)
+ newFile := path.Join(subfolder, "newFile")
+ ioutil.WriteFile(newFile, []byte{}, 0740)
+
+ changes, err := Changes([]string{layer}, rwLayer)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expectedChanges := []Change{
+ {"/dir1", ChangeModify},
+ {"/dir1/file1-1", ChangeModify},
+ {"/dir1/file1-2", ChangeDelete},
+ {"/dir1/subfolder", ChangeModify},
+ {"/dir1/subfolder/newFile", ChangeAdd},
+ }
+ checkChanges(expectedChanges, changes, t)
+}
+
+// See https://github.com/docker/docker/pull/13590
+func TestChangesWithChangesGH13590(t *testing.T) {
+ baseLayer, err := ioutil.TempDir("", "docker-changes-test.")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(baseLayer)
+
+ dir3 := path.Join(baseLayer, "dir1/dir2/dir3")
+ os.MkdirAll(dir3, 0740)
+
+ file := path.Join(dir3, "file.txt")
+ ioutil.WriteFile(file, []byte("hello"), 0666)
+
+ layer, err := ioutil.TempDir("", "docker-changes-test2.")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(layer)
+
+ // Test creating a new file
+ if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil {
+ t.Fatalf("Cmd failed: %q", err)
+ }
+
+ os.Remove(path.Join(layer, "dir1/dir2/dir3/file.txt"))
+ file = path.Join(layer, "dir1/dir2/dir3/file1.txt")
+ ioutil.WriteFile(file, []byte("bye"), 0666)
+
+ changes, err := Changes([]string{baseLayer}, layer)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expectedChanges := []Change{
+ {"/dir1/dir2/dir3", ChangeModify},
+ {"/dir1/dir2/dir3/file1.txt", ChangeAdd},
+ }
+ checkChanges(expectedChanges, changes, t)
+
+ // Now test changing a file
+ layer, err = ioutil.TempDir("", "docker-changes-test3.")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(layer)
+
+ if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil {
+ t.Fatalf("Cmd failed: %q", err)
+ }
+
+ file = path.Join(layer, "dir1/dir2/dir3/file.txt")
+ ioutil.WriteFile(file, []byte("bye"), 0666)
+
+ changes, err = Changes([]string{baseLayer}, layer)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expectedChanges = []Change{
+ {"/dir1/dir2/dir3/file.txt", ChangeModify},
+ }
+ checkChanges(expectedChanges, changes, t)
+}
+
+// Create a directory, copy it, and make sure we report no changes between the two
+func TestChangesDirsEmpty(t *testing.T) {
+ src, err := ioutil.TempDir("", "docker-changes-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(src)
+ createSampleDir(t, src)
+ dst := src + "-copy"
+ if err := copyDir(src, dst); err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dst)
+ changes, err := ChangesDirs(dst, src)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(changes) != 0 {
+ t.Fatalf("Reported changes for identical dirs: %v", changes)
+ }
+}
+
+func mutateSampleDir(t *testing.T, root string) {
+ // Remove a regular file
+ if err := os.RemoveAll(path.Join(root, "file1")); err != nil {
+ t.Fatal(err)
+ }
+
+ // Remove a directory
+ if err := os.RemoveAll(path.Join(root, "dir1")); err != nil {
+ t.Fatal(err)
+ }
+
+ // Remove a symlink
+ if err := os.RemoveAll(path.Join(root, "symlink1")); err != nil {
+ t.Fatal(err)
+ }
+
+ // Rewrite a file
+ if err := ioutil.WriteFile(path.Join(root, "file2"), []byte("fileNN\n"), 0777); err != nil {
+ t.Fatal(err)
+ }
+
+ // Replace a file
+ if err := os.RemoveAll(path.Join(root, "file3")); err != nil {
+ t.Fatal(err)
+ }
+ if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileMM\n"), 0404); err != nil {
+ t.Fatal(err)
+ }
+
+ // Touch file
+ if err := os.Chtimes(path.Join(root, "file4"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil {
+ t.Fatal(err)
+ }
+
+ // Replace file with dir
+ if err := os.RemoveAll(path.Join(root, "file5")); err != nil {
+ t.Fatal(err)
+ }
+ if err := os.MkdirAll(path.Join(root, "file5"), 0666); err != nil {
+ t.Fatal(err)
+ }
+
+ // Create new file
+ if err := ioutil.WriteFile(path.Join(root, "filenew"), []byte("filenew\n"), 0777); err != nil {
+ t.Fatal(err)
+ }
+
+ // Create new dir
+ if err := os.MkdirAll(path.Join(root, "dirnew"), 0766); err != nil {
+ t.Fatal(err)
+ }
+
+ // Create a new symlink
+ if err := os.Symlink("targetnew", path.Join(root, "symlinknew")); err != nil {
+ t.Fatal(err)
+ }
+
+ // Change a symlink
+ if err := os.RemoveAll(path.Join(root, "symlink2")); err != nil {
+ t.Fatal(err)
+ }
+ if err := os.Symlink("target2change", path.Join(root, "symlink2")); err != nil {
+ t.Fatal(err)
+ }
+
+ // Replace dir with file
+ if err := os.RemoveAll(path.Join(root, "dir2")); err != nil {
+ t.Fatal(err)
+ }
+ if err := ioutil.WriteFile(path.Join(root, "dir2"), []byte("dir2\n"), 0777); err != nil {
+ t.Fatal(err)
+ }
+
+ // Touch dir
+ if err := os.Chtimes(path.Join(root, "dir3"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestChangesDirsMutated(t *testing.T) {
+ src, err := ioutil.TempDir("", "docker-changes-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ createSampleDir(t, src)
+ dst := src + "-copy"
+ if err := copyDir(src, dst); err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(src)
+ defer os.RemoveAll(dst)
+
+ mutateSampleDir(t, dst)
+
+ changes, err := ChangesDirs(dst, src)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ sort.Sort(changesByPath(changes))
+
+ expectedChanges := []Change{
+ {"/dir1", ChangeDelete},
+ {"/dir2", ChangeModify},
+ {"/dirnew", ChangeAdd},
+ {"/file1", ChangeDelete},
+ {"/file2", ChangeModify},
+ {"/file3", ChangeModify},
+ {"/file4", ChangeModify},
+ {"/file5", ChangeModify},
+ {"/filenew", ChangeAdd},
+ {"/symlink1", ChangeDelete},
+ {"/symlink2", ChangeModify},
+ {"/symlinknew", ChangeAdd},
+ }
+
+ for i := 0; i < max(len(changes), len(expectedChanges)); i++ {
+ if i >= len(expectedChanges) {
+ t.Fatalf("unexpected change %s\n", changes[i].String())
+ }
+ if i >= len(changes) {
+ t.Fatalf("no change for expected change %s\n", expectedChanges[i].String())
+ }
+ if changes[i].Path == expectedChanges[i].Path {
+ if changes[i] != expectedChanges[i] {
+ t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String())
+ }
+ } else if changes[i].Path < expectedChanges[i].Path {
+ t.Fatalf("unexpected change %s\n", changes[i].String())
+ } else {
+ t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String())
+ }
+ }
+}
+
+func TestApplyLayer(t *testing.T) {
+ src, err := ioutil.TempDir("", "docker-changes-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ createSampleDir(t, src)
+ defer os.RemoveAll(src)
+ dst := src + "-copy"
+ if err := copyDir(src, dst); err != nil {
+ t.Fatal(err)
+ }
+ mutateSampleDir(t, dst)
+ defer os.RemoveAll(dst)
+
+ changes, err := ChangesDirs(dst, src)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ layer, err := ExportChanges(dst, changes)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ layerCopy, err := NewTempArchive(layer, "")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err := ApplyLayer(src, layerCopy); err != nil {
+ t.Fatal(err)
+ }
+
+ changes2, err := ChangesDirs(src, dst)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(changes2) != 0 {
+ t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2)
+ }
+}
+
+func TestChangesSizeWithNoChanges(t *testing.T) {
+ size := ChangesSize("/tmp", nil)
+ if size != 0 {
+ t.Fatalf("ChangesSizes with no changes should be 0, was %d", size)
+ }
+}
+
+func TestChangesSizeWithOnlyDeleteChanges(t *testing.T) {
+ changes := []Change{
+ {Path: "deletedPath", Kind: ChangeDelete},
+ }
+ size := ChangesSize("/tmp", changes)
+ if size != 0 {
+ t.Fatalf("ChangesSizes with only delete changes should be 0, was %d", size)
+ }
+}
+
+func TestChangesSize(t *testing.T) {
+ parentPath, err := ioutil.TempDir("", "docker-changes-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(parentPath)
+ addition := path.Join(parentPath, "addition")
+ if err := ioutil.WriteFile(addition, []byte{0x01, 0x01, 0x01}, 0744); err != nil {
+ t.Fatal(err)
+ }
+ modification := path.Join(parentPath, "modification")
+ if err = ioutil.WriteFile(modification, []byte{0x01, 0x01, 0x01}, 0744); err != nil {
+ t.Fatal(err)
+ }
+ changes := []Change{
+ {Path: "addition", Kind: ChangeAdd},
+ {Path: "modification", Kind: ChangeModify},
+ }
+ size := ChangesSize(parentPath, changes)
+ if size != 6 {
+ t.Fatalf("ChangesSizes with only delete changes should be 0, was %d", size)
+ }
+}
+
+func checkChanges(expectedChanges, changes []Change, t *testing.T) {
+ sort.Sort(changesByPath(expectedChanges))
+ sort.Sort(changesByPath(changes))
+ for i := 0; i < max(len(changes), len(expectedChanges)); i++ {
+ if i >= len(expectedChanges) {
+ t.Fatalf("unexpected change %s\n", changes[i].String())
+ }
+ if i >= len(changes) {
+ t.Fatalf("no change for expected change %s\n", expectedChanges[i].String())
+ }
+ if changes[i].Path == expectedChanges[i].Path {
+ if changes[i] != expectedChanges[i] {
+ t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String())
+ }
+ } else if changes[i].Path < expectedChanges[i].Path {
+ t.Fatalf("unexpected change %s\n", changes[i].String())
+ } else {
+ t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String())
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_unix.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_unix.go
new file mode 100644
index 0000000..d780f16
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_unix.go
@@ -0,0 +1,27 @@
+// +build !windows
+
+package archive
+
+import (
+ "syscall"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+func statDifferent(oldStat *system.Stat_t, newStat *system.Stat_t) bool {
+ if oldStat.Mode() != newStat.Mode() ||
+ oldStat.Uid() != newStat.Uid() ||
+ oldStat.Gid() != newStat.Gid() ||
+ oldStat.Rdev() != newStat.Rdev() ||
+ // Don't look at size for dirs, it's not a good measure of change
+ (oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR &&
+ (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
+ return true
+ }
+ return false
+}
+
+func (info *FileInfo) isDir() bool {
+ return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_windows.go
new file mode 100644
index 0000000..4809b7a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_windows.go
@@ -0,0 +1,20 @@
+package archive
+
+import (
+ "github.com/docker/docker/pkg/system"
+)
+
+func statDifferent(oldStat *system.Stat_t, newStat *system.Stat_t) bool {
+ // Don't look at size for dirs, it's not a good measure of change
+ if oldStat.ModTime() != newStat.ModTime() ||
+ oldStat.Mode() != newStat.Mode() ||
+ oldStat.Size() != newStat.Size() && !oldStat.IsDir() {
+ return true
+ }
+ return false
+}
+
+func (info *FileInfo) isDir() bool {
+ return info.parent == nil || info.stat.IsDir()
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/copy.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/copy.go
new file mode 100644
index 0000000..fee4a02
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/copy.go
@@ -0,0 +1,308 @@
+package archive
+
+import (
+ "archive/tar"
+ "errors"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+
+ log "github.com/Sirupsen/logrus"
+)
+
+// Errors used or returned by this file.
+var (
+ ErrNotDirectory = errors.New("not a directory")
+ ErrDirNotExists = errors.New("no such directory")
+ ErrCannotCopyDir = errors.New("cannot copy directory")
+ ErrInvalidCopySource = errors.New("invalid copy source content")
+)
+
+// PreserveTrailingDotOrSeparator returns the given cleaned path (after
+// processing using any utility functions from the path or filepath stdlib
+// packages) and appends a trailing `/.` or `/` if its corresponding original
+// path (from before being processed by utility functions from the path or
+// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned
+// path already ends in a `.` path segment, then another is not added. If the
+// cleaned path already ends in a path separator, then another is not added.
+func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string {
+ if !SpecifiesCurrentDir(cleanedPath) && SpecifiesCurrentDir(originalPath) {
+ if !HasTrailingPathSeparator(cleanedPath) {
+ // Add a separator if it doesn't already end with one (a cleaned
+ // path would only end in a separator if it is the root).
+ cleanedPath += string(filepath.Separator)
+ }
+ cleanedPath += "."
+ }
+
+ if !HasTrailingPathSeparator(cleanedPath) && HasTrailingPathSeparator(originalPath) {
+ cleanedPath += string(filepath.Separator)
+ }
+
+ return cleanedPath
+}
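+
+// For example (illustrative): with cleanedPath "dir" and originalPath
+// "dir/.", this returns "dir/."; with cleanedPath "dir" and originalPath
+// "dir/", it returns "dir/".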
+
+// AssertsDirectory returns whether the given path is
+// asserted to be a directory, i.e., the path ends with
+// a trailing '/' or `/.`, assuming a path separator of `/`.
+func AssertsDirectory(path string) bool {
+ return HasTrailingPathSeparator(path) || SpecifiesCurrentDir(path)
+}
+
+// HasTrailingPathSeparator returns whether the given
+// path ends with the system's path separator character.
+func HasTrailingPathSeparator(path string) bool {
+ return len(path) > 0 && os.IsPathSeparator(path[len(path)-1])
+}
+
+// SpecifiesCurrentDir returns whether the given path specifies
+// a "current directory", i.e., the last path segment is `.`.
+func SpecifiesCurrentDir(path string) bool {
+ return filepath.Base(path) == "."
+}
+
+// SplitPathDirEntry splits the given path between its
+// parent directory and its basename in that directory.
+func SplitPathDirEntry(localizedPath string) (dir, base string) {
+ normalizedPath := filepath.ToSlash(localizedPath)
+ vol := filepath.VolumeName(normalizedPath)
+ normalizedPath = normalizedPath[len(vol):]
+
+ if normalizedPath == "/" {
+ // Specifies the root path.
+ return filepath.FromSlash(vol + normalizedPath), "."
+ }
+
+ trimmedPath := vol + strings.TrimRight(normalizedPath, "/")
+
+ dir = filepath.FromSlash(path.Dir(trimmedPath))
+ base = filepath.FromSlash(path.Base(trimmedPath))
+
+ return dir, base
+}
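+
+// For example (illustrative, assuming a platform whose separator is `/`):
+// SplitPathDirEntry("/foo/bar/") returns ("/foo", "bar"), and
+// SplitPathDirEntry("/") returns ("/", ".").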
+
+// TarResource archives the resource at the given sourcePath into a Tar
+// archive. A non-nil error is returned if sourcePath does not exist or is
+// asserted to be a directory but exists as another type of file.
+//
+// This function acts as a convenient wrapper around TarWithOptions, which
+// requires a directory as the source path. TarResource accepts either a
+// directory or a file path and correctly sets the Tar options.
+func TarResource(sourcePath string) (content Archive, err error) {
+ if _, err = os.Lstat(sourcePath); err != nil {
+ // Catches the case where the source does not exist or is not a
+ // directory if asserted to be a directory, as this also causes an
+ // error.
+ return
+ }
+
+ if len(sourcePath) > 1 && HasTrailingPathSeparator(sourcePath) {
+ // In the case where the source path is a symbolic link AND it ends
+ // with a path separator, we will want to evaluate the symbolic link.
+ trimmedPath := sourcePath[:len(sourcePath)-1]
+ stat, err := os.Lstat(trimmedPath)
+ if err != nil {
+ return nil, err
+ }
+
+ if stat.Mode()&os.ModeSymlink != 0 {
+ if sourcePath, err = filepath.EvalSymlinks(trimmedPath); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ // Separate the source path between its directory and
+ // the entry in that directory which we are archiving.
+ sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
+
+ filter := []string{sourceBase}
+
+ log.Debugf("copying %q from %q", sourceBase, sourceDir)
+
+ return TarWithOptions(sourceDir, &TarOptions{
+ Compression: Uncompressed,
+ IncludeFiles: filter,
+ IncludeSourceDir: true,
+ })
+}
+
+// CopyInfo holds basic info about the source
+// or destination path of a copy operation.
+type CopyInfo struct {
+ Path string
+ Exists bool
+ IsDir bool
+}
+
+// CopyInfoStatPath stats the given path to create a CopyInfo
+// struct representing that resource. If mustExist is true, then
+// it is an error if there is no file or directory at the given path.
+func CopyInfoStatPath(path string, mustExist bool) (CopyInfo, error) {
+ pathInfo := CopyInfo{Path: path}
+
+ fileInfo, err := os.Lstat(path)
+
+ if err == nil {
+ pathInfo.Exists, pathInfo.IsDir = true, fileInfo.IsDir()
+ } else if os.IsNotExist(err) && !mustExist {
+ err = nil
+ }
+
+ return pathInfo, err
+}
+
+// PrepareArchiveCopy prepares the given srcContent archive, which should
+// contain the archived resource described by srcInfo, for copying to the
+// destination described by dstInfo. Returns the possibly modified content archive along
+// with the path to the destination directory which it should be extracted to.
+func PrepareArchiveCopy(srcContent ArchiveReader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) {
+ // Separate the destination path between its directory and base
+ // components in case the source archive contents need to be rebased.
+ dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
+ _, srcBase := SplitPathDirEntry(srcInfo.Path)
+
+ switch {
+ case dstInfo.Exists && dstInfo.IsDir:
+ // The destination exists as a directory. No alteration
+ // to srcContent is needed as its contents can be
+ // simply extracted to the destination directory.
+ return dstInfo.Path, ioutil.NopCloser(srcContent), nil
+ case dstInfo.Exists && srcInfo.IsDir:
+ // The destination exists as some type of file and the source
+ // content is a directory. This is an error condition since
+ // you cannot copy a directory to an existing file location.
+ return "", nil, ErrCannotCopyDir
+ case dstInfo.Exists:
+ // The destination exists as some type of file and the source content
+ // is also a file. The source content entry will have to be renamed to
+ // have a basename which matches the destination path's basename.
+ return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+ case srcInfo.IsDir:
+ // The destination does not exist and the source content is an archive
+ // of a directory. The archive should be extracted to the parent of
+ // the destination path instead, and when it is, the directory that is
+ // created as a result should take the name of the destination path.
+ // The source content entries will have to be renamed to have a
+ // basename which matches the destination path's basename.
+ return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+ case AssertsDirectory(dstInfo.Path):
+ // The destination does not exist and is asserted to be created as a
+ // directory, but the source content is not a directory. This is an
+ // error condition since you cannot create a directory from a file
+ // source.
+ return "", nil, ErrDirNotExists
+ default:
+ // The last remaining case is when the destination does not exist, is
+ // not asserted to be a directory, and the source content is not an
+ // archive of a directory. In this case, the destination file will need
+ // to be created when the archive is extracted and the source content
+ // entry will have to be renamed to have a basename which matches the
+ // destination path's basename.
+ return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+ }
+
+}
+
+// rebaseArchiveEntries rewrites the given srcContent archive replacing
+// an occurrence of oldBase with newBase at the beginning of entry names.
+func rebaseArchiveEntries(srcContent ArchiveReader, oldBase, newBase string) Archive {
+ rebased, w := io.Pipe()
+
+ go func() {
+ srcTar := tar.NewReader(srcContent)
+ rebasedTar := tar.NewWriter(w)
+
+ for {
+ hdr, err := srcTar.Next()
+ if err == io.EOF {
+ // Signals end of archive.
+ rebasedTar.Close()
+ w.Close()
+ return
+ }
+ if err != nil {
+ w.CloseWithError(err)
+ return
+ }
+
+ hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
+
+ if err = rebasedTar.WriteHeader(hdr); err != nil {
+ w.CloseWithError(err)
+ return
+ }
+
+ if _, err = io.Copy(rebasedTar, srcTar); err != nil {
+ w.CloseWithError(err)
+ return
+ }
+ }
+ }()
+
+ return rebased
+}
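+
+// For example (illustrative): rebasing with oldBase "src" and newBase "dst"
+// rewrites an entry named "src/a/b" to "dst/a/b". Note that only the first
+// occurrence in each entry name is replaced.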
+
+// CopyResource performs an archive copy from the given source path to the
+// given destination path. The source path MUST exist and the destination
+// path's parent directory must exist.
+func CopyResource(srcPath, dstPath string) error {
+ var (
+ srcInfo CopyInfo
+ err error
+ )
+
+ // Clean the source and destination paths.
+ srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath)
+ dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath)
+
+ if srcInfo, err = CopyInfoStatPath(srcPath, true); err != nil {
+ return err
+ }
+
+ content, err := TarResource(srcPath)
+ if err != nil {
+ return err
+ }
+ defer content.Close()
+
+ return CopyTo(content, srcInfo, dstPath)
+}
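+
+// Illustrative usage (sketch; paths are hypothetical): copy a single file
+// into an existing directory, `docker cp` style:
+//
+//    if err := CopyResource("/tmp/src/config.json", "/tmp/dst/"); err != nil {
+//        return err
+//    }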
+
+// CopyTo handles extracting the given content whose
+// entries should be sourced from srcInfo to dstPath.
+func CopyTo(content ArchiveReader, srcInfo CopyInfo, dstPath string) error {
+ dstInfo, err := CopyInfoStatPath(dstPath, false)
+ if err != nil {
+ return err
+ }
+
+ if !dstInfo.Exists {
+ // Ensure destination parent dir exists.
+ dstParent, _ := SplitPathDirEntry(dstPath)
+
+ dstStat, err := os.Lstat(dstParent)
+ if err != nil {
+ return err
+ }
+ if !dstStat.IsDir() {
+ return ErrNotDirectory
+ }
+ }
+
+ dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
+ if err != nil {
+ return err
+ }
+ defer copyArchive.Close()
+
+ options := &TarOptions{
+ NoLchown: true,
+ NoOverwriteDirNonDir: true,
+ }
+
+ return Untar(copyArchive, dstDir, options)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/copy_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/copy_test.go
new file mode 100644
index 0000000..dd0b323
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/copy_test.go
@@ -0,0 +1,637 @@
+package archive
+
+import (
+ "bytes"
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+)
+
+func removeAllPaths(paths ...string) {
+ for _, path := range paths {
+ os.RemoveAll(path)
+ }
+}
+
+func getTestTempDirs(t *testing.T) (tmpDirA, tmpDirB string) {
+ var err error
+
+ if tmpDirA, err = ioutil.TempDir("", "archive-copy-test"); err != nil {
+ t.Fatal(err)
+ }
+
+ if tmpDirB, err = ioutil.TempDir("", "archive-copy-test"); err != nil {
+ t.Fatal(err)
+ }
+
+ return
+}
+
+func isNotDir(err error) bool {
+ return strings.Contains(err.Error(), "not a directory")
+}
+
+func joinTrailingSep(pathElements ...string) string {
+ joined := filepath.Join(pathElements...)
+
+ return fmt.Sprintf("%s%c", joined, filepath.Separator)
+}
+
+func fileContentsEqual(t *testing.T, filenameA, filenameB string) (err error) {
+ t.Logf("checking for equal file contents: %q and %q\n", filenameA, filenameB)
+
+ fileA, err := os.Open(filenameA)
+ if err != nil {
+ return
+ }
+ defer fileA.Close()
+
+ fileB, err := os.Open(filenameB)
+ if err != nil {
+ return
+ }
+ defer fileB.Close()
+
+ hasher := sha256.New()
+
+ if _, err = io.Copy(hasher, fileA); err != nil {
+ return
+ }
+
+ hashA := hasher.Sum(nil)
+ hasher.Reset()
+
+ if _, err = io.Copy(hasher, fileB); err != nil {
+ return
+ }
+
+ hashB := hasher.Sum(nil)
+
+ if !bytes.Equal(hashA, hashB) {
+ err = fmt.Errorf("file content hashes not equal - expected %s, got %s", hex.EncodeToString(hashA), hex.EncodeToString(hashB))
+ }
+
+ return
+}
+
+func dirContentsEqual(t *testing.T, newDir, oldDir string) (err error) {
+ t.Logf("checking for equal directory contents: %q and %q\n", newDir, oldDir)
+
+ var changes []Change
+
+ if changes, err = ChangesDirs(newDir, oldDir); err != nil {
+ return
+ }
+
+ if len(changes) != 0 {
+ err = fmt.Errorf("expected no changes between directories, but got: %v", changes)
+ }
+
+ return
+}
+
+func logDirContents(t *testing.T, dirPath string) {
+ logWalkedPaths := filepath.WalkFunc(func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ t.Errorf("stat error for path %q: %s", path, err)
+ return nil
+ }
+
+ if info.IsDir() {
+ path = joinTrailingSep(path)
+ }
+
+ t.Logf("\t%s", path)
+
+ return nil
+ })
+
+ t.Logf("logging directory contents: %q", dirPath)
+
+ if err := filepath.Walk(dirPath, logWalkedPaths); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func testCopyHelper(t *testing.T, srcPath, dstPath string) (err error) {
+ t.Logf("copying from %q to %q", srcPath, dstPath)
+
+ return CopyResource(srcPath, dstPath)
+}
+
+// Basic assumptions about SRC and DST:
+// 1. SRC must exist.
+// 2. If SRC ends with a trailing separator, it must be a directory.
+// 3. DST parent directory must exist.
+// 4. If DST exists as a file, it must not end with a trailing separator.
+
+// First get these easy error cases out of the way.
+
+// Test for error when SRC does not exist.
+func TestCopyErrSrcNotExists(t *testing.T) {
+ tmpDirA, tmpDirB := getTestTempDirs(t)
+ defer removeAllPaths(tmpDirA, tmpDirB)
+
+ content, err := TarResource(filepath.Join(tmpDirA, "file1"))
+ if err == nil {
+ content.Close()
+ t.Fatal("expected IsNotExist error, but got nil instead")
+ }
+
+ if !os.IsNotExist(err) {
+ t.Fatalf("expected IsNotExist error, but got %T: %s", err, err)
+ }
+}
+
+// Test for error when SRC ends in a trailing
+// path separator but it exists as a file.
+func TestCopyErrSrcNotDir(t *testing.T) {
+ tmpDirA, tmpDirB := getTestTempDirs(t)
+ defer removeAllPaths(tmpDirA, tmpDirB)
+
+ // Load A with some sample files and directories.
+ createSampleDir(t, tmpDirA)
+
+ content, err := TarResource(joinTrailingSep(tmpDirA, "file1"))
+ if err == nil {
+ content.Close()
+ t.Fatal("expected IsNotDir error, but got nil instead")
+ }
+
+ if !isNotDir(err) {
+ t.Fatalf("expected IsNotDir error, but got %T: %s", err, err)
+ }
+}
+
+// Test for error when SRC is a valid file or directory,
+// but the DST parent directory does not exist.
+func TestCopyErrDstParentNotExists(t *testing.T) {
+ tmpDirA, tmpDirB := getTestTempDirs(t)
+ defer removeAllPaths(tmpDirA, tmpDirB)
+
+ // Load A with some sample files and directories.
+ createSampleDir(t, tmpDirA)
+
+ srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false}
+
+ // Try with a file source.
+ content, err := TarResource(srcInfo.Path)
+ if err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+ defer content.Close()
+
+ // Copy to a file whose parent does not exist.
+ if err = CopyTo(content, srcInfo, filepath.Join(tmpDirB, "fakeParentDir", "file1")); err == nil {
+ t.Fatal("expected IsNotExist error, but got nil instead")
+ }
+
+ if !os.IsNotExist(err) {
+ t.Fatalf("expected IsNotExist error, but got %T: %s", err, err)
+ }
+
+ // Try with a directory source.
+ srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true}
+
+ content, err = TarResource(srcInfo.Path)
+ if err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+ defer content.Close()
+
+ // Copy to a directory whose parent does not exist.
+ if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "fakeParentDir", "fakeDstDir")); err == nil {
+ t.Fatal("expected IsNotExist error, but got nil instead")
+ }
+
+ if !os.IsNotExist(err) {
+ t.Fatalf("expected IsNotExist error, but got %T: %s", err, err)
+ }
+}
+
+// Test for error when DST ends in a trailing
+// path separator but exists as a file.
+func TestCopyErrDstNotDir(t *testing.T) {
+ tmpDirA, tmpDirB := getTestTempDirs(t)
+ defer removeAllPaths(tmpDirA, tmpDirB)
+
+ // Load A and B with some sample files and directories.
+ createSampleDir(t, tmpDirA)
+ createSampleDir(t, tmpDirB)
+
+ // Try with a file source.
+ srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false}
+
+ content, err := TarResource(srcInfo.Path)
+ if err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+ defer content.Close()
+
+ if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil {
+ t.Fatal("expected IsNotDir error, but got nil instead")
+ }
+
+ if !isNotDir(err) {
+ t.Fatalf("expected IsNotDir error, but got %T: %s", err, err)
+ }
+
+ // Try with a directory source.
+ srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true}
+
+ content, err = TarResource(srcInfo.Path)
+ if err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+ defer content.Close()
+
+ if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil {
+ t.Fatal("expected IsNotDir error, but got nil instead")
+ }
+
+ if !isNotDir(err) {
+ t.Fatalf("expected IsNotDir error, but got %T: %s", err, err)
+ }
+}
+
+// Possibilities are reduced to the remaining 10 cases:
+//
+// case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action
+// ===================================================================================================
+// A | no | - | no | - | no | create file
+// B | no | - | no | - | yes | error
+// C | no | - | yes | no | - | overwrite file
+// D | no | - | yes | yes | - | create file in dst dir
+// E | yes | no | no | - | - | create dir, copy contents
+// F | yes | no | yes | no | - | error
+// G | yes | no | yes | yes | - | copy dir and contents
+// H | yes | yes | no | - | - | create dir, copy contents
+// I | yes | yes | yes | no | - | error
+// J | yes | yes | yes | yes | - | copy dir contents
+//
+
+// A. SRC specifies a file and DST (no trailing path separator) doesn't
+// exist. This should create a file with the name DST and copy the
+// contents of the source file into it.
+func TestCopyCaseA(t *testing.T) {
+ tmpDirA, tmpDirB := getTestTempDirs(t)
+ defer removeAllPaths(tmpDirA, tmpDirB)
+
+ // Load A with some sample files and directories.
+ createSampleDir(t, tmpDirA)
+
+ srcPath := filepath.Join(tmpDirA, "file1")
+ dstPath := filepath.Join(tmpDirB, "itWorks.txt")
+
+ var err error
+
+ if err = testCopyHelper(t, srcPath, dstPath); err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+
+ if err = fileContentsEqual(t, srcPath, dstPath); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// B. SRC specifies a file and DST (with trailing path separator) doesn't
+// exist. This should cause an error because the copy operation cannot
+// create a directory when copying a single file.
+func TestCopyCaseB(t *testing.T) {
+ tmpDirA, tmpDirB := getTestTempDirs(t)
+ defer removeAllPaths(tmpDirA, tmpDirB)
+
+ // Load A with some sample files and directories.
+ createSampleDir(t, tmpDirA)
+
+ srcPath := filepath.Join(tmpDirA, "file1")
+ dstDir := joinTrailingSep(tmpDirB, "testDir")
+
+ var err error
+
+ if err = testCopyHelper(t, srcPath, dstDir); err == nil {
+ t.Fatal("expected ErrDirNotExists error, but got nil instead")
+ }
+
+ if err != ErrDirNotExists {
+ t.Fatalf("expected ErrDirNotExists error, but got %T: %s", err, err)
+ }
+}
+
+// C. SRC specifies a file and DST exists as a file. This should overwrite
+// the file at DST with the contents of the source file.
+func TestCopyCaseC(t *testing.T) {
+ tmpDirA, tmpDirB := getTestTempDirs(t)
+ defer removeAllPaths(tmpDirA, tmpDirB)
+
+ // Load A and B with some sample files and directories.
+ createSampleDir(t, tmpDirA)
+ createSampleDir(t, tmpDirB)
+
+ srcPath := filepath.Join(tmpDirA, "file1")
+ dstPath := filepath.Join(tmpDirB, "file2")
+
+ var err error
+
+ // Ensure they start out different.
+ if err = fileContentsEqual(t, srcPath, dstPath); err == nil {
+ t.Fatal("expected different file contents")
+ }
+
+ if err = testCopyHelper(t, srcPath, dstPath); err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+
+ if err = fileContentsEqual(t, srcPath, dstPath); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// D. SRC specifies a file and DST exists as a directory. This should place
+// a copy of the source file inside it using the basename from SRC. Ensure
+// this works whether DST has a trailing path separator or not.
+func TestCopyCaseD(t *testing.T) {
+ tmpDirA, tmpDirB := getTestTempDirs(t)
+ defer removeAllPaths(tmpDirA, tmpDirB)
+
+ // Load A and B with some sample files and directories.
+ createSampleDir(t, tmpDirA)
+ createSampleDir(t, tmpDirB)
+
+ srcPath := filepath.Join(tmpDirA, "file1")
+ dstDir := filepath.Join(tmpDirB, "dir1")
+ dstPath := filepath.Join(dstDir, "file1")
+
+ var err error
+
+ // Ensure that dstPath doesn't exist.
+ if _, err = os.Stat(dstPath); !os.IsNotExist(err) {
+ t.Fatalf("did not expect dstPath %q to exist", dstPath)
+ }
+
+ if err = testCopyHelper(t, srcPath, dstDir); err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+
+ if err = fileContentsEqual(t, srcPath, dstPath); err != nil {
+ t.Fatal(err)
+ }
+
+ // Now try again but using a trailing path separator for dstDir.
+
+ if err = os.RemoveAll(dstDir); err != nil {
+ t.Fatalf("unable to remove dstDir: %s", err)
+ }
+
+ if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil {
+ t.Fatalf("unable to make dstDir: %s", err)
+ }
+
+ dstDir = joinTrailingSep(tmpDirB, "dir1")
+
+ if err = testCopyHelper(t, srcPath, dstDir); err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+
+ if err = fileContentsEqual(t, srcPath, dstPath); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// E. SRC specifies a directory and DST does not exist. This should create a
+// directory at DST and copy the contents of the SRC directory into the DST
+// directory. Ensure this works whether DST has a trailing path separator or
+// not.
+func TestCopyCaseE(t *testing.T) {
+ tmpDirA, tmpDirB := getTestTempDirs(t)
+ defer removeAllPaths(tmpDirA, tmpDirB)
+
+ // Load A with some sample files and directories.
+ createSampleDir(t, tmpDirA)
+
+ srcDir := filepath.Join(tmpDirA, "dir1")
+ dstDir := filepath.Join(tmpDirB, "testDir")
+
+ var err error
+
+ if err = testCopyHelper(t, srcDir, dstDir); err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+
+ if err = dirContentsEqual(t, dstDir, srcDir); err != nil {
+ t.Log("dir contents not equal")
+ logDirContents(t, tmpDirA)
+ logDirContents(t, tmpDirB)
+ t.Fatal(err)
+ }
+
+ // Now try again but using a trailing path separator for dstDir.
+
+ if err = os.RemoveAll(dstDir); err != nil {
+ t.Fatalf("unable to remove dstDir: %s", err)
+ }
+
+ dstDir = joinTrailingSep(tmpDirB, "testDir")
+
+ if err = testCopyHelper(t, srcDir, dstDir); err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+
+ if err = dirContentsEqual(t, dstDir, srcDir); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// F. SRC specifies a directory and DST exists as a file. This should cause an
+// error as it is not possible to overwrite a file with a directory.
+func TestCopyCaseF(t *testing.T) {
+ tmpDirA, tmpDirB := getTestTempDirs(t)
+ defer removeAllPaths(tmpDirA, tmpDirB)
+
+ // Load A and B with some sample files and directories.
+ createSampleDir(t, tmpDirA)
+ createSampleDir(t, tmpDirB)
+
+ srcDir := filepath.Join(tmpDirA, "dir1")
+ dstFile := filepath.Join(tmpDirB, "file1")
+
+ var err error
+
+ if err = testCopyHelper(t, srcDir, dstFile); err == nil {
+ t.Fatal("expected ErrCannotCopyDir error, but got nil instead")
+ }
+
+ if err != ErrCannotCopyDir {
+ t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err)
+ }
+}
+
+// G. SRC specifies a directory and DST exists as a directory. This should copy
+// the SRC directory and all its contents to the DST directory. Ensure this
+// works whether DST has a trailing path separator or not.
+func TestCopyCaseG(t *testing.T) {
+ tmpDirA, tmpDirB := getTestTempDirs(t)
+ defer removeAllPaths(tmpDirA, tmpDirB)
+
+ // Load A and B with some sample files and directories.
+ createSampleDir(t, tmpDirA)
+ createSampleDir(t, tmpDirB)
+
+ srcDir := filepath.Join(tmpDirA, "dir1")
+ dstDir := filepath.Join(tmpDirB, "dir2")
+ resultDir := filepath.Join(dstDir, "dir1")
+
+ var err error
+
+ if err = testCopyHelper(t, srcDir, dstDir); err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+
+ if err = dirContentsEqual(t, resultDir, srcDir); err != nil {
+ t.Fatal(err)
+ }
+
+ // Now try again but using a trailing path separator for dstDir.
+
+ if err = os.RemoveAll(dstDir); err != nil {
+ t.Fatalf("unable to remove dstDir: %s", err)
+ }
+
+ if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil {
+ t.Fatalf("unable to make dstDir: %s", err)
+ }
+
+ dstDir = joinTrailingSep(tmpDirB, "dir2")
+
+ if err = testCopyHelper(t, srcDir, dstDir); err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+
+ if err = dirContentsEqual(t, resultDir, srcDir); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// H. SRC specifies a directory's contents only and DST does not exist. This
+// should create a directory at DST and copy the contents of the SRC
+// directory (but not the directory itself) into the DST directory. Ensure
+// this works whether DST has a trailing path separator or not.
+func TestCopyCaseH(t *testing.T) {
+ tmpDirA, tmpDirB := getTestTempDirs(t)
+ defer removeAllPaths(tmpDirA, tmpDirB)
+
+ // Load A with some sample files and directories.
+ createSampleDir(t, tmpDirA)
+
+ srcDir := joinTrailingSep(tmpDirA, "dir1") + "."
+ dstDir := filepath.Join(tmpDirB, "testDir")
+
+ var err error
+
+ if err = testCopyHelper(t, srcDir, dstDir); err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+
+ if err = dirContentsEqual(t, dstDir, srcDir); err != nil {
+ t.Log("dir contents not equal")
+ logDirContents(t, tmpDirA)
+ logDirContents(t, tmpDirB)
+ t.Fatal(err)
+ }
+
+ // Now try again but using a trailing path separator for dstDir.
+
+ if err = os.RemoveAll(dstDir); err != nil {
+ t.Fatalf("unable to remove dstDir: %s", err)
+ }
+
+ dstDir = joinTrailingSep(tmpDirB, "testDir")
+
+ if err = testCopyHelper(t, srcDir, dstDir); err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+
+ if err = dirContentsEqual(t, dstDir, srcDir); err != nil {
+ t.Log("dir contents not equal")
+ logDirContents(t, tmpDirA)
+ logDirContents(t, tmpDirB)
+ t.Fatal(err)
+ }
+}
+
+// I. SRC specifies a directory's contents only and DST exists as a file. This
+// should cause an error as it is not possible to overwrite a file with a
+// directory.
+func TestCopyCaseI(t *testing.T) {
+ tmpDirA, tmpDirB := getTestTempDirs(t)
+ defer removeAllPaths(tmpDirA, tmpDirB)
+
+ // Load A and B with some sample files and directories.
+ createSampleDir(t, tmpDirA)
+ createSampleDir(t, tmpDirB)
+
+ srcDir := joinTrailingSep(tmpDirA, "dir1") + "."
+ dstFile := filepath.Join(tmpDirB, "file1")
+
+ var err error
+
+ if err = testCopyHelper(t, srcDir, dstFile); err == nil {
+ t.Fatal("expected ErrCannotCopyDir error, but got nil instead")
+ }
+
+ if err != ErrCannotCopyDir {
+ t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err)
+ }
+}
+
+// J. SRC specifies a directory's contents only and DST exists as a directory.
+// This should copy the contents of the SRC directory (but not the directory
+// itself) into the DST directory. Ensure this works whether DST has a
+// trailing path separator or not.
+func TestCopyCaseJ(t *testing.T) {
+ tmpDirA, tmpDirB := getTestTempDirs(t)
+ defer removeAllPaths(tmpDirA, tmpDirB)
+
+ // Load A and B with some sample files and directories.
+ createSampleDir(t, tmpDirA)
+ createSampleDir(t, tmpDirB)
+
+ srcDir := joinTrailingSep(tmpDirA, "dir1") + "."
+ dstDir := filepath.Join(tmpDirB, "dir5")
+
+ var err error
+
+ if err = testCopyHelper(t, srcDir, dstDir); err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+
+ if err = dirContentsEqual(t, dstDir, srcDir); err != nil {
+ t.Fatal(err)
+ }
+
+ // Now try again but using a trailing path separator for dstDir.
+
+ if err = os.RemoveAll(dstDir); err != nil {
+ t.Fatalf("unable to remove dstDir: %s", err)
+ }
+
+ if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil {
+ t.Fatalf("unable to make dstDir: %s", err)
+ }
+
+ dstDir = joinTrailingSep(tmpDirB, "dir5")
+
+ if err = testCopyHelper(t, srcDir, dstDir); err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+
+ if err = dirContentsEqual(t, dstDir, srcDir); err != nil {
+ t.Fatal(err)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff.go
new file mode 100644
index 0000000..aed8542
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff.go
@@ -0,0 +1,194 @@
+package archive
+
+import (
+ "archive/tar"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "syscall"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/pkg/pools"
+ "github.com/docker/docker/pkg/system"
+)
+
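+// UnpackLayer unpacks the contents of layer into the directory dest,
+// applying AUFS-style whiteouts along the way. It returns the cumulative
+// size in bytes of the layer's entries.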
+func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
+ tr := tar.NewReader(layer)
+ trBuf := pools.BufioReader32KPool.Get(tr)
+ defer pools.BufioReader32KPool.Put(trBuf)
+
+ var dirs []*tar.Header
+
+ aufsTempdir := ""
+ aufsHardlinks := make(map[string]*tar.Header)
+
+ // Iterate through the files in the archive.
+ for {
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ // end of tar archive
+ break
+ }
+ if err != nil {
+ return 0, err
+ }
+
+ size += hdr.Size
+
+ // Normalize name, for safety and for a simple is-root check
+ hdr.Name = filepath.Clean(hdr.Name)
+
+		// Windows does not support filenames with colons in them. Ignore
+		// these files rather than failing. This is not a problem in
+		// practice: a client whose daemon runs on Windows has no reason to
+		// pull a Linux image such as Ubuntu (which has files with colons in
+		// their names under /usr/share/man/man3); it would only make sense
+		// for it to pull Windows images. For development, however, it must
+		// remain possible to pull the Linux images that are in the
+		// repository.
+		//
+		// TODO Windows. Once the registry is aware of what images are Windows-
+		// specific or Linux-specific, this warning should be changed to an error
+		// to cater for the situation where someone does manage to upload a Linux
+		// image but have it tagged as Windows inadvertently.
+ if runtime.GOOS == "windows" {
+ if strings.Contains(hdr.Name, ":") {
+ logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
+ continue
+ }
+ }
+
+		// Note: because these operations are platform-specific, so is the
+		// path separator used here.
+		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+			// Not the root directory; ensure that the parent directory
+			// exists. This came up in tests where an image's tarfile lacked
+			// parent directory entries.
+ parent := filepath.Dir(hdr.Name)
+ parentPath := filepath.Join(dest, parent)
+
+ if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+ err = system.MkdirAll(parentPath, 0600)
+ if err != nil {
+ return 0, err
+ }
+ }
+ }
+
+ // Skip AUFS metadata dirs
+ if strings.HasPrefix(hdr.Name, ".wh..wh.") {
+			// Regular files inside /.wh..wh.plnk can be used as hardlink
+			// targets. We don't want this directory itself, but we need the
+			// files in it so that such hardlinks can be resolved.
+ if strings.HasPrefix(hdr.Name, ".wh..wh.plnk") && hdr.Typeflag == tar.TypeReg {
+ basename := filepath.Base(hdr.Name)
+ aufsHardlinks[basename] = hdr
+ if aufsTempdir == "" {
+ if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
+ return 0, err
+ }
+ defer os.RemoveAll(aufsTempdir)
+ }
+ if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil); err != nil {
+ return 0, err
+ }
+ }
+ continue
+ }
+ path := filepath.Join(dest, hdr.Name)
+ rel, err := filepath.Rel(dest, path)
+ if err != nil {
+ return 0, err
+ }
+
+		// Note: because these operations are platform-specific, so is the
+		// path separator used here.
+ if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+ return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+ }
+ base := filepath.Base(path)
+
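+		// An entry whose basename is prefixed with ".wh." is an AUFS
+		// whiteout: it marks the corresponding path as deleted in this
+		// layer, so remove it from dest instead of extracting it.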
+ if strings.HasPrefix(base, ".wh.") {
+ originalBase := base[len(".wh."):]
+ originalPath := filepath.Join(filepath.Dir(path), originalBase)
+ if err := os.RemoveAll(originalPath); err != nil {
+ return 0, err
+ }
+ } else {
+			// If path exists we almost always just want to remove and replace it.
+ // The only exception is when it is a directory *and* the file from
+ // the layer is also a directory. Then we want to merge them (i.e.
+ // just apply the metadata from the layer).
+ if fi, err := os.Lstat(path); err == nil {
+ if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
+ if err := os.RemoveAll(path); err != nil {
+ return 0, err
+ }
+ }
+ }
+
+ trBuf.Reset(tr)
+ srcData := io.Reader(trBuf)
+ srcHdr := hdr
+
+			// Hard links into /.wh..wh.plnk don't work, as we don't extract
+			// that directory; instead, retarget them at the temporary files
+			// we extracted those targets into above.
+ if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), ".wh..wh.plnk") {
+ linkBasename := filepath.Base(hdr.Linkname)
+ srcHdr = aufsHardlinks[linkBasename]
+ if srcHdr == nil {
+ return 0, fmt.Errorf("Invalid aufs hardlink")
+ }
+ tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
+ if err != nil {
+ return 0, err
+ }
+ defer tmpFile.Close()
+ srcData = tmpFile
+ }
+
+ if err := createTarFile(path, dest, srcHdr, srcData, true, nil); err != nil {
+ return 0, err
+ }
+
+			// Directory mtimes must be handled at the end; otherwise,
+			// creating further files inside them would modify the mtime again
+ if hdr.Typeflag == tar.TypeDir {
+ dirs = append(dirs, hdr)
+ }
+ }
+ }
+
+ for _, hdr := range dirs {
+ path := filepath.Join(dest, hdr.Name)
+ ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
+ if err := syscall.UtimesNano(path, ts); err != nil {
+ return 0, err
+ }
+ }
+
+ return size, nil
+}
+
+// ApplyLayer parses a diff in the standard layer format from `layer`, and
+// applies it to the directory `dest`. Returns the size in bytes of the
+// contents of the layer.
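+//
+// Illustrative usage (the file name is hypothetical; an *os.File satisfies
+// the reader conversion, mirroring the tests in this package):
+//
+//	f, _ := os.Open("layer.tar")
+//	n, err := ApplyLayer("/tmp/rootfs", ArchiveReader(f))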
+func ApplyLayer(dest string, layer ArchiveReader) (int64, error) {
+ dest = filepath.Clean(dest)
+
+ // We need to be able to set any perms
+ oldmask, err := system.Umask(0)
+ if err != nil {
+ return 0, err
+ }
+ defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform
+
+ layer, err = DecompressStream(layer)
+ if err != nil {
+ return 0, err
+ }
+ return UnpackLayer(dest, layer)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff_test.go
new file mode 100644
index 0000000..01ed437
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff_test.go
@@ -0,0 +1,190 @@
+package archive
+
+import (
+ "archive/tar"
+ "testing"
+)
+
+func TestApplyLayerInvalidFilenames(t *testing.T) {
+ for i, headers := range [][]*tar.Header{
+ {
+ {
+ Name: "../victim/dotdot",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ {
+ {
+ // Note the leading slash
+ Name: "/../victim/slash-dotdot",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ } {
+ if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidFilenames", headers); err != nil {
+ t.Fatalf("i=%d. %v", i, err)
+ }
+ }
+}
+
+func TestApplyLayerInvalidHardlink(t *testing.T) {
+ for i, headers := range [][]*tar.Header{
+ { // try reading victim/hello (../)
+ {
+ Name: "dotdot",
+ Typeflag: tar.TypeLink,
+ Linkname: "../victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // try reading victim/hello (/../)
+ {
+ Name: "slash-dotdot",
+ Typeflag: tar.TypeLink,
+ // Note the leading slash
+ Linkname: "/../victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // try writing victim/file
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeLink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "loophole-victim/file",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ { // try reading victim/hello (hardlink, symlink)
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeLink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "symlink",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "loophole-victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // Try reading victim/hello (hardlink, hardlink)
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeLink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "hardlink",
+ Typeflag: tar.TypeLink,
+ Linkname: "loophole-victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // Try removing victim directory (hardlink)
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeLink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ } {
+ if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidHardlink", headers); err != nil {
+ t.Fatalf("i=%d. %v", i, err)
+ }
+ }
+}
+
+func TestApplyLayerInvalidSymlink(t *testing.T) {
+ for i, headers := range [][]*tar.Header{
+ { // try reading victim/hello (../)
+ {
+ Name: "dotdot",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "../victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // try reading victim/hello (/../)
+ {
+ Name: "slash-dotdot",
+ Typeflag: tar.TypeSymlink,
+ // Note the leading slash
+ Linkname: "/../victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // try writing victim/file
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "loophole-victim/file",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ { // try reading victim/hello (symlink, symlink)
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "symlink",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "loophole-victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // try reading victim/hello (symlink, hardlink)
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "hardlink",
+ Typeflag: tar.TypeLink,
+ Linkname: "loophole-victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // try removing victim directory (symlink)
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ } {
+ if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidSymlink", headers); err != nil {
+ t.Fatalf("i=%d. %v", i, err)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/example_changes.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/example_changes.go
new file mode 100644
index 0000000..cedd46a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/example_changes.go
@@ -0,0 +1,97 @@
+// +build ignore
+
+// Simple tool to create an archive stream from an old and new directory
+//
+// By default it will stream the comparison of two temporary directories with junk files
+package main
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/pkg/archive"
+)
+
+var (
+ flDebug = flag.Bool("D", false, "debugging output")
+ flNewDir = flag.String("newdir", "", "")
+ flOldDir = flag.String("olddir", "", "")
+ log = logrus.New()
+)
+
+func main() {
+ flag.Usage = func() {
+ fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
+ fmt.Printf("%s [OPTIONS]\n", os.Args[0])
+ flag.PrintDefaults()
+ }
+ flag.Parse()
+ log.Out = os.Stderr
+ if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
+ logrus.SetLevel(logrus.DebugLevel)
+ }
+ var newDir, oldDir string
+
+ if len(*flNewDir) == 0 {
+ var err error
+ newDir, err = ioutil.TempDir("", "docker-test-newDir")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer os.RemoveAll(newDir)
+ if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
+ log.Fatal(err)
+ }
+ } else {
+ newDir = *flNewDir
+ }
+
+	if len(*flOldDir) == 0 {
+		var err error
+		// Assign to the outer oldDir; using := here would shadow it and
+		// leave the outer variable empty.
+		oldDir, err = ioutil.TempDir("", "docker-test-oldDir")
+		if err != nil {
+			log.Fatal(err)
+		}
+		defer os.RemoveAll(oldDir)
+	} else {
+		oldDir = *flOldDir
+	}
+
+ changes, err := archive.ChangesDirs(newDir, oldDir)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ a, err := archive.ExportChanges(newDir, changes)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer a.Close()
+
+ i, err := io.Copy(os.Stdout, a)
+ if err != nil && err != io.EOF {
+ log.Fatal(err)
+ }
+ fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
+}
+
+func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
+ fileData := []byte("fooo")
+ for n := 0; n < numberOfFiles; n++ {
+ fileName := fmt.Sprintf("file-%d", n)
+ if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
+ return 0, err
+ }
+ if makeLinks {
+ if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
+ return 0, err
+ }
+ }
+ }
+ totalSize := numberOfFiles * len(fileData)
+ return totalSize, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/testdata/broken.tar b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/testdata/broken.tar
new file mode 100644
index 0000000..8f10ea6
Binary files /dev/null and b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/testdata/broken.tar differ
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_linux.go
new file mode 100644
index 0000000..3448569
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_linux.go
@@ -0,0 +1,16 @@
+package archive
+
+import (
+ "syscall"
+ "time"
+)
+
+func timeToTimespec(time time.Time) (ts syscall.Timespec) {
+ if time.IsZero() {
+ // Return UTIME_OMIT special value
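+		// ((1 << 30) - 2) is UTIME_OMIT: utimensat(2) leaves a timestamp
+		// set to this special value unchanged.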
+ ts.Sec = 0
+ ts.Nsec = ((1 << 30) - 2)
+ return
+ }
+ return syscall.NsecToTimespec(time.UnixNano())
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_unsupported.go
new file mode 100644
index 0000000..e85aac0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_unsupported.go
@@ -0,0 +1,16 @@
+// +build !linux
+
+package archive
+
+import (
+ "syscall"
+ "time"
+)
+
+func timeToTimespec(time time.Time) (ts syscall.Timespec) {
+ nsec := int64(0)
+ if !time.IsZero() {
+ nsec = time.UnixNano()
+ }
+ return syscall.NsecToTimespec(nsec)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/utils_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/utils_test.go
new file mode 100644
index 0000000..f5cacea
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/utils_test.go
@@ -0,0 +1,166 @@
+package archive
+
+import (
+ "archive/tar"
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "time"
+)
+
+var testUntarFns = map[string]func(string, io.Reader) error{
+ "untar": func(dest string, r io.Reader) error {
+ return Untar(r, dest, nil)
+ },
+ "applylayer": func(dest string, r io.Reader) error {
+ _, err := ApplyLayer(dest, ArchiveReader(r))
+ return err
+ },
+}
+
+// testBreakout is a helper function that, within the provided `tmpdir` directory,
+// creates a `victim` folder with a generated `hello` file in it.
+// `untar` then extracts the tar file created from `headers` into a directory named `dest`.
+//
+// Here are the tested scenarios:
+// - removed `victim` folder (write)
+// - removed files from `victim` folder (write)
+// - new files in `victim` folder (write)
+// - modified files in `victim` folder (write)
+// - file in `dest` with same content as `victim/hello` (read)
+//
+// When using testBreakout make sure you cover one of the scenarios listed above.
+func testBreakout(untarFn string, tmpdir string, headers []*tar.Header) error {
+ tmpdir, err := ioutil.TempDir("", tmpdir)
+ if err != nil {
+ return err
+ }
+ defer os.RemoveAll(tmpdir)
+
+ dest := filepath.Join(tmpdir, "dest")
+ if err := os.Mkdir(dest, 0755); err != nil {
+ return err
+ }
+
+ victim := filepath.Join(tmpdir, "victim")
+ if err := os.Mkdir(victim, 0755); err != nil {
+ return err
+ }
+ hello := filepath.Join(victim, "hello")
+ helloData, err := time.Now().MarshalText()
+ if err != nil {
+ return err
+ }
+ if err := ioutil.WriteFile(hello, helloData, 0644); err != nil {
+ return err
+ }
+ helloStat, err := os.Stat(hello)
+ if err != nil {
+ return err
+ }
+
+ reader, writer := io.Pipe()
+ go func() {
+ t := tar.NewWriter(writer)
+ for _, hdr := range headers {
+ t.WriteHeader(hdr)
+ }
+ t.Close()
+ }()
+
+ untar := testUntarFns[untarFn]
+ if untar == nil {
+ return fmt.Errorf("could not find untar function %q in testUntarFns", untarFn)
+ }
+ if err := untar(dest, reader); err != nil {
+ if _, ok := err.(breakoutError); !ok {
+ // If untar returns an error unrelated to an archive breakout,
+ // then consider this an unexpected error and abort.
+ return err
+ }
+ // Here, untar detected the breakout.
+ // Let's move on verifying that indeed there was no breakout.
+ fmt.Printf("breakoutError: %v\n", err)
+ }
+
+ // Check victim folder
+ f, err := os.Open(victim)
+ if err != nil {
+ // codepath taken if victim folder was removed
+ return fmt.Errorf("archive breakout: error reading %q: %v", victim, err)
+ }
+ defer f.Close()
+
+ // Check contents of victim folder
+ //
+ // We are only interested in getting 2 files from the victim folder, because if all is well
+ // we expect only one result, the `hello` file. If there is a second result, it cannot
+ // hold the same name `hello` and we assume that a new file got created in the victim folder.
+ // That is enough to detect an archive breakout.
+ names, err := f.Readdirnames(2)
+ if err != nil {
+ // codepath taken if victim is not a folder
+ return fmt.Errorf("archive breakout: error reading directory content of %q: %v", victim, err)
+ }
+ for _, name := range names {
+ if name != "hello" {
+ // codepath taken if new file was created in victim folder
+ return fmt.Errorf("archive breakout: new file %q", name)
+ }
+ }
+
+ // Check victim/hello
+ f, err = os.Open(hello)
+ if err != nil {
+ // codepath taken if read permissions were removed
+ return fmt.Errorf("archive breakout: could not lstat %q: %v", hello, err)
+ }
+ defer f.Close()
+ b, err := ioutil.ReadAll(f)
+ if err != nil {
+ return err
+ }
+ fi, err := f.Stat()
+ if err != nil {
+ return err
+ }
+ if helloStat.IsDir() != fi.IsDir() ||
+ // TODO: cannot check for fi.ModTime() change
+ helloStat.Mode() != fi.Mode() ||
+ helloStat.Size() != fi.Size() ||
+ !bytes.Equal(helloData, b) {
+ // codepath taken if hello has been modified
+ return fmt.Errorf("archive breakout: file %q has been modified. Contents: expected=%q, got=%q. FileInfo: expected=%#v, got=%#v", hello, helloData, b, helloStat, fi)
+ }
+
+ // Check that nothing in dest/ has the same content as victim/hello.
+ // Since victim/hello was generated with time.Now(), it is safe to assume
+ // that any file whose content matches exactly victim/hello, managed somehow
+ // to access victim/hello.
+ return filepath.Walk(dest, func(path string, info os.FileInfo, err error) error {
+ if info.IsDir() {
+ if err != nil {
+ // skip directory if error
+ return filepath.SkipDir
+ }
+ // enter directory
+ return nil
+ }
+ if err != nil {
+ // skip file if error
+ return nil
+ }
+ b, err := ioutil.ReadFile(path)
+ if err != nil {
+ // Houston, we have a problem. Aborting (space)walk.
+ return err
+ }
+ if bytes.Equal(helloData, b) {
+ return fmt.Errorf("archive breakout: file %q has been accessed via %q", hello, path)
+ }
+ return nil
+ })
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/wrap.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/wrap.go
new file mode 100644
index 0000000..dfb335c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/wrap.go
@@ -0,0 +1,59 @@
+package archive
+
+import (
+ "archive/tar"
+ "bytes"
+ "io/ioutil"
+)
+
+// Generate generates a new archive from the content provided
+// as input.
+//
+// `files` is a sequence of path/content pairs. A new file is
+// added to the archive for each pair.
+// If the last pair is incomplete, the file is created with
+// empty content. For example:
+//
+// Generate("foo.txt", "hello world", "emptyfile")
+//
+// The above call will return an archive with 2 files:
+// * ./foo.txt with content "hello world"
+// * ./emptyfile with empty content
+//
+// FIXME: stream content instead of buffering
+// FIXME: specify permissions and other archive metadata
+func Generate(input ...string) (Archive, error) {
+ files := parseStringPairs(input...)
+ buf := new(bytes.Buffer)
+ tw := tar.NewWriter(buf)
+ for _, file := range files {
+ name, content := file[0], file[1]
+ hdr := &tar.Header{
+ Name: name,
+ Size: int64(len(content)),
+ }
+ if err := tw.WriteHeader(hdr); err != nil {
+ return nil, err
+ }
+ if _, err := tw.Write([]byte(content)); err != nil {
+ return nil, err
+ }
+ }
+ if err := tw.Close(); err != nil {
+ return nil, err
+ }
+ return ioutil.NopCloser(buf), nil
+}
+
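+// parseStringPairs groups the variadic input into (name, content) pairs,
+// padding a trailing unpaired name with empty content. For example
+// (illustrative): parseStringPairs("a", "1", "b") returns
+// [][2]string{{"a", "1"}, {"b", ""}}.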
+func parseStringPairs(input ...string) (output [][2]string) {
+ output = make([][2]string, 0, len(input)/2+1)
+ for i := 0; i < len(input); i += 2 {
+ var pair [2]string
+ pair[0] = input[i]
+ if i+1 < len(input) {
+ pair[1] = input[i+1]
+ }
+ output = append(output, pair)
+ }
+ return
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/wrap_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/wrap_test.go
new file mode 100644
index 0000000..46ab366
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/wrap_test.go
@@ -0,0 +1,98 @@
+package archive
+
+import (
+ "archive/tar"
+ "bytes"
+ "io"
+ "testing"
+)
+
+func TestGenerateEmptyFile(t *testing.T) {
+ archive, err := Generate("emptyFile")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if archive == nil {
+ t.Fatal("The generated archive should not be nil.")
+ }
+
+ expectedFiles := [][]string{
+ {"emptyFile", ""},
+ }
+
+ tr := tar.NewReader(archive)
+ actualFiles := make([][]string, 0, 10)
+ i := 0
+ for {
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ buf := new(bytes.Buffer)
+ buf.ReadFrom(tr)
+ content := buf.String()
+ actualFiles = append(actualFiles, []string{hdr.Name, content})
+ i++
+ }
+ if len(actualFiles) != len(expectedFiles) {
+ t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles))
+ }
+ for i := 0; i < len(expectedFiles); i++ {
+ actual := actualFiles[i]
+ expected := expectedFiles[i]
+ if actual[0] != expected[0] {
+ t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0])
+ }
+ if actual[1] != expected[1] {
+ t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1])
+ }
+ }
+}
+
+func TestGenerateWithContent(t *testing.T) {
+ archive, err := Generate("file", "content")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if archive == nil {
+ t.Fatal("The generated archive should not be nil.")
+ }
+
+ expectedFiles := [][]string{
+ {"file", "content"},
+ }
+
+ tr := tar.NewReader(archive)
+ actualFiles := make([][]string, 0, 10)
+ i := 0
+ for {
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ buf := new(bytes.Buffer)
+ buf.ReadFrom(tr)
+ content := buf.String()
+ actualFiles = append(actualFiles, []string{hdr.Name, content})
+ i++
+ }
+ if len(actualFiles) != len(expectedFiles) {
+ t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles))
+ }
+ for i := 0; i < len(expectedFiles); i++ {
+ actual := actualFiles[i]
+ expected := expectedFiles[i]
+ if actual[0] != expected[0] {
+ t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0])
+ }
+ if actual[1] != expected[1] {
+ t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1])
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils.go
new file mode 100644
index 0000000..3eaf7f8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils.go
@@ -0,0 +1,196 @@
+package fileutils
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/Sirupsen/logrus"
+)
+
+// exclusion returns true if the specified pattern is an exclusion
+func exclusion(pattern string) bool {
+ return pattern[0] == '!'
+}
+
+// empty returns true if the specified pattern is empty
+func empty(pattern string) bool {
+ return pattern == ""
+}
+
+// CleanPatterns takes a slice of patterns and returns a new slice cleaned
+// with filepath.Clean and stripped of any empty patterns, together with the
+// per-pattern path segments and a flag telling the caller whether the slice
+// contains any exception patterns (prefixed with !).
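+//
+// For example (illustrative): CleanPatterns([]string{"docs/", "!docs/README.md"})
+// returns []string{"docs", "!docs/README.md"}, the path segments
+// [][]string{{"docs"}, {"docs", "README.md"}}, and exceptions == true.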
+func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) {
+ // Loop over exclusion patterns and:
+ // 1. Clean them up.
+ // 2. Indicate whether we are dealing with any exception rules.
+	// 3. Error if we see a single exclusion marker on its own (!).
+ cleanedPatterns := []string{}
+ patternDirs := [][]string{}
+ exceptions := false
+ for _, pattern := range patterns {
+ // Eliminate leading and trailing whitespace.
+ pattern = strings.TrimSpace(pattern)
+ if empty(pattern) {
+ continue
+ }
+ if exclusion(pattern) {
+ if len(pattern) == 1 {
+ return nil, nil, false, errors.New("Illegal exclusion pattern: !")
+ }
+ exceptions = true
+ }
+ pattern = filepath.Clean(pattern)
+ cleanedPatterns = append(cleanedPatterns, pattern)
+ if exclusion(pattern) {
+ pattern = pattern[1:]
+ }
+ patternDirs = append(patternDirs, strings.Split(pattern, "/"))
+ }
+
+ return cleanedPatterns, patternDirs, exceptions, nil
+}
+
+// Matches returns true if file matches any of the patterns
+// and isn't excluded by any of the subsequent patterns.
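+//
+// For example (illustrative, mirroring the tests): Matches("docs/README.md",
+// []string{"docs", "!docs/README.md"}) returns false, because the trailing
+// exception pattern un-matches the file.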
+func Matches(file string, patterns []string) (bool, error) {
+ file = filepath.Clean(file)
+
+ if file == "." {
+ // Don't let them exclude everything, kind of silly.
+ return false, nil
+ }
+
+ patterns, patDirs, _, err := CleanPatterns(patterns)
+ if err != nil {
+ return false, err
+ }
+
+ return OptimizedMatches(file, patterns, patDirs)
+}
+
+// OptimizedMatches is basically the same as fileutils.Matches() but optimized
+// for archive.go. It assumes that the inputs have been preprocessed, so it
+// doesn't need to do as much error checking and clean-up. This avoids
+// repeating those steps on each file being checked during the archive process.
+// The more generic fileutils.Matches() can't make these assumptions.
+func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) {
+ matched := false
+ parentPath := filepath.Dir(file)
+ parentPathDirs := strings.Split(parentPath, "/")
+
+ for i, pattern := range patterns {
+ negative := false
+
+ if exclusion(pattern) {
+ negative = true
+ pattern = pattern[1:]
+ }
+
+ match, err := filepath.Match(pattern, file)
+ if err != nil {
+ return false, err
+ }
+
+ if !match && parentPath != "." {
+ // Check to see if the pattern matches one of our parent dirs.
+ if len(patDirs[i]) <= len(parentPathDirs) {
+ match, _ = filepath.Match(strings.Join(patDirs[i], "/"),
+ strings.Join(parentPathDirs[:len(patDirs[i])], "/"))
+ }
+ }
+
+ if match {
+ matched = !negative
+ }
+ }
+
+ if matched {
+ logrus.Debugf("Skipping excluded path: %s", file)
+ }
+
+ return matched, nil
+}
+
+// CopyFile copies from src to dst until either EOF is reached
+// on src or an error occurs. It verifies that src exists and removes
+// dst if it already exists.
+func CopyFile(src, dst string) (int64, error) {
+ cleanSrc := filepath.Clean(src)
+ cleanDst := filepath.Clean(dst)
+ if cleanSrc == cleanDst {
+ return 0, nil
+ }
+ sf, err := os.Open(cleanSrc)
+ if err != nil {
+ return 0, err
+ }
+ defer sf.Close()
+ if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) {
+ return 0, err
+ }
+ df, err := os.Create(cleanDst)
+ if err != nil {
+ return 0, err
+ }
+ defer df.Close()
+ return io.Copy(df, sf)
+}
+
+// GetTotalUsedFds returns the number of file descriptors currently in use by
+// the process, read via the /proc filesystem.
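+// On platforms without a /proc filesystem the read fails, the error is
+// logged, and -1 is returned.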
+func GetTotalUsedFds() int {
+ if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
+ logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
+ } else {
+ return len(fds)
+ }
+ return -1
+}
+
+// ReadSymlinkedDirectory returns the target directory of a symlink.
+// It returns an error if the symlink's target is not a directory.
+func ReadSymlinkedDirectory(path string) (string, error) {
+ var realPath string
+ var err error
+ if realPath, err = filepath.Abs(path); err != nil {
+ return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err)
+ }
+ if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
+ return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err)
+ }
+ realPathInfo, err := os.Stat(realPath)
+ if err != nil {
+ return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err)
+ }
+ if !realPathInfo.Mode().IsDir() {
+ return "", fmt.Errorf("canonical path points to a file '%s'", realPath)
+ }
+ return realPath, nil
+}
+
+// CreateIfNotExists creates a file or a directory only if it does not already exist.
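+// For example (illustrative): CreateIfNotExists("/tmp/a/b/c", false) creates
+// any missing parent directories and then an empty file named c.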
+func CreateIfNotExists(path string, isDir bool) error {
+ if _, err := os.Stat(path); err != nil {
+ if os.IsNotExist(err) {
+ if isDir {
+ return os.MkdirAll(path, 0755)
+ }
+ if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
+ return err
+ }
+ f, err := os.OpenFile(path, os.O_CREATE, 0755)
+ if err != nil {
+ return err
+ }
+ f.Close()
+ }
+ }
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils_test.go
new file mode 100644
index 0000000..b544ffb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils_test.go
@@ -0,0 +1,402 @@
+package fileutils
+
+import (
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+ "testing"
+)
+
+// CopyFile with invalid src
+func TestCopyFileWithInvalidSrc(t *testing.T) {
+ tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
+ defer os.RemoveAll(tempFolder)
+ if err != nil {
+ t.Fatal(err)
+ }
+ bytes, err := CopyFile("/invalid/file/path", path.Join(tempFolder, "dest"))
+ if err == nil {
+ t.Fatal("Should have fail to copy an invalid src file")
+ }
+ if bytes != 0 {
+ t.Fatal("Should have written 0 bytes")
+ }
+
+}
+
+// CopyFile with invalid dest
+func TestCopyFileWithInvalidDest(t *testing.T) {
+ tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
+ defer os.RemoveAll(tempFolder)
+ if err != nil {
+ t.Fatal(err)
+ }
+ src := path.Join(tempFolder, "file")
+ err = ioutil.WriteFile(src, []byte("content"), 0740)
+ if err != nil {
+ t.Fatal(err)
+ }
+ bytes, err := CopyFile(src, path.Join(tempFolder, "/invalid/dest/path"))
+ if err == nil {
+ t.Fatal("Should have fail to copy an invalid src file")
+ }
+ if bytes != 0 {
+ t.Fatal("Should have written 0 bytes")
+ }
+
+}
+
+// CopyFile with same src and dest
+func TestCopyFileWithSameSrcAndDest(t *testing.T) {
+ tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
+ defer os.RemoveAll(tempFolder)
+ if err != nil {
+ t.Fatal(err)
+ }
+ file := path.Join(tempFolder, "file")
+ err = ioutil.WriteFile(file, []byte("content"), 0740)
+ if err != nil {
+ t.Fatal(err)
+ }
+ bytes, err := CopyFile(file, file)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if bytes != 0 {
+ t.Fatal("Should have written 0 bytes as it is the same file.")
+ }
+}
+
+// CopyFile with same src and dest but path is different and not clean
+func TestCopyFileWithSameSrcAndDestWithPathNameDifferent(t *testing.T) {
+ tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
+ defer os.RemoveAll(tempFolder)
+ if err != nil {
+ t.Fatal(err)
+ }
+ testFolder := path.Join(tempFolder, "test")
+ err = os.MkdirAll(testFolder, 0740)
+ if err != nil {
+ t.Fatal(err)
+ }
+ file := path.Join(testFolder, "file")
+ sameFile := testFolder + "/../test/file"
+ err = ioutil.WriteFile(file, []byte("content"), 0740)
+ if err != nil {
+ t.Fatal(err)
+ }
+ bytes, err := CopyFile(file, sameFile)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if bytes != 0 {
+ t.Fatal("Should have written 0 bytes as it is the same file.")
+ }
+}
+
+func TestCopyFile(t *testing.T) {
+ tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
+ defer os.RemoveAll(tempFolder)
+ if err != nil {
+ t.Fatal(err)
+ }
+ src := path.Join(tempFolder, "src")
+ dest := path.Join(tempFolder, "dest")
+ ioutil.WriteFile(src, []byte("content"), 0777)
+ ioutil.WriteFile(dest, []byte("destContent"), 0777)
+ bytes, err := CopyFile(src, dest)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if bytes != 7 {
+ t.Fatalf("Should have written %d bytes but wrote %d", 7, bytes)
+ }
+ actual, err := ioutil.ReadFile(dest)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if string(actual) != "content" {
+ t.Fatalf("Dest content was '%s', expected '%s'", string(actual), "content")
+ }
+}
+
+// Reading a symlink to a directory must return the directory
+func TestReadSymlinkedDirectoryExistingDirectory(t *testing.T) {
+ var err error
+ if err = os.Mkdir("/tmp/testReadSymlinkToExistingDirectory", 0777); err != nil {
+ t.Errorf("failed to create directory: %s", err)
+ }
+
+ if err = os.Symlink("/tmp/testReadSymlinkToExistingDirectory", "/tmp/dirLinkTest"); err != nil {
+ t.Errorf("failed to create symlink: %s", err)
+ }
+
+ var path string
+ if path, err = ReadSymlinkedDirectory("/tmp/dirLinkTest"); err != nil {
+ t.Fatalf("failed to read symlink to directory: %s", err)
+ }
+
+ if path != "/tmp/testReadSymlinkToExistingDirectory" {
+ t.Fatalf("symlink returned unexpected directory: %s", path)
+ }
+
+ if err = os.Remove("/tmp/testReadSymlinkToExistingDirectory"); err != nil {
+ t.Errorf("failed to remove temporary directory: %s", err)
+ }
+
+ if err = os.Remove("/tmp/dirLinkTest"); err != nil {
+ t.Errorf("failed to remove symlink: %s", err)
+ }
+}
+
+// Reading a non-existing symlink must fail
+func TestReadSymlinkedDirectoryNonExistingSymlink(t *testing.T) {
+ var path string
+ var err error
+ if path, err = ReadSymlinkedDirectory("/tmp/test/foo/Non/ExistingPath"); err == nil {
+ t.Fatalf("error expected for non-existing symlink")
+ }
+
+ if path != "" {
+ t.Fatalf("expected empty path, but '%s' was returned", path)
+ }
+}
+
+// Reading a symlink to a file must fail
+func TestReadSymlinkedDirectoryToFile(t *testing.T) {
+ var err error
+ var file *os.File
+
+ if file, err = os.Create("/tmp/testReadSymlinkToFile"); err != nil {
+ t.Fatalf("failed to create file: %s", err)
+ }
+
+ file.Close()
+
+ if err = os.Symlink("/tmp/testReadSymlinkToFile", "/tmp/fileLinkTest"); err != nil {
+ t.Errorf("failed to create symlink: %s", err)
+ }
+
+ var path string
+ if path, err = ReadSymlinkedDirectory("/tmp/fileLinkTest"); err == nil {
+ t.Fatalf("ReadSymlinkedDirectory on a symlink to a file should've failed")
+ }
+
+ if path != "" {
+ t.Fatalf("path should've been empty: %s", path)
+ }
+
+ if err = os.Remove("/tmp/testReadSymlinkToFile"); err != nil {
+ t.Errorf("failed to remove file: %s", err)
+ }
+
+ if err = os.Remove("/tmp/fileLinkTest"); err != nil {
+ t.Errorf("failed to remove symlink: %s", err)
+ }
+}
+
+func TestWildcardMatches(t *testing.T) {
+ match, _ := Matches("fileutils.go", []string{"*"})
+ if match != true {
+ t.Errorf("failed to get a wildcard match, got %v", match)
+ }
+}
+
+// A simple pattern match should return true.
+func TestPatternMatches(t *testing.T) {
+ match, _ := Matches("fileutils.go", []string{"*.go"})
+ if match != true {
+ t.Errorf("failed to get a match, got %v", match)
+ }
+}
+
+// An exclusion followed by an inclusion should return true.
+func TestExclusionPatternMatchesPatternBefore(t *testing.T) {
+ match, _ := Matches("fileutils.go", []string{"!fileutils.go", "*.go"})
+ if match != true {
+ t.Errorf("failed to get true match on exclusion pattern, got %v", match)
+ }
+}
+
+// A folder pattern followed by an exception should return false.
+func TestPatternMatchesFolderExclusions(t *testing.T) {
+ match, _ := Matches("docs/README.md", []string{"docs", "!docs/README.md"})
+ if match != false {
+ t.Errorf("failed to get a false match on exclusion pattern, got %v", match)
+ }
+}
+
+// A folder pattern followed by an exception should return false.
+func TestPatternMatchesFolderWithSlashExclusions(t *testing.T) {
+ match, _ := Matches("docs/README.md", []string{"docs/", "!docs/README.md"})
+ if match != false {
+ t.Errorf("failed to get a false match on exclusion pattern, got %v", match)
+ }
+}
+
+// A folder pattern followed by an exception should return false.
+func TestPatternMatchesFolderWildcardExclusions(t *testing.T) {
+ match, _ := Matches("docs/README.md", []string{"docs/*", "!docs/README.md"})
+ if match != false {
+ t.Errorf("failed to get a false match on exclusion pattern, got %v", match)
+ }
+}
+
+// A pattern followed by an exclusion should return false.
+func TestExclusionPatternMatchesPatternAfter(t *testing.T) {
+ match, _ := Matches("fileutils.go", []string{"*.go", "!fileutils.go"})
+ if match != false {
+ t.Errorf("failed to get false match on exclusion pattern, got %v", match)
+ }
+}
+
+// A filename evaluating to . should return false.
+func TestExclusionPatternMatchesWholeDirectory(t *testing.T) {
+ match, _ := Matches(".", []string{"*.go"})
+ if match != false {
+ t.Errorf("failed to get false match on ., got %v", match)
+ }
+}
+
+// A single ! pattern should return an error.
+func TestSingleExclamationError(t *testing.T) {
+ _, err := Matches("fileutils.go", []string{"!"})
+ if err == nil {
+ t.Errorf("failed to get an error for a single exclamation point, got %v", err)
+ }
+}
+
+// A string preceded with a ! should return true from Exclusion.
+func TestExclusion(t *testing.T) {
+ exclusion := exclusion("!")
+ if !exclusion {
+ t.Errorf("failed to get true for a single !, got %v", exclusion)
+ }
+}
+
+// Matches with no patterns
+func TestMatchesWithNoPatterns(t *testing.T) {
+ matches, err := Matches("/any/path/there", []string{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ if matches {
+ t.Fatalf("Should not have match anything")
+ }
+}
+
+// Matches with malformed patterns
+func TestMatchesWithMalformedPatterns(t *testing.T) {
+ matches, err := Matches("/any/path/there", []string{"["})
+ if err == nil {
+ t.Fatal("Should have failed because of a malformed syntax in the pattern")
+ }
+ if matches {
+ t.Fatalf("Should not have match anything")
+ }
+}
+
+// An empty string should return true from Empty.
+func TestEmpty(t *testing.T) {
+ empty := empty("")
+ if !empty {
+ t.Errorf("failed to get true for an empty string, got %v", empty)
+ }
+}
+
+func TestCleanPatterns(t *testing.T) {
+ cleaned, _, _, _ := CleanPatterns([]string{"docs", "config"})
+ if len(cleaned) != 2 {
+ t.Errorf("expected 2 element slice, got %v", len(cleaned))
+ }
+}
+
+func TestCleanPatternsStripEmptyPatterns(t *testing.T) {
+ cleaned, _, _, _ := CleanPatterns([]string{"docs", "config", ""})
+ if len(cleaned) != 2 {
+ t.Errorf("expected 2 element slice, got %v", len(cleaned))
+ }
+}
+
+func TestCleanPatternsExceptionFlag(t *testing.T) {
+ _, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md"})
+ if !exceptions {
+ t.Errorf("expected exceptions to be true, got %v", exceptions)
+ }
+}
+
+func TestCleanPatternsLeadingSpaceTrimmed(t *testing.T) {
+ _, _, exceptions, _ := CleanPatterns([]string{"docs", " !docs/README.md"})
+ if !exceptions {
+ t.Errorf("expected exceptions to be true, got %v", exceptions)
+ }
+}
+
+func TestCleanPatternsTrailingSpaceTrimmed(t *testing.T) {
+ _, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md "})
+ if !exceptions {
+ t.Errorf("expected exceptions to be true, got %v", exceptions)
+ }
+}
+
+func TestCleanPatternsErrorSingleException(t *testing.T) {
+ _, _, _, err := CleanPatterns([]string{"!"})
+ if err == nil {
+ t.Errorf("expected error on single exclamation point, got %v", err)
+ }
+}
+
+func TestCleanPatternsFolderSplit(t *testing.T) {
+ _, dirs, _, _ := CleanPatterns([]string{"docs/config/CONFIG.md"})
+ if dirs[0][0] != "docs" {
+ t.Errorf("expected first element in dirs slice to be docs, got %v", dirs[0][1])
+ }
+ if dirs[0][1] != "config" {
+ t.Errorf("expected first element in dirs slice to be config, got %v", dirs[0][1])
+ }
+}
+
+func TestCreateIfNotExistsDir(t *testing.T) {
+ tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tempFolder)
+
+ folderToCreate := filepath.Join(tempFolder, "tocreate")
+
+ if err := CreateIfNotExists(folderToCreate, true); err != nil {
+ t.Fatal(err)
+ }
+ fileinfo, err := os.Stat(folderToCreate)
+ if err != nil {
+ t.Fatalf("Should have create a folder, got %v", err)
+ }
+
+ if !fileinfo.IsDir() {
+ t.Fatalf("Should have been a dir, seems it's not")
+ }
+}
+
+func TestCreateIfNotExistsFile(t *testing.T) {
+ tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tempFolder)
+
+ fileToCreate := filepath.Join(tempFolder, "file/to/create")
+
+ if err := CreateIfNotExists(fileToCreate, false); err != nil {
+ t.Fatal(err)
+ }
+ fileinfo, err := os.Stat(fileToCreate)
+ if err != nil {
+ t.Fatalf("Should have create a file, got %v", err)
+ }
+
+ if fileinfo.IsDir() {
+ t.Fatalf("Should have been a file, seems it's not")
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/fmt.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/fmt.go
new file mode 100644
index 0000000..801132f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/fmt.go
@@ -0,0 +1,14 @@
+package ioutils
+
+import (
+ "fmt"
+ "io"
+)
+
+// FprintfIfNotEmpty formats value according to format and writes the result
+// to w, but only if value is not empty.
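+//
+// For example (illustrative, mirroring the test): FprintfIfNotEmpty(w,
+// "name=%s\n", "") writes nothing, while FprintfIfNotEmpty(w, "name=%s\n",
+// "app") writes "name=app\n" and returns 9.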
+func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) {
+ if value != "" {
+ return fmt.Fprintf(w, format, value)
+ }
+ return 0, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/fmt_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/fmt_test.go
new file mode 100644
index 0000000..8968863
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/fmt_test.go
@@ -0,0 +1,17 @@
+package ioutils
+
+import "testing"
+
+func TestFprintfIfNotEmpty(t *testing.T) {
+ wc := NewWriteCounter(&NopWriter{})
+ n, _ := FprintfIfNotEmpty(wc, "foo%s", "")
+
+ if wc.Count != 0 || n != 0 {
+ t.Errorf("Wrong count: %v vs. %v vs. 0", wc.Count, n)
+ }
+
+ n, _ = FprintfIfNotEmpty(wc, "foo%s", "bar")
+ if wc.Count != 6 || n != 6 {
+ t.Errorf("Wrong count: %v vs. %v vs. 6", wc.Count, n)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/multireader.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/multireader.go
new file mode 100644
index 0000000..f231aa9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/multireader.go
@@ -0,0 +1,226 @@
+package ioutils
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+)
+
+type pos struct {
+ idx int
+ offset int64
+}
+
+type multiReadSeeker struct {
+ readers []io.ReadSeeker
+ pos *pos
+ posIdx map[io.ReadSeeker]int
+}
+
+func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) {
+ var tmpOffset int64
+ switch whence {
+ case os.SEEK_SET:
+ for i, rdr := range r.readers {
+ // get size of the current reader
+ s, err := rdr.Seek(0, os.SEEK_END)
+ if err != nil {
+ return -1, err
+ }
+
+ if offset > tmpOffset+s {
+ if i == len(r.readers)-1 {
+					// Seek position within the last reader, relative to its
+					// start (the requested offset lies past the total size).
+					rdrOffset := offset - tmpOffset
+ if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil {
+ return -1, err
+ }
+ r.pos = &pos{i, rdrOffset}
+ return offset, nil
+ }
+
+ tmpOffset += s
+ continue
+ }
+
+ rdrOffset := offset - tmpOffset
+ idx := i
+
+ rdr.Seek(rdrOffset, os.SEEK_SET)
+ // make sure all following readers are at 0
+ for _, rdr := range r.readers[i+1:] {
+ rdr.Seek(0, os.SEEK_SET)
+ }
+
+ if rdrOffset == s && i != len(r.readers)-1 {
+				idx++
+ rdrOffset = 0
+ }
+ r.pos = &pos{idx, rdrOffset}
+ return offset, nil
+ }
+ case os.SEEK_END:
+ for _, rdr := range r.readers {
+ s, err := rdr.Seek(0, os.SEEK_END)
+ if err != nil {
+ return -1, err
+ }
+ tmpOffset += s
+ }
+ r.Seek(tmpOffset+offset, os.SEEK_SET)
+ return tmpOffset + offset, nil
+ case os.SEEK_CUR:
+ if r.pos == nil {
+ return r.Seek(offset, os.SEEK_SET)
+ }
+ // Just return the current offset
+ if offset == 0 {
+ return r.getCurOffset()
+ }
+
+ curOffset, err := r.getCurOffset()
+ if err != nil {
+ return -1, err
+ }
+ rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset)
+ if err != nil {
+ return -1, err
+ }
+
+ r.pos = &pos{r.posIdx[rdr], rdrOffset}
+ return curOffset + offset, nil
+ default:
+ return -1, fmt.Errorf("Invalid whence: %d", whence)
+ }
+
+ return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset)
+}
+
+// getReaderForOffset maps an absolute offset within the concatenated stream
+// to the reader containing it and the offset relative to that reader's start.
+func (r *multiReadSeeker) getReaderForOffset(offset int64) (io.ReadSeeker, int64, error) {
+	var offsetTo int64
+
+	for _, rdr := range r.readers {
+		size, err := getReadSeekerSize(rdr)
+		if err != nil {
+			return nil, -1, err
+		}
+		if offset < offsetTo+size {
+			// The offset falls within this reader.
+			return rdr, offset - offsetTo, nil
+		}
+		offsetTo += size
+	}
+
+	// The offset lies at or past the end of the concatenated stream; report
+	// it relative to the start of the last reader.
+	last := r.readers[len(r.readers)-1]
+	lastSize, err := getReadSeekerSize(last)
+	if err != nil {
+		return nil, -1, err
+	}
+	return last, lastSize + offset - offsetTo, nil
+}
+
+func (r *multiReadSeeker) getCurOffset() (int64, error) {
+ var totalSize int64
+ for _, rdr := range r.readers[:r.pos.idx+1] {
+ if r.posIdx[rdr] == r.pos.idx {
+ totalSize += r.pos.offset
+ break
+ }
+
+ size, err := getReadSeekerSize(rdr)
+ if err != nil {
+ return -1, fmt.Errorf("error getting seeker size: %v", err)
+ }
+ totalSize += size
+ }
+ return totalSize, nil
+}
+
+func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) {
+	var offset int64
+	for _, rs := range r.readers {
+		if rs == rdr {
+			break
+		}
+
+		// Sum the sizes of the readers that precede rdr.
+		size, err := getReadSeekerSize(rs)
+		if err != nil {
+			return -1, err
+		}
+		offset += size
+	}
+	return offset, nil
+}
+
+func (r *multiReadSeeker) Read(b []byte) (int, error) {
+ if r.pos == nil {
+ r.pos = &pos{0, 0}
+ }
+
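+	// Copy up to cap(b) bytes from the remaining readers, in order, into a
+	// temporary buffer, then serve the caller's slice from that buffer and
+	// record the new position.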
+ bCap := int64(cap(b))
+ buf := bytes.NewBuffer(nil)
+ var rdr io.ReadSeeker
+
+ for _, rdr = range r.readers[r.pos.idx:] {
+ readBytes, err := io.CopyN(buf, rdr, bCap)
+ if err != nil && err != io.EOF {
+ return -1, err
+ }
+ bCap -= readBytes
+
+ if bCap == 0 {
+ break
+ }
+ }
+
+ rdrPos, err := rdr.Seek(0, os.SEEK_CUR)
+ if err != nil {
+ return -1, err
+ }
+ r.pos = &pos{r.posIdx[rdr], rdrPos}
+ return buf.Read(b)
+}
+
+func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) {
+ // save the current position
+ pos, err := rdr.Seek(0, os.SEEK_CUR)
+ if err != nil {
+ return -1, err
+ }
+
+ // get the size
+ size, err := rdr.Seek(0, os.SEEK_END)
+ if err != nil {
+ return -1, err
+ }
+
+ // reset the position
+ if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil {
+ return -1, err
+ }
+ return size, nil
+}
+
+// MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided
+// input readseekers. After calling this method the initial position is set to the
+// beginning of the first ReadSeeker. At the end of a ReadSeeker, Read always advances
+// to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker.
+// Seek can be used over the sum of lengths of all readseekers.
+//
+// When a MultiReadSeeker is in use, no Read and Seek operations should be made
+// on its ReadSeeker components. Also, users should make no assumptions about
+// the state of individual readseekers while the MultiReadSeeker is in use.
+func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker {
+ if len(readers) == 1 {
+ return readers[0]
+ }
+ idx := make(map[io.ReadSeeker]int)
+ for i, rdr := range readers {
+ idx[rdr] = i
+ }
+ return &multiReadSeeker{
+ readers: readers,
+ posIdx: idx,
+ }
+}
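+
+// Illustrative sketch (not part of the vendored source): concatenating two
+// in-memory readers and reading across their boundary. strings.NewReader is
+// assumed only because it satisfies io.ReadSeeker.
+//
+//   mr := MultiReadSeeker(strings.NewReader("ab"), strings.NewReader("cd"))
+//   buf := make([]byte, 3)
+//   mr.Seek(1, os.SEEK_SET) // position inside the first reader
+//   mr.Read(buf)            // yields "bcd", spanning both readers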
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/multireader_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/multireader_test.go
new file mode 100644
index 0000000..de495b5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/multireader_test.go
@@ -0,0 +1,149 @@
+package ioutils
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "strings"
+ "testing"
+)
+
+func TestMultiReadSeekerReadAll(t *testing.T) {
+ str := "hello world"
+ s1 := strings.NewReader(str + " 1")
+ s2 := strings.NewReader(str + " 2")
+ s3 := strings.NewReader(str + " 3")
+ mr := MultiReadSeeker(s1, s2, s3)
+
+ expectedSize := int64(s1.Len() + s2.Len() + s3.Len())
+
+ b, err := ioutil.ReadAll(mr)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expected := "hello world 1hello world 2hello world 3"
+ if string(b) != expected {
+ t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected)
+ }
+
+ size, err := mr.Seek(0, os.SEEK_END)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if size != expectedSize {
+ t.Fatalf("reader size does not match, got %d, expected %d", size, expectedSize)
+ }
+
+ // Reset the position and read again
+ pos, err := mr.Seek(0, os.SEEK_SET)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if pos != 0 {
+ t.Fatalf("expected position to be set to 0, got %d", pos)
+ }
+
+ b, err = ioutil.ReadAll(mr)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if string(b) != expected {
+ t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected)
+ }
+}
+
+func TestMultiReadSeekerReadEach(t *testing.T) {
+ str := "hello world"
+ s1 := strings.NewReader(str + " 1")
+ s2 := strings.NewReader(str + " 2")
+ s3 := strings.NewReader(str + " 3")
+ mr := MultiReadSeeker(s1, s2, s3)
+
+ var totalBytes int64
+ for i, s := range []*strings.Reader{s1, s2, s3} {
+ sLen := int64(s.Len())
+ buf := make([]byte, s.Len())
+ expected := []byte(fmt.Sprintf("%s %d", str, i+1))
+
+ if _, err := mr.Read(buf); err != nil && err != io.EOF {
+ t.Fatal(err)
+ }
+
+ if !bytes.Equal(buf, expected) {
+ t.Fatalf("expected %q to be %q", string(buf), string(expected))
+ }
+
+ pos, err := mr.Seek(0, os.SEEK_CUR)
+ if err != nil {
+ t.Fatalf("iteration: %d, error: %v", i+1, err)
+ }
+
+ // check that the total bytes read is the current position of the seeker
+ totalBytes += sLen
+ if pos != totalBytes {
+ t.Fatalf("expected current position to be: %d, got: %d, iteration: %d", totalBytes, pos, i+1)
+ }
+
+ // This tests not only that SEEK_SET and SEEK_CUR give the same values, but that the next iteration is in the expected position as well
+ newPos, err := mr.Seek(pos, os.SEEK_SET)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if newPos != pos {
+ t.Fatalf("expected to get same position when calling SEEK_SET with value from SEEK_CUR, cur: %d, set: %d", pos, newPos)
+ }
+ }
+}
+
+func TestMultiReadSeekerReadSpanningChunks(t *testing.T) {
+ str := "hello world"
+ s1 := strings.NewReader(str + " 1")
+ s2 := strings.NewReader(str + " 2")
+ s3 := strings.NewReader(str + " 3")
+ mr := MultiReadSeeker(s1, s2, s3)
+
+ buf := make([]byte, s1.Len()+3)
+ _, err := mr.Read(buf)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // expected is the contents of s1 + 3 bytes from s2, ie, the `hel` at the end of this string
+ expected := "hello world 1hel"
+ if string(buf) != expected {
+ t.Fatalf("expected %s to be %s", string(buf), expected)
+ }
+}
+
+func TestMultiReadSeekerNegativeSeek(t *testing.T) {
+ str := "hello world"
+ s1 := strings.NewReader(str + " 1")
+ s2 := strings.NewReader(str + " 2")
+ s3 := strings.NewReader(str + " 3")
+ mr := MultiReadSeeker(s1, s2, s3)
+
+ s1Len := s1.Len()
+ s2Len := s2.Len()
+ s3Len := s3.Len()
+
+ s, err := mr.Seek(int64(-1*s3.Len()), os.SEEK_END)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if s != int64(s1Len+s2Len) {
+ t.Fatalf("expected %d to be %d", s, s1.Len()+s2.Len())
+ }
+
+ buf := make([]byte, s3Len)
+ if _, err := mr.Read(buf); err != nil && err != io.EOF {
+ t.Fatal(err)
+ }
+ expected := fmt.Sprintf("%s %d", str, 3)
+ if string(buf) != fmt.Sprintf("%s %d", str, 3) {
+ t.Fatalf("expected %q to be %q", string(buf), expected)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers.go
new file mode 100644
index 0000000..ff09baa
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers.go
@@ -0,0 +1,254 @@
+package ioutils
+
+import (
+ "bytes"
+ "crypto/rand"
+ "crypto/sha256"
+ "encoding/hex"
+ "io"
+ "math/big"
+ "sync"
+ "time"
+)
+
+type readCloserWrapper struct {
+ io.Reader
+ closer func() error
+}
+
+func (r *readCloserWrapper) Close() error {
+ return r.closer()
+}
+
+func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
+ return &readCloserWrapper{
+ Reader: r,
+ closer: closer,
+ }
+}
+
+type readerErrWrapper struct {
+ reader io.Reader
+ closer func()
+}
+
+func (r *readerErrWrapper) Read(p []byte) (int, error) {
+ n, err := r.reader.Read(p)
+ if err != nil {
+ r.closer()
+ }
+ return n, err
+}
+
+func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader {
+ return &readerErrWrapper{
+ reader: r,
+ closer: closer,
+ }
+}
+
+// bufReader allows the underlying reader to continue to produce
+// output by pre-emptively reading from the wrapped reader.
+// This is achieved by buffering this data in bufReader's
+// expanding buffer.
+type bufReader struct {
+ sync.Mutex
+ buf *bytes.Buffer
+ reader io.Reader
+ err error
+ wait sync.Cond
+ drainBuf []byte
+ reuseBuf []byte
+ maxReuse int64
+ resetTimeout time.Duration
+ bufLenResetThreshold int64
+ maxReadDataReset int64
+}
+
+func NewBufReader(r io.Reader) *bufReader {
+ var timeout int
+ if randVal, err := rand.Int(rand.Reader, big.NewInt(120)); err == nil {
+ timeout = int(randVal.Int64()) + 180
+ } else {
+ timeout = 300
+ }
+ reader := &bufReader{
+ buf: &bytes.Buffer{},
+ drainBuf: make([]byte, 1024),
+ reuseBuf: make([]byte, 4096),
+ maxReuse: 1000,
+ resetTimeout: time.Second * time.Duration(timeout),
+ bufLenResetThreshold: 100 * 1024,
+ maxReadDataReset: 10 * 1024 * 1024,
+ reader: r,
+ }
+ reader.wait.L = &reader.Mutex
+ go reader.drain()
+ return reader
+}
+
+func NewBufReaderWithDrainbufAndBuffer(r io.Reader, drainBuffer []byte, buffer *bytes.Buffer) *bufReader {
+ reader := &bufReader{
+ buf: buffer,
+ drainBuf: drainBuffer,
+ reader: r,
+ }
+ reader.wait.L = &reader.Mutex
+ go reader.drain()
+ return reader
+}
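+
+// Illustrative sketch (assumes an io.Pipe source; not part of the vendored
+// source): the drain goroutine keeps emptying the pipe into the internal
+// buffer, so writers are never blocked by a slow consumer.
+//
+//   r, w := io.Pipe()
+//   br := NewBufReader(r)
+//   w.Write([]byte("hello")) // completes as soon as drain() picks it up
+//   w.Close()
+//   data, _ := ioutil.ReadAll(br) // data == []byte("hello")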
+
+func (r *bufReader) drain() {
+ var (
+ duration time.Duration
+ lastReset time.Time
+ now time.Time
+ reset bool
+ bufLen int64
+ dataSinceReset int64
+ maxBufLen int64
+ reuseBufLen int64
+ reuseCount int64
+ )
+ reuseBufLen = int64(len(r.reuseBuf))
+ lastReset = time.Now()
+ for {
+ n, err := r.reader.Read(r.drainBuf)
+ dataSinceReset += int64(n)
+ r.Lock()
+ bufLen = int64(r.buf.Len())
+ if bufLen > maxBufLen {
+ maxBufLen = bufLen
+ }
+
+ // Avoid unbounded growth of the buffer over time.
+ // This has been discovered to be the only non-intrusive
+ // solution to the unbounded growth of the buffer.
+ // Alternative solutions such as compression, multiple
+ // buffers, channels and other similar pieces of code
+ // were reducing throughput, overall Docker performance
+ // or simply crashed Docker.
+ // This solution releases the buffer when specific
+ // conditions are met to avoid the continuous resizing
+ // of the buffer for long lived containers.
+ //
+ // Move data to the front of the buffer if it's
+ // smaller than what reuseBuf can store
+ if bufLen > 0 && reuseBufLen >= bufLen {
+ n, _ := r.buf.Read(r.reuseBuf)
+ r.buf.Write(r.reuseBuf[0:n])
+ // Take action if the buffer has been reused too many
+ // times and if there's data in the buffer.
+ // The timeout is also used as means to avoid doing
+ // these operations more often or less often than
+ // required.
+ // The various conditions try to detect heavy activity
+ // in the buffer which might be indicators of heavy
+ // growth of the buffer.
+ } else if reuseCount >= r.maxReuse && bufLen > 0 {
+ now = time.Now()
+ duration = now.Sub(lastReset)
+ timeoutReached := duration >= r.resetTimeout
+
+ // The timeout has been reached and the
+ // buffered data couldn't be moved to the front
+ // of the buffer, so the buffer gets reset.
+ if timeoutReached && bufLen > reuseBufLen {
+ reset = true
+ }
+ // The amount of buffered data is too high now,
+ // reset the buffer.
+ if timeoutReached && maxBufLen >= r.bufLenResetThreshold {
+ reset = true
+ }
+ // Reset the buffer if a certain amount of
+ // data has gone through the buffer since the
+ // last reset.
+ if timeoutReached && dataSinceReset >= r.maxReadDataReset {
+ reset = true
+ }
+ // The buffered data is moved to a fresh buffer,
+ // swap the old buffer with the new one and
+ // reset all counters.
+ if reset {
+ newbuf := &bytes.Buffer{}
+ newbuf.ReadFrom(r.buf)
+ r.buf = newbuf
+ lastReset = now
+ reset = false
+ dataSinceReset = 0
+ maxBufLen = 0
+ reuseCount = 0
+ }
+ }
+ if err != nil {
+ r.err = err
+ } else {
+ r.buf.Write(r.drainBuf[0:n])
+ }
+ reuseCount++
+ r.wait.Signal()
+ r.Unlock()
+ callSchedulerIfNecessary()
+ if err != nil {
+ break
+ }
+ }
+}
+
+func (r *bufReader) Read(p []byte) (n int, err error) {
+ r.Lock()
+ defer r.Unlock()
+ for {
+ n, err = r.buf.Read(p)
+ if n > 0 {
+ return n, err
+ }
+ if r.err != nil {
+ return 0, r.err
+ }
+ r.wait.Wait()
+ }
+}
+
+func (r *bufReader) Close() error {
+ closer, ok := r.reader.(io.ReadCloser)
+ if !ok {
+ return nil
+ }
+ return closer.Close()
+}
+
+func HashData(src io.Reader) (string, error) {
+ h := sha256.New()
+ if _, err := io.Copy(h, src); err != nil {
+ return "", err
+ }
+ return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
+}
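+
+// Illustrative usage; the digest below is the one asserted in
+// readers_test.go for the same input.
+//
+//   digest, _ := HashData(strings.NewReader("hash-me"))
+//   // digest == "sha256:4d11186aed035cc624d553e10db358492c84a7cd6b9670d92123c144930450aa"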
+
+type OnEOFReader struct {
+ Rc io.ReadCloser
+ Fn func()
+}
+
+func (r *OnEOFReader) Read(p []byte) (n int, err error) {
+ n, err = r.Rc.Read(p)
+ if err == io.EOF {
+ r.runFunc()
+ }
+ return
+}
+
+func (r *OnEOFReader) Close() error {
+ err := r.Rc.Close()
+ r.runFunc()
+ return err
+}
+
+func (r *OnEOFReader) runFunc() {
+ if fn := r.Fn; fn != nil {
+ fn()
+ r.Fn = nil
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers_test.go
new file mode 100644
index 0000000..0a39b6e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers_test.go
@@ -0,0 +1,216 @@
+package ioutils
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "strings"
+ "testing"
+)
+
+// Implement io.Reader
+type errorReader struct{}
+
+func (r *errorReader) Read(p []byte) (int, error) {
+ return 0, fmt.Errorf("Error reader always fail.")
+}
+
+func TestReadCloserWrapperClose(t *testing.T) {
+ reader := strings.NewReader("A string reader")
+ wrapper := NewReadCloserWrapper(reader, func() error {
+ return fmt.Errorf("This will be called when closing")
+ })
+ err := wrapper.Close()
+ if err == nil || !strings.Contains(err.Error(), "This will be called when closing") {
+ t.Fatalf("readCloserWrapper should have call the anonymous func and thus, fail.")
+ }
+}
+
+func TestReaderErrWrapperReadOnError(t *testing.T) {
+ called := false
+ reader := &errorReader{}
+ wrapper := NewReaderErrWrapper(reader, func() {
+ called = true
+ })
+ _, err := wrapper.Read([]byte{})
+ if err == nil || !strings.Contains(err.Error(), "Error reader always fail.") {
+ t.Fatalf("readErrWrapper should returned an error")
+ }
+ if !called {
+ t.Fatalf("readErrWrapper should have call the anonymous function on failure")
+ }
+}
+
+func TestReaderErrWrapperRead(t *testing.T) {
+ reader := strings.NewReader("a string reader.")
+ wrapper := NewReaderErrWrapper(reader, func() {
+ t.Fatalf("readErrWrapper should not have called the anonymous function")
+ })
+ // Read 20 bytes (should be ok with the string above)
+ num, err := wrapper.Read(make([]byte, 20))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if num != 16 {
+ t.Fatalf("readerErrWrapper should have read 16 byte, but read %d", num)
+ }
+}
+
+func TestNewBufReaderWithDrainbufAndBuffer(t *testing.T) {
+ reader, writer := io.Pipe()
+
+ drainBuffer := make([]byte, 1024)
+ buffer := bytes.Buffer{}
+ bufreader := NewBufReaderWithDrainbufAndBuffer(reader, drainBuffer, &buffer)
+
+ // Write everything down to a Pipe.
+ // Usually a pipe would block, but because of the buffered reader
+ // the writes go through
+ done := make(chan bool)
+ go func() {
+ writer.Write([]byte("hello world"))
+ writer.Close()
+ done <- true
+ }()
+
+ // Drain the reader *after* everything has been written, just to verify
+ // it is indeed buffering
+ <-done
+
+ output, err := ioutil.ReadAll(bufreader)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(output, []byte("hello world")) {
+ t.Error(string(output))
+ }
+}
+
+func TestBufReader(t *testing.T) {
+ reader, writer := io.Pipe()
+ bufreader := NewBufReader(reader)
+
+ // Write everything down to a Pipe.
+ // Usually a pipe would block, but because of the buffered reader
+ // the writes go through
+ done := make(chan bool)
+ go func() {
+ writer.Write([]byte("hello world"))
+ writer.Close()
+ done <- true
+ }()
+
+ // Drain the reader *after* everything has been written, just to verify
+ // it is indeed buffering
+ <-done
+ output, err := ioutil.ReadAll(bufreader)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(output, []byte("hello world")) {
+ t.Error(string(output))
+ }
+}
+
+func TestBufReaderCloseWithNonReaderCloser(t *testing.T) {
+ reader := strings.NewReader("buffer")
+ bufreader := NewBufReader(reader)
+
+ if err := bufreader.Close(); err != nil {
+ t.Fatal(err)
+ }
+
+}
+
+// implements io.ReadCloser
+type simpleReaderCloser struct{}
+
+func (r *simpleReaderCloser) Read(p []byte) (n int, err error) {
+ return 0, nil
+}
+
+func (r *simpleReaderCloser) Close() error {
+ return nil
+}
+
+func TestBufReaderCloseWithReaderCloser(t *testing.T) {
+ reader := &simpleReaderCloser{}
+ bufreader := NewBufReader(reader)
+
+ err := bufreader.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+}
+
+func TestHashData(t *testing.T) {
+ reader := strings.NewReader("hash-me")
+ actual, err := HashData(reader)
+ if err != nil {
+ t.Fatal(err)
+ }
+ expected := "sha256:4d11186aed035cc624d553e10db358492c84a7cd6b9670d92123c144930450aa"
+ if actual != expected {
+ t.Fatalf("Expecting %s, got %s", expected, actual)
+ }
+}
+
+type repeatedReader struct {
+ readCount int
+ maxReads int
+ data []byte
+}
+
+func newRepeatedReader(max int, data []byte) *repeatedReader {
+ return &repeatedReader{0, max, data}
+}
+
+func (r *repeatedReader) Read(p []byte) (int, error) {
+ if r.readCount >= r.maxReads {
+ return 0, io.EOF
+ }
+ r.readCount++
+ n := copy(p, r.data)
+ return n, nil
+}
+
+func testWithData(data []byte, reads int) {
+ reader := newRepeatedReader(reads, data)
+ bufReader := NewBufReader(reader)
+ io.Copy(ioutil.Discard, bufReader)
+}
+
+func Benchmark1M10BytesReads(b *testing.B) {
+ reads := 1000000
+ readSize := int64(10)
+ data := make([]byte, readSize)
+ b.SetBytes(readSize * int64(reads))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ testWithData(data, reads)
+ }
+}
+
+func Benchmark1M1024BytesReads(b *testing.B) {
+ reads := 1000000
+ readSize := int64(1024)
+ data := make([]byte, readSize)
+ b.SetBytes(readSize * int64(reads))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ testWithData(data, reads)
+ }
+}
+
+func Benchmark10k32KBytesReads(b *testing.B) {
+ reads := 10000
+ readSize := int64(32 * 1024)
+ data := make([]byte, readSize)
+ b.SetBytes(readSize * int64(reads))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ testWithData(data, reads)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/scheduler.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/scheduler.go
new file mode 100644
index 0000000..3c88f29
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/scheduler.go
@@ -0,0 +1,6 @@
+// +build !gccgo
+
+package ioutils
+
+func callSchedulerIfNecessary() {
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go
new file mode 100644
index 0000000..c11d02b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go
@@ -0,0 +1,13 @@
+// +build gccgo
+
+package ioutils
+
+import (
+ "runtime"
+)
+
+func callSchedulerIfNecessary() {
+ //allow or force Go scheduler to switch context, without explicitly
+ //forcing this will make it hang when using gccgo implementation
+ runtime.Gosched()
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writeflusher.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writeflusher.go
new file mode 100644
index 0000000..2509547
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writeflusher.go
@@ -0,0 +1,47 @@
+package ioutils
+
+import (
+ "io"
+ "net/http"
+ "sync"
+)
+
+type WriteFlusher struct {
+ sync.Mutex
+ w io.Writer
+ flusher http.Flusher
+ flushed bool
+}
+
+func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
+ wf.Lock()
+ defer wf.Unlock()
+ n, err = wf.w.Write(b)
+ wf.flushed = true
+ wf.flusher.Flush()
+ return n, err
+}
+
+// Flush the stream immediately.
+func (wf *WriteFlusher) Flush() {
+ wf.Lock()
+ defer wf.Unlock()
+ wf.flushed = true
+ wf.flusher.Flush()
+}
+
+func (wf *WriteFlusher) Flushed() bool {
+ wf.Lock()
+ defer wf.Unlock()
+ return wf.flushed
+}
+
+func NewWriteFlusher(w io.Writer) *WriteFlusher {
+ var flusher http.Flusher
+ if f, ok := w.(http.Flusher); ok {
+ flusher = f
+ } else {
+ flusher = &NopFlusher{}
+ }
+ return &WriteFlusher{w: w, flusher: flusher}
+}
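+
+// Illustrative sketch inside an http.Handler, where w is the handler's
+// http.ResponseWriter (which implements http.Flusher):
+//
+//   wf := NewWriteFlusher(w)
+//   wf.Write([]byte("progress...")) // written and flushed to the client immediately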
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers.go
new file mode 100644
index 0000000..43fdc44
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers.go
@@ -0,0 +1,60 @@
+package ioutils
+
+import "io"
+
+type NopWriter struct{}
+
+func (*NopWriter) Write(buf []byte) (int, error) {
+ return len(buf), nil
+}
+
+type nopWriteCloser struct {
+ io.Writer
+}
+
+func (w *nopWriteCloser) Close() error { return nil }
+
+func NopWriteCloser(w io.Writer) io.WriteCloser {
+ return &nopWriteCloser{w}
+}
+
+type NopFlusher struct{}
+
+func (f *NopFlusher) Flush() {}
+
+type writeCloserWrapper struct {
+ io.Writer
+ closer func() error
+}
+
+func (r *writeCloserWrapper) Close() error {
+ return r.closer()
+}
+
+func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser {
+ return &writeCloserWrapper{
+ Writer: r,
+ closer: closer,
+ }
+}
+
+// Wrap a concrete io.Writer and hold a count of the number
+// of bytes written to the writer during a "session".
+// This can be convenient when write return is masked
+// (e.g., json.Encoder.Encode())
+type WriteCounter struct {
+ Count int64
+ Writer io.Writer
+}
+
+func NewWriteCounter(w io.Writer) *WriteCounter {
+ return &WriteCounter{
+ Writer: w,
+ }
+}
+
+func (wc *WriteCounter) Write(p []byte) (count int, err error) {
+ count, err = wc.Writer.Write(p)
+ wc.Count += int64(count)
+ return
+}
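+
+// Illustrative usage with json.Encoder, whose Encode method masks the byte
+// count (dst and v are hypothetical placeholders):
+//
+//   wc := NewWriteCounter(dst)
+//   json.NewEncoder(wc).Encode(v)
+//   // wc.Count now holds the number of bytes actually written to dst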
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers_test.go
new file mode 100644
index 0000000..564b1cd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers_test.go
@@ -0,0 +1,65 @@
+package ioutils
+
+import (
+ "bytes"
+ "strings"
+ "testing"
+)
+
+func TestWriteCloserWrapperClose(t *testing.T) {
+ called := false
+ writer := bytes.NewBuffer([]byte{})
+ wrapper := NewWriteCloserWrapper(writer, func() error {
+ called = true
+ return nil
+ })
+ if err := wrapper.Close(); err != nil {
+ t.Fatal(err)
+ }
+ if !called {
+ t.Fatalf("writeCloserWrapper should have call the anonymous function.")
+ }
+}
+
+func TestNopWriteCloser(t *testing.T) {
+ writer := bytes.NewBuffer([]byte{})
+ wrapper := NopWriteCloser(writer)
+ if err := wrapper.Close(); err != nil {
+ t.Fatal("NopWriteCloser always return nil on Close.")
+ }
+
+}
+
+func TestNopWriter(t *testing.T) {
+ nw := &NopWriter{}
+ l, err := nw.Write([]byte{'c'})
+ if err != nil {
+ t.Fatal(err)
+ }
+ if l != 1 {
+ t.Fatalf("Expected 1 got %d", l)
+ }
+}
+
+func TestWriteCounter(t *testing.T) {
+ dummy1 := "This is a dummy string."
+ dummy2 := "This is another dummy string."
+ totalLength := int64(len(dummy1) + len(dummy2))
+
+ reader1 := strings.NewReader(dummy1)
+ reader2 := strings.NewReader(dummy2)
+
+ var buffer bytes.Buffer
+ wc := NewWriteCounter(&buffer)
+
+ reader1.WriteTo(wc)
+ reader2.WriteTo(wc)
+
+ if wc.Count != totalLength {
+ t.Errorf("Wrong count: %d vs. %d", wc.Count, totalLength)
+ }
+
+ if buffer.String() != dummy1+dummy2 {
+ t.Error("Wrong message written")
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools.go
new file mode 100644
index 0000000..76e84f9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools.go
@@ -0,0 +1,119 @@
+// Package pools provides a collection of pools which provide various
+// data types with buffers. These can be used to lower the number of
+// memory allocations and reuse buffers.
+//
+// New pools should be added to this package to allow them to be
+// shared across packages.
+//
+// Utility functions which operate on pools should be added to this
+// package to allow them to be reused.
+package pools
+
+import (
+ "bufio"
+ "io"
+ "sync"
+
+ "github.com/docker/docker/pkg/ioutils"
+)
+
+var (
+ // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer.
+ BufioReader32KPool *BufioReaderPool
+ // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer.
+ BufioWriter32KPool *BufioWriterPool
+)
+
+const buffer32K = 32 * 1024
+
+// BufioReaderPool is a bufio reader that uses sync.Pool.
+type BufioReaderPool struct {
+ pool sync.Pool
+}
+
+func init() {
+ BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K)
+ BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K)
+}
+
+// newBufioReaderPoolWithSize is unexported because new pools should be
+// added here to be shared where required.
+func newBufioReaderPoolWithSize(size int) *BufioReaderPool {
+ pool := sync.Pool{
+ New: func() interface{} { return bufio.NewReaderSize(nil, size) },
+ }
+ return &BufioReaderPool{pool: pool}
+}
+
+// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool.
+func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader {
+ buf := bufPool.pool.Get().(*bufio.Reader)
+ buf.Reset(r)
+ return buf
+}
+
+// Put puts the bufio.Reader back into the pool.
+func (bufPool *BufioReaderPool) Put(b *bufio.Reader) {
+ b.Reset(nil)
+ bufPool.pool.Put(b)
+}
+
+// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy.
+func Copy(dst io.Writer, src io.Reader) (written int64, err error) {
+ buf := BufioReader32KPool.Get(src)
+ written, err = io.Copy(dst, buf)
+ BufioReader32KPool.Put(buf)
+ return
+}
+
+// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back
+// into the pool and closes the reader if it's an io.ReadCloser.
+func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser {
+ return ioutils.NewReadCloserWrapper(r, func() error {
+ if readCloser, ok := r.(io.ReadCloser); ok {
+ readCloser.Close()
+ }
+ bufPool.Put(buf)
+ return nil
+ })
+}
+
+// BufioWriterPool is a bufio writer that uses sync.Pool.
+type BufioWriterPool struct {
+ pool sync.Pool
+}
+
+// newBufioWriterPoolWithSize is unexported because new pools should be
+// added here to be shared where required.
+func newBufioWriterPoolWithSize(size int) *BufioWriterPool {
+ pool := sync.Pool{
+ New: func() interface{} { return bufio.NewWriterSize(nil, size) },
+ }
+ return &BufioWriterPool{pool: pool}
+}
+
+// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool.
+func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer {
+ buf := bufPool.pool.Get().(*bufio.Writer)
+ buf.Reset(w)
+ return buf
+}
+
+// Put puts the bufio.Writer back into the pool.
+func (bufPool *BufioWriterPool) Put(b *bufio.Writer) {
+ b.Reset(nil)
+ bufPool.pool.Put(b)
+}
+
+// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back
+// into the pool and closes the writer if it's an io.WriteCloser.
+func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser {
+ return ioutils.NewWriteCloserWrapper(w, func() error {
+ buf.Flush()
+ if writeCloser, ok := w.(io.WriteCloser); ok {
+ writeCloser.Close()
+ }
+ bufPool.Put(buf)
+ return nil
+ })
+}
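+
+// Illustrative usage of the reader pool (src and dst are hypothetical
+// placeholders):
+//
+//   buf := BufioReader32KPool.Get(src) // 32K buffered reader, reused via sync.Pool
+//   defer BufioReader32KPool.Put(buf)  // resets it and returns it to the pool
+//   io.Copy(dst, buf)
+//
+// or simply Copy(dst, src), which performs the same Get/Copy/Put sequence.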
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools_test.go
new file mode 100644
index 0000000..7868980
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools_test.go
@@ -0,0 +1,162 @@
+package pools
+
+import (
+ "bufio"
+ "bytes"
+ "io"
+ "strings"
+ "testing"
+)
+
+func TestBufioReaderPoolGetWithNoReaderShouldCreateOne(t *testing.T) {
+ reader := BufioReader32KPool.Get(nil)
+ if reader == nil {
+ t.Fatalf("BufioReaderPool should have create a bufio.Reader but did not.")
+ }
+}
+
+func TestBufioReaderPoolPutAndGet(t *testing.T) {
+ sr := bufio.NewReader(strings.NewReader("foobar"))
+ reader := BufioReader32KPool.Get(sr)
+ if reader == nil {
+ t.Fatalf("BufioReaderPool should not return a nil reader.")
+ }
+ // verify the first 3 bytes
+ buf1 := make([]byte, 3)
+ _, err := reader.Read(buf1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if actual := string(buf1); actual != "foo" {
+ t.Fatalf("The first letter should have been 'foo' but was %v", actual)
+ }
+ BufioReader32KPool.Put(reader)
+ // Try to read the next 3 bytes
+ _, err = sr.Read(make([]byte, 3))
+ if err == nil || err != io.EOF {
+ t.Fatalf("The buffer should have been empty, issue an EOF error.")
+ }
+}
+
+type simpleReaderCloser struct {
+ io.Reader
+ closed bool
+}
+
+func (r *simpleReaderCloser) Close() error {
+ r.closed = true
+ return nil
+}
+
+func TestNewReadCloserWrapperWithAReadCloser(t *testing.T) {
+ br := bufio.NewReader(strings.NewReader(""))
+ sr := &simpleReaderCloser{
+ Reader: strings.NewReader("foobar"),
+ closed: false,
+ }
+ reader := BufioReader32KPool.NewReadCloserWrapper(br, sr)
+ if reader == nil {
+ t.Fatalf("NewReadCloserWrapper should not return a nil reader.")
+ }
+ // Verify the content of reader
+ buf := make([]byte, 3)
+ _, err := reader.Read(buf)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if actual := string(buf); actual != "foo" {
+ t.Fatalf("The first 3 letter should have been 'foo' but were %v", actual)
+ }
+ reader.Close()
+ // Read 3 more bytes "bar"
+ _, err = reader.Read(buf)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if actual := string(buf); actual != "bar" {
+ t.Fatalf("The first 3 letter should have been 'bar' but were %v", actual)
+ }
+ if !sr.closed {
+ t.Fatalf("The ReaderCloser should have been closed, it is not.")
+ }
+}
+
+func TestBufioWriterPoolGetWithNoReaderShouldCreateOne(t *testing.T) {
+ writer := BufioWriter32KPool.Get(nil)
+ if writer == nil {
+ t.Fatalf("BufioWriterPool should have create a bufio.Writer but did not.")
+ }
+}
+
+func TestBufioWriterPoolPutAndGet(t *testing.T) {
+ buf := new(bytes.Buffer)
+ bw := bufio.NewWriter(buf)
+ writer := BufioWriter32KPool.Get(bw)
+ if writer == nil {
+ t.Fatalf("BufioReaderPool should not return a nil writer.")
+ }
+ written, err := writer.Write([]byte("foobar"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if written != 6 {
+ t.Fatalf("Should have written 6 bytes, but wrote %v bytes", written)
+ }
+ // Make sure we flush all the way down to the underlying buffer
+ writer.Flush()
+ bw.Flush()
+ if len(buf.Bytes()) != 6 {
+ t.Fatalf("The buffer should contain 6 bytes ('foobar') but contains %v ('%v')", buf.Bytes(), string(buf.Bytes()))
+ }
+ // Reset the buffer
+ buf.Reset()
+ BufioWriter32KPool.Put(writer)
+ // Try to write something
+ written, err = writer.Write([]byte("barfoo"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ // If we now try to flush it, it should panic (the writer is nil)
+ // recover it
+ defer func() {
+ if r := recover(); r == nil {
+ t.Fatal("Trying to flush the writter should have 'paniced', did not.")
+ }
+ }()
+ writer.Flush()
+}
+
+type simpleWriterCloser struct {
+ io.Writer
+ closed bool
+}
+
+func (r *simpleWriterCloser) Close() error {
+ r.closed = true
+ return nil
+}
+
+func TestNewWriteCloserWrapperWithAWriteCloser(t *testing.T) {
+ buf := new(bytes.Buffer)
+ bw := bufio.NewWriter(buf)
+ sw := &simpleWriterCloser{
+ Writer: new(bytes.Buffer),
+ closed: false,
+ }
+ bw.Flush()
+ writer := BufioWriter32KPool.NewWriteCloserWrapper(bw, sw)
+ if writer == nil {
+ t.Fatalf("BufioReaderPool should not return a nil writer.")
+ }
+ written, err := writer.Write([]byte("foobar"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if written != 6 {
+ t.Fatalf("Should have written 6 bytes, but wrote %v bytes", written)
+ }
+ writer.Close()
+ if !sw.closed {
+ t.Fatalf("The ReaderCloser should have been closed, it is not.")
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/promise/promise.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/promise/promise.go
new file mode 100644
index 0000000..dd52b90
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/promise/promise.go
@@ -0,0 +1,11 @@
+package promise
+
+// Go is a basic promise implementation: it calls a function in a goroutine
+// and returns a channel which will later return the function's return value.
+func Go(f func() error) chan error {
+ ch := make(chan error, 1)
+ go func() {
+ ch <- f()
+ }()
+ return ch
+}
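+
+// Illustrative usage (doWork is a hypothetical placeholder):
+//
+//   errCh := Go(doWork)
+//   // ... do other work concurrently ...
+//   if err := <-errCh; err != nil {
+//       // handle the error returned by doWork
+//   }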
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/stdcopy/stdcopy.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/stdcopy/stdcopy.go
new file mode 100644
index 0000000..684b4d4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/stdcopy/stdcopy.go
@@ -0,0 +1,168 @@
+package stdcopy
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+
+ "github.com/Sirupsen/logrus"
+)
+
+const (
+ StdWriterPrefixLen = 8
+ StdWriterFdIndex = 0
+ StdWriterSizeIndex = 4
+)
+
+type StdType [StdWriterPrefixLen]byte
+
+var (
+ Stdin StdType = StdType{0: 0}
+ Stdout StdType = StdType{0: 1}
+ Stderr StdType = StdType{0: 2}
+)
+
+type StdWriter struct {
+ io.Writer
+ prefix StdType
+ sizeBuf []byte
+}
+
+func (w *StdWriter) Write(buf []byte) (n int, err error) {
+ var n1, n2 int
+ if w == nil || w.Writer == nil {
+ return 0, errors.New("Writer not instantiated")
+ }
+ binary.BigEndian.PutUint32(w.prefix[4:], uint32(len(buf)))
+ n1, err = w.Writer.Write(w.prefix[:])
+ if err != nil {
+ n = n1 - StdWriterPrefixLen
+ } else {
+ n2, err = w.Writer.Write(buf)
+ n = n1 + n2 - StdWriterPrefixLen
+ }
+ if n < 0 {
+ n = 0
+ }
+ return
+}
+
+// NewStdWriter instantiates a new Writer.
+// Everything written to it will be encapsulated using a custom format,
+// and written to the underlying `w` stream.
+// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection.
+// `t` indicates the id of the stream to encapsulate.
+// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr.
+func NewStdWriter(w io.Writer, t StdType) *StdWriter {
+ return &StdWriter{
+ Writer: w,
+ prefix: t,
+ sizeBuf: make([]byte, 4),
+ }
+}
+
+var ErrInvalidStdHeader = errors.New("Unrecognized input header")
+
+// StdCopy is a modified version of io.Copy.
+//
+// StdCopy will demultiplex `src`, assuming that it contains two streams,
+// previously multiplexed together using a StdWriter instance.
+// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`.
+//
+// StdCopy will read until it hits EOF on `src`. It will then return a nil error.
+// In other words: if `err` is non-nil, it indicates a real underlying error.
+//
+// `written` will hold the total number of bytes written to `dstout` and `dsterr`.
+func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) {
+ var (
+ buf = make([]byte, 32*1024+StdWriterPrefixLen+1)
+ bufLen = len(buf)
+ nr, nw int
+ er, ew error
+ out io.Writer
+ frameSize int
+ )
+
+ for {
+ // Make sure we have at least a full header
+ for nr < StdWriterPrefixLen {
+ var nr2 int
+ nr2, er = src.Read(buf[nr:])
+ nr += nr2
+ if er == io.EOF {
+ if nr < StdWriterPrefixLen {
+ logrus.Debugf("Corrupted prefix: %v", buf[:nr])
+ return written, nil
+ }
+ break
+ }
+ if er != nil {
+ logrus.Debugf("Error reading header: %s", er)
+ return 0, er
+ }
+ }
+
+ // Check the first byte to know where to write
+ switch buf[StdWriterFdIndex] {
+ case 0:
+ fallthrough
+ case 1:
+ // Write on stdout
+ out = dstout
+ case 2:
+ // Write on stderr
+ out = dsterr
+ default:
+ logrus.Debugf("Error selecting output fd: (%d)", buf[StdWriterFdIndex])
+ return 0, ErrInvalidStdHeader
+ }
+
+ // Retrieve the size of the frame
+ frameSize = int(binary.BigEndian.Uint32(buf[StdWriterSizeIndex : StdWriterSizeIndex+4]))
+ logrus.Debugf("framesize: %d", frameSize)
+
+ // Check if the buffer is big enough to read the frame.
+ // Extend it if necessary.
+ if frameSize+StdWriterPrefixLen > bufLen {
+ logrus.Debugf("Extending buffer cap by %d (was %d)", frameSize+StdWriterPrefixLen-bufLen+1, len(buf))
+ buf = append(buf, make([]byte, frameSize+StdWriterPrefixLen-bufLen+1)...)
+ bufLen = len(buf)
+ }
+
+ // While the amount of bytes read is less than the size of the frame + header, we keep reading
+ for nr < frameSize+StdWriterPrefixLen {
+ var nr2 int
+ nr2, er = src.Read(buf[nr:])
+ nr += nr2
+ if er == io.EOF {
+ if nr < frameSize+StdWriterPrefixLen {
+ logrus.Debugf("Corrupted frame: %v", buf[StdWriterPrefixLen:nr])
+ return written, nil
+ }
+ break
+ }
+ if er != nil {
+ logrus.Debugf("Error reading frame: %s", er)
+ return 0, er
+ }
+ }
+
+ // Write the retrieved frame (without header)
+ nw, ew = out.Write(buf[StdWriterPrefixLen : frameSize+StdWriterPrefixLen])
+ if ew != nil {
+ logrus.Debugf("Error writing frame: %s", ew)
+ return 0, ew
+ }
+ // If the frame has not been fully written: error
+ if nw != frameSize {
+ logrus.Debugf("Error Short Write: (%d on %d)", nw, frameSize)
+ return 0, io.ErrShortWrite
+ }
+ written += int64(nw)
+
+ // Move the rest of the buffer to the beginning
+ copy(buf, buf[frameSize+StdWriterPrefixLen:])
+ // Move the index
+ nr -= frameSize + StdWriterPrefixLen
+ }
+}
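+
+// Illustrative round trip over an in-memory buffer: two streams are muxed
+// with StdWriter headers, then separated again by StdCopy.
+//
+//   var muxed bytes.Buffer
+//   NewStdWriter(&muxed, Stdout).Write([]byte("to stdout"))
+//   NewStdWriter(&muxed, Stderr).Write([]byte("to stderr"))
+//   var stdout, stderr bytes.Buffer
+//   StdCopy(&stdout, &stderr, &muxed) // stdout gets "to stdout", stderr gets "to stderr"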
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go
new file mode 100644
index 0000000..a9fd73a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go
@@ -0,0 +1,85 @@
+package stdcopy
+
+import (
+ "bytes"
+ "io/ioutil"
+ "strings"
+ "testing"
+)
+
+func TestNewStdWriter(t *testing.T) {
+ writer := NewStdWriter(ioutil.Discard, Stdout)
+ if writer == nil {
+ t.Fatalf("NewStdWriter with an invalid StdType should not return nil.")
+ }
+}
+
+func TestWriteWithUninitializedStdWriter(t *testing.T) {
+ writer := StdWriter{
+ Writer: nil,
+ prefix: Stdout,
+ sizeBuf: make([]byte, 4),
+ }
+ n, err := writer.Write([]byte("Something here"))
+ if n != 0 || err == nil {
+ t.Fatalf("Should fail when given an uncomplete or uninitialized StdWriter")
+ }
+}
+
+func TestWriteWithNilBytes(t *testing.T) {
+ writer := NewStdWriter(ioutil.Discard, Stdout)
+ n, err := writer.Write(nil)
+ if err != nil {
+ t.Fatalf("Shouldn't have fail when given no data")
+ }
+ if n > 0 {
+ t.Fatalf("Write should have written 0 byte, but has written %d", n)
+ }
+}
+
+func TestWrite(t *testing.T) {
+ writer := NewStdWriter(ioutil.Discard, Stdout)
+ data := []byte("Test StdWrite.Write")
+ n, err := writer.Write(data)
+ if err != nil {
+ t.Fatalf("Error while writing with StdWrite")
+ }
+ if n != len(data) {
+ t.Fatalf("Write should have writen %d byte but wrote %d.", len(data), n)
+ }
+}
+
+func TestStdCopyWithInvalidInputHeader(t *testing.T) {
+ dstOut := NewStdWriter(ioutil.Discard, Stdout)
+ dstErr := NewStdWriter(ioutil.Discard, Stderr)
+ src := strings.NewReader("Invalid input")
+ _, err := StdCopy(dstOut, dstErr, src)
+ if err == nil {
+ t.Fatal("StdCopy with invalid input header should fail.")
+ }
+}
+
+func TestStdCopyWithCorruptedPrefix(t *testing.T) {
+ data := []byte{0x01, 0x02, 0x03}
+ src := bytes.NewReader(data)
+ written, err := StdCopy(nil, nil, src)
+ if err != nil {
+ t.Fatalf("StdCopy should not return an error with corrupted prefix.")
+ }
+ if written != 0 {
+ t.Fatalf("StdCopy should have written 0, but has written %d", written)
+ }
+}
+
+func BenchmarkWrite(b *testing.B) {
+ w := NewStdWriter(ioutil.Discard, Stdout)
+ data := []byte("Test line for testing stdwriter performance\n")
+ data = bytes.Repeat(data, 100)
+ b.SetBytes(int64(len(data)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ if _, err := w.Write(data); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/errors.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/errors.go
new file mode 100644
index 0000000..6304518
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/errors.go
@@ -0,0 +1,9 @@
+package system
+
+import (
+ "errors"
+)
+
+var (
+ ErrNotSupportedPlatform = errors.New("platform and architecture are not supported")
+)
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/events_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/events_windows.go
new file mode 100644
index 0000000..23f7c61
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/events_windows.go
@@ -0,0 +1,83 @@
+package system
+
+// This file implements syscalls for Win32 events which are not implemented
+// in golang.
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+const (
+ EVENT_ALL_ACCESS = 0x1F0003
+ EVENT_MODIFY_STATUS = 0x0002
+)
+
+var (
+ procCreateEvent = modkernel32.NewProc("CreateEventW")
+ procOpenEvent = modkernel32.NewProc("OpenEventW")
+ procSetEvent = modkernel32.NewProc("SetEvent")
+ procResetEvent = modkernel32.NewProc("ResetEvent")
+ procPulseEvent = modkernel32.NewProc("PulseEvent")
+)
+
+func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool, initialState bool, name string) (handle syscall.Handle, err error) {
+ namep, _ := syscall.UTF16PtrFromString(name)
+ var _p1 uint32 = 0
+ if manualReset {
+ _p1 = 1
+ }
+ var _p2 uint32 = 0
+ if initialState {
+ _p2 = 1
+ }
+ r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep)))
+ use(unsafe.Pointer(namep))
+ handle = syscall.Handle(r0)
+ // CreateEventW returns NULL (not INVALID_HANDLE_VALUE) on failure
+ if handle == 0 {
+ err = e1
+ }
+ return
+}
+
+func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle syscall.Handle, err error) {
+ namep, _ := syscall.UTF16PtrFromString(name)
+ var _p1 uint32 = 0
+ if inheritHandle {
+ _p1 = 1
+ }
+ r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep)))
+ use(unsafe.Pointer(namep))
+ handle = syscall.Handle(r0)
+ // OpenEventW returns NULL (not INVALID_HANDLE_VALUE) on failure
+ if handle == 0 {
+ err = e1
+ }
+ return
+}
+
+func SetEvent(handle syscall.Handle) (err error) {
+ return setResetPulse(handle, procSetEvent)
+}
+
+func ResetEvent(handle syscall.Handle) (err error) {
+ return setResetPulse(handle, procResetEvent)
+}
+
+func PulseEvent(handle syscall.Handle) (err error) {
+ return setResetPulse(handle, procPulseEvent)
+}
+
+func setResetPulse(handle syscall.Handle, proc *syscall.LazyProc) (err error) {
+ r0, _, e1 := proc.Call(uintptr(handle))
+ // Win32 BOOL convention: a nonzero return means success; on failure
+ // the error comes from GetLastError (surfaced by Call as e1)
+ if r0 == 0 {
+ err = e1
+ }
+ return
+}
+
+var temp unsafe.Pointer
+
+// use ensures a variable is kept alive so the garbage collector cannot
+// free it while it is still needed
+func use(p unsafe.Pointer) {
+ temp = p
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/filesys.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/filesys.go
new file mode 100644
index 0000000..e1f70e8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/filesys.go
@@ -0,0 +1,11 @@
+// +build !windows
+
+package system
+
+import (
+ "os"
+)
+
+func MkdirAll(path string, perm os.FileMode) error {
+ return os.MkdirAll(path, perm)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/filesys_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/filesys_windows.go
new file mode 100644
index 0000000..90b5006
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/filesys_windows.go
@@ -0,0 +1,64 @@
+// +build windows
+
+package system
+
+import (
+ "os"
+ "regexp"
+ "syscall"
+)
+
+// MkdirAll implementation that is volume path aware for Windows.
+func MkdirAll(path string, perm os.FileMode) error {
+ if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) {
+ return nil
+ }
+
+ // The rest of this method is copied from os.MkdirAll and should be kept
+ // as-is to ensure compatibility.
+
+ // Fast path: if we can tell whether path is a directory or file, stop with success or error.
+ dir, err := os.Stat(path)
+ if err == nil {
+ if dir.IsDir() {
+ return nil
+ }
+ return &os.PathError{
+ Op: "mkdir",
+ Path: path,
+ Err: syscall.ENOTDIR,
+ }
+ }
+
+ // Slow path: make sure parent exists and then call Mkdir for path.
+ i := len(path)
+ for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
+ i--
+ }
+
+ j := i
+ for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
+ j--
+ }
+
+ if j > 1 {
+ // Create parent
+ err = MkdirAll(path[0:j-1], perm)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Parent now exists; invoke Mkdir and use its result.
+ err = os.Mkdir(path, perm)
+ if err != nil {
+ // Handle arguments like "foo/." by
+ // double-checking that directory doesn't exist.
+ dir, err1 := os.Lstat(path)
+ if err1 == nil && dir.IsDir() {
+ return nil
+ }
+ return err
+ }
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat.go
new file mode 100644
index 0000000..d0e43b3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat.go
@@ -0,0 +1,19 @@
+// +build !windows
+
+package system
+
+import (
+ "syscall"
+)
+
+// Lstat takes a path to a file and returns
+// a system.Stat_t type pertaining to that file.
+//
+// Returns an error if the file does not exist.
+func Lstat(path string) (*Stat_t, error) {
+ s := &syscall.Stat_t{}
+ if err := syscall.Lstat(path, s); err != nil {
+ return nil, err
+ }
+ return fromStatT(s)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_test.go
new file mode 100644
index 0000000..6bac492
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_test.go
@@ -0,0 +1,28 @@
+package system
+
+import (
+ "os"
+ "testing"
+)
+
+// TestLstat tests Lstat for existing and non-existing files
+func TestLstat(t *testing.T) {
+ file, invalid, _, dir := prepareFiles(t)
+ defer os.RemoveAll(dir)
+
+ statFile, err := Lstat(file)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if statFile == nil {
+ t.Fatal("returned empty stat for existing file")
+ }
+
+ statInvalid, err := Lstat(invalid)
+ if err == nil {
+ t.Fatal("did not return error for non-existing file")
+ }
+ if statInvalid != nil {
+ t.Fatal("returned non-nil stat for non-existing file")
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_windows.go
new file mode 100644
index 0000000..eee1be2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_windows.go
@@ -0,0 +1,29 @@
+// +build windows
+
+package system
+
+import (
+ "os"
+)
+
+// Some explanation for my own sanity, and hopefully maintainers in the
+// future.
+//
+// Lstat calls os.Lstat to get a fileinfo interface back.
+// This is then copied into our own locally defined structure.
+// Note the Linux version uses fromStatT to do the copy back,
+// but that is not strictly necessary when already in an OS-specific module.
+
+func Lstat(path string) (*Stat_t, error) {
+ fi, err := os.Lstat(path)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Stat_t{
+ name: fi.Name(),
+ size: fi.Size(),
+ mode: fi.Mode(),
+ modTime: fi.ModTime(),
+ isDir: fi.IsDir()}, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo.go
new file mode 100644
index 0000000..3b6e947
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo.go
@@ -0,0 +1,17 @@
+package system
+
+// MemInfo contains memory statistics of the host system.
+type MemInfo struct {
+ // Total usable RAM (i.e. physical RAM minus a few reserved bits and the
+ // kernel binary code).
+ MemTotal int64
+
+ // Amount of free memory.
+ MemFree int64
+
+ // Total amount of swap space available.
+ SwapTotal int64
+
+ // Amount of swap space that is currently unused.
+ SwapFree int64
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux.go
new file mode 100644
index 0000000..e2ca140
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux.go
@@ -0,0 +1,71 @@
+package system
+
+import (
+ "bufio"
+ "errors"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/docker/docker/pkg/units"
+)
+
+var (
+ ErrMalformed = errors.New("malformed file")
+)
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+// MemInfo type.
+func ReadMemInfo() (*MemInfo, error) {
+ file, err := os.Open("/proc/meminfo")
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+ return parseMemInfo(file)
+}
+
+// parseMemInfo parses the /proc/meminfo file into
+// a MemInfo object given a io.Reader to the file.
+//
+// Returns an error if there are problems reading from the file.
+func parseMemInfo(reader io.Reader) (*MemInfo, error) {
+ meminfo := &MemInfo{}
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() {
+ // Expected format: ["MemTotal:", "1234", "kB"]
+ parts := strings.Fields(scanner.Text())
+
+ // Sanity checks: Skip malformed entries.
+ if len(parts) < 3 || parts[2] != "kB" {
+ continue
+ }
+
+ // Convert to bytes.
+ size, err := strconv.Atoi(parts[1])
+ if err != nil {
+ continue
+ }
+ bytes := int64(size) * units.KiB
+
+ switch parts[0] {
+ case "MemTotal:":
+ meminfo.MemTotal = bytes
+ case "MemFree:":
+ meminfo.MemFree = bytes
+ case "SwapTotal:":
+ meminfo.SwapTotal = bytes
+ case "SwapFree:":
+ meminfo.SwapFree = bytes
+ }
+
+ }
+
+ // Handle errors that may have occurred during the reading of the file.
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+
+ return meminfo, nil
+}
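+
+// Illustrative usage:
+//
+//   info, err := ReadMemInfo()
+//   if err == nil {
+//       // info.MemTotal and info.MemFree are in bytes: the kB values from
+//       // /proc/meminfo are multiplied by units.KiB above
+//   }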
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux_test.go
new file mode 100644
index 0000000..10ddf79
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux_test.go
@@ -0,0 +1,38 @@
+package system
+
+import (
+ "strings"
+ "testing"
+
+ "github.com/docker/docker/pkg/units"
+)
+
+// TestMemInfo tests parseMemInfo with a static meminfo string
+func TestMemInfo(t *testing.T) {
+ const input = `
+ MemTotal: 1 kB
+ MemFree: 2 kB
+ SwapTotal: 3 kB
+ SwapFree: 4 kB
+ Malformed1:
+ Malformed2: 1
+ Malformed3: 2 MB
+ Malformed4: X kB
+ `
+ meminfo, err := parseMemInfo(strings.NewReader(input))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if meminfo.MemTotal != 1*units.KiB {
+ t.Fatalf("Unexpected MemTotal: %d", meminfo.MemTotal)
+ }
+ if meminfo.MemFree != 2*units.KiB {
+ t.Fatalf("Unexpected MemFree: %d", meminfo.MemFree)
+ }
+ if meminfo.SwapTotal != 3*units.KiB {
+ t.Fatalf("Unexpected SwapTotal: %d", meminfo.SwapTotal)
+ }
+ if meminfo.SwapFree != 4*units.KiB {
+ t.Fatalf("Unexpected SwapFree: %d", meminfo.SwapFree)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_unsupported.go
new file mode 100644
index 0000000..604d338
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_unsupported.go
@@ -0,0 +1,7 @@
+// +build !linux,!windows
+
+package system
+
+func ReadMemInfo() (*MemInfo, error) {
+ return nil, ErrNotSupportedPlatform
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_windows.go
new file mode 100644
index 0000000..d466425
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_windows.go
@@ -0,0 +1,44 @@
+package system
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var (
+ modkernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+ procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx")
+)
+
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx
+type memorystatusex struct {
+ dwLength uint32
+ dwMemoryLoad uint32
+ ullTotalPhys uint64
+ ullAvailPhys uint64
+ ullTotalPageFile uint64
+ ullAvailPageFile uint64
+ ullTotalVirtual uint64
+ ullAvailVirtual uint64
+ ullAvailExtendedVirtual uint64
+}
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+// MemInfo type.
+func ReadMemInfo() (*MemInfo, error) {
+ msi := &memorystatusex{
+ dwLength: 64,
+ }
+ r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi)))
+ if r1 == 0 {
+ return &MemInfo{}, nil
+ }
+ return &MemInfo{
+ MemTotal: int64(msi.ullTotalPhys),
+ MemFree: int64(msi.ullAvailPhys),
+ SwapTotal: int64(msi.ullTotalPageFile),
+ SwapFree: int64(msi.ullAvailPageFile),
+ }, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod.go
new file mode 100644
index 0000000..26617eb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod.go
@@ -0,0 +1,20 @@
+// +build !windows
+
+package system
+
+import (
+ "syscall"
+)
+
+// Mknod creates a filesystem node (file, device special file or named pipe) named path
+// with attributes specified by mode and dev
+func Mknod(path string, mode uint32, dev int) error {
+ return syscall.Mknod(path, mode, dev)
+}
+
+// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes.
+// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major,
+// then the top 12 bits of the minor
+func Mkdev(major int64, minor int64) uint32 {
+ return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
+}
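+
+// Worked example of the packing above: Mkdev(8, 1), the traditional Linux
+// numbers for /dev/sda1, yields ((1&0xfff00)<<12) | ((8&0xfff)<<8) | (1&0xff),
+// i.e. 0x801.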
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod_windows.go
new file mode 100644
index 0000000..1811542
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod_windows.go
@@ -0,0 +1,11 @@
+// +build windows
+
+package system
+
+func Mknod(path string, mode uint32, dev int) error {
+ return ErrNotSupportedPlatform
+}
+
+func Mkdev(major int64, minor int64) uint32 {
+ panic("Mkdev not implemented on Windows.")
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat.go
new file mode 100644
index 0000000..e2ecfe5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat.go
@@ -0,0 +1,46 @@
+// +build !windows
+
+package system
+
+import (
+ "syscall"
+)
+
+// Stat_t type contains status of a file. It contains metadata
+// like permission, owner, group, size, etc about a file
+type Stat_t struct {
+ mode uint32
+ uid uint32
+ gid uint32
+ rdev uint64
+ size int64
+ mtim syscall.Timespec
+}
+
+func (s Stat_t) Mode() uint32 {
+ return s.mode
+}
+
+func (s Stat_t) Uid() uint32 {
+ return s.uid
+}
+
+func (s Stat_t) Gid() uint32 {
+ return s.gid
+}
+
+func (s Stat_t) Rdev() uint64 {
+ return s.rdev
+}
+
+func (s Stat_t) Size() int64 {
+ return s.size
+}
+
+func (s Stat_t) Mtim() syscall.Timespec {
+ return s.mtim
+}
+
+func (s Stat_t) GetLastModification() syscall.Timespec {
+ return s.Mtim()
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_linux.go
new file mode 100644
index 0000000..80262d9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_linux.go
@@ -0,0 +1,33 @@
+package system
+
+import (
+ "syscall"
+)
+
+// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
+func fromStatT(s *syscall.Stat_t) (*Stat_t, error) {
+ return &Stat_t{size: s.Size,
+ mode: s.Mode,
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: s.Rdev,
+ mtim: s.Mtim}, nil
+}
+
+// FromStatT exists only on Linux, and loads a system.Stat_t from a
+// syscall.Stat_t.
+func FromStatT(s *syscall.Stat_t) (*Stat_t, error) {
+ return fromStatT(s)
+}
+
+// Stat takes a path to a file and returns
+// a system.Stat_t type pertaining to that file.
+//
+// Returns an error if the file does not exist.
+func Stat(path string) (*Stat_t, error) {
+ s := &syscall.Stat_t{}
+ if err := syscall.Stat(path, s); err != nil {
+ return nil, err
+ }
+ return fromStatT(s)
+}
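+
+// Illustrative usage ("/etc/hostname" is an arbitrary example path):
+//
+//   st, err := Stat("/etc/hostname")
+//   if err == nil {
+//       // st.Size(), st.Uid() and st.Mtim() expose the raw syscall fields
+//   }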
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_test.go
new file mode 100644
index 0000000..4534129
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_test.go
@@ -0,0 +1,37 @@
+package system
+
+import (
+ "os"
+ "syscall"
+ "testing"
+)
+
+// TestFromStatT tests fromStatT for a tempfile
+func TestFromStatT(t *testing.T) {
+ file, _, _, dir := prepareFiles(t)
+ defer os.RemoveAll(dir)
+
+ stat := &syscall.Stat_t{}
+ err := syscall.Lstat(file, stat)
+
+ s, err := fromStatT(stat)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if stat.Mode != s.Mode() {
+ t.Fatal("got invalid mode")
+ }
+ if stat.Uid != s.Uid() {
+ t.Fatal("got invalid uid")
+ }
+ if stat.Gid != s.Gid() {
+ t.Fatal("got invalid gid")
+ }
+ if stat.Rdev != s.Rdev() {
+ t.Fatal("got invalid rdev")
+ }
+ if stat.Mtim != s.Mtim() {
+ t.Fatal("got invalid mtim")
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_unsupported.go
new file mode 100644
index 0000000..7e0d034
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_unsupported.go
@@ -0,0 +1,17 @@
+// +build !linux,!windows
+
+package system
+
+import (
+ "syscall"
+)
+
+// fromStatT creates a system.Stat_t type from a syscall.Stat_t type
+func fromStatT(s *syscall.Stat_t) (*Stat_t, error) {
+ return &Stat_t{size: s.Size,
+ mode: uint32(s.Mode),
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: uint64(s.Rdev),
+ mtim: s.Mtimespec}, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_windows.go
new file mode 100644
index 0000000..b1fd39e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_windows.go
@@ -0,0 +1,36 @@
+// +build windows
+
+package system
+
+import (
+ "os"
+ "time"
+)
+
+type Stat_t struct {
+ name string
+ size int64
+ mode os.FileMode
+ modTime time.Time
+ isDir bool
+}
+
+func (s Stat_t) Name() string {
+ return s.name
+}
+
+func (s Stat_t) Size() int64 {
+ return s.size
+}
+
+func (s Stat_t) Mode() os.FileMode {
+ return s.mode
+}
+
+func (s Stat_t) ModTime() time.Time {
+ return s.modTime
+}
+
+func (s Stat_t) IsDir() bool {
+ return s.isDir
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask.go
new file mode 100644
index 0000000..fddbecd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask.go
@@ -0,0 +1,11 @@
+// +build !windows
+
+package system
+
+import (
+ "syscall"
+)
+
+func Umask(newmask int) (oldmask int, err error) {
+ return syscall.Umask(newmask), nil
+}
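A hedged sketch of the intended Unix usage: tighten the process umask around a sensitive file creation, then restore the previous value (the /tmp path below is a hypothetical example):

```go
package main

import (
	"io/ioutil"
	"log"

	"github.com/docker/docker/pkg/system"
)

func main() {
	// Drop group/other permission bits for anything created below.
	old, err := system.Umask(0077)
	if err != nil {
		log.Fatal(err) // only possible on Windows (ErrNotSupportedPlatform)
	}
	defer system.Umask(old) // restore the previous mask

	// Requested mode 0666 is masked down to 0600 by the umask.
	if err := ioutil.WriteFile("/tmp/private", []byte("secret"), 0666); err != nil {
		log.Fatal(err)
	}
}
```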
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask_windows.go
new file mode 100644
index 0000000..3be563f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask_windows.go
@@ -0,0 +1,8 @@
+// +build windows
+
+package system
+
+func Umask(newmask int) (oldmask int, err error) {
+ // should not be called on the CLI code path
+ return 0, ErrNotSupportedPlatform
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_darwin.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_darwin.go
new file mode 100644
index 0000000..4c6002f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_darwin.go
@@ -0,0 +1,11 @@
+package system
+
+import "syscall"
+
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+ return ErrNotSupportedPlatform
+}
+
+func UtimesNano(path string, ts []syscall.Timespec) error {
+ return syscall.UtimesNano(path, ts)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_freebsd.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_freebsd.go
new file mode 100644
index 0000000..ceaa044
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_freebsd.go
@@ -0,0 +1,24 @@
+package system
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+ var _path *byte
+ _path, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+
+ if _, _, err := syscall.Syscall(syscall.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != syscall.ENOSYS {
+ return err
+ }
+
+ return nil
+}
+
+func UtimesNano(path string, ts []syscall.Timespec) error {
+ return syscall.UtimesNano(path, ts)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_linux.go
new file mode 100644
index 0000000..8f90298
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_linux.go
@@ -0,0 +1,28 @@
+package system
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+ // These are not currently available in syscall
+ AT_FDCWD := -100
+ AT_SYMLINK_NOFOLLOW := 0x100
+
+ var _path *byte
+ _path, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+
+ if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(AT_FDCWD), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(AT_SYMLINK_NOFOLLOW), 0, 0); err != 0 && err != syscall.ENOSYS {
+ return err
+ }
+
+ return nil
+}
+
+func UtimesNano(path string, ts []syscall.Timespec) error {
+ return syscall.UtimesNano(path, ts)
+}
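The utimensat(2) wrapper above is what lets callers stamp a symlink itself rather than its target. A minimal sketch, assuming a Linux build, the vendored import path, and a hypothetical /tmp/symlink:

```go
package main

import (
	"log"
	"syscall"
	"time"

	"github.com/docker/docker/pkg/system"
)

func main() {
	// Build the (atime, mtime) pair expected by LUtimesNano.
	when := time.Date(2015, 1, 1, 0, 0, 0, 0, time.UTC)
	ts := []syscall.Timespec{
		syscall.NsecToTimespec(when.UnixNano()),
		syscall.NsecToTimespec(when.UnixNano()),
	}

	// AT_SYMLINK_NOFOLLOW means the link itself is stamped,
	// not the file it points to.
	if err := system.LUtimesNano("/tmp/symlink", ts); err != nil {
		log.Fatal(err)
	}
}
```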
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_test.go
new file mode 100644
index 0000000..350cce1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_test.go
@@ -0,0 +1,66 @@
+package system
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "syscall"
+ "testing"
+)
+
+// prepareFiles creates files for testing in the temp directory
+func prepareFiles(t *testing.T) (string, string, string, string) {
+ dir, err := ioutil.TempDir("", "docker-system-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ file := filepath.Join(dir, "exist")
+ if err := ioutil.WriteFile(file, []byte("hello"), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ invalid := filepath.Join(dir, "doesnt-exist")
+
+ symlink := filepath.Join(dir, "symlink")
+ if err := os.Symlink(file, symlink); err != nil {
+ t.Fatal(err)
+ }
+
+ return file, invalid, symlink, dir
+}
+
+func TestLUtimesNano(t *testing.T) {
+ file, invalid, symlink, dir := prepareFiles(t)
+ defer os.RemoveAll(dir)
+
+ before, err := os.Stat(file)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ts := []syscall.Timespec{{0, 0}, {0, 0}}
+ if err := LUtimesNano(symlink, ts); err != nil {
+ t.Fatal(err)
+ }
+
+ symlinkInfo, err := os.Lstat(symlink)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if before.ModTime().Unix() == symlinkInfo.ModTime().Unix() {
+ t.Fatal("The modification time of the symlink should be different")
+ }
+
+ fileInfo, err := os.Stat(file)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if before.ModTime().Unix() != fileInfo.ModTime().Unix() {
+ t.Fatal("The modification time of the file should be the same")
+ }
+
+ if err := LUtimesNano(invalid, ts); err == nil {
+ t.Fatal("Doesn't return an error on a non-existing file")
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_unsupported.go
new file mode 100644
index 0000000..adf2734
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_unsupported.go
@@ -0,0 +1,13 @@
+// +build !linux,!freebsd,!darwin
+
+package system
+
+import "syscall"
+
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+ return ErrNotSupportedPlatform
+}
+
+func UtimesNano(path string, ts []syscall.Timespec) error {
+ return ErrNotSupportedPlatform
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_linux.go
new file mode 100644
index 0000000..00edb20
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_linux.go
@@ -0,0 +1,59 @@
+package system
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// Lgetxattr returns a nil slice and a nil error if the xattr is not set
+func Lgetxattr(path string, attr string) ([]byte, error) {
+ pathBytes, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return nil, err
+ }
+ attrBytes, err := syscall.BytePtrFromString(attr)
+ if err != nil {
+ return nil, err
+ }
+
+ dest := make([]byte, 128)
+ destBytes := unsafe.Pointer(&dest[0])
+ sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
+ if errno == syscall.ENODATA {
+ return nil, nil
+ }
+ if errno == syscall.ERANGE {
+ dest = make([]byte, sz)
+ destBytes := unsafe.Pointer(&dest[0])
+ sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
+ }
+ if errno != 0 {
+ return nil, errno
+ }
+
+ return dest[:sz], nil
+}
+
+var _zero uintptr
+
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+ pathBytes, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+ attrBytes, err := syscall.BytePtrFromString(attr)
+ if err != nil {
+ return err
+ }
+ var dataBytes unsafe.Pointer
+ if len(data) > 0 {
+ dataBytes = unsafe.Pointer(&data[0])
+ } else {
+ dataBytes = unsafe.Pointer(&_zero)
+ }
+ _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0)
+ if errno != 0 {
+ return errno
+ }
+ return nil
+}
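Lgetxattr's contract (nil slice with nil error when the attribute is simply unset) is easy to misuse, so here is a small sketch, assuming a Linux filesystem with xattr support and a hypothetical path:

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/system"
)

func main() {
	const path = "/tmp/file" // hypothetical; must already exist

	// Set a user-namespace attribute without following symlinks.
	if err := system.Lsetxattr(path, "user.note", []byte("hello"), 0); err != nil {
		log.Fatal(err)
	}

	val, err := system.Lgetxattr(path, "user.note")
	if err != nil {
		log.Fatal(err)
	}
	if val == nil {
		fmt.Println("attribute not set") // the nil slice + nil error case
		return
	}
	fmt.Printf("user.note = %q\n", val)
}
```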
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_unsupported.go
new file mode 100644
index 0000000..0060c16
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_unsupported.go
@@ -0,0 +1,11 @@
+// +build !linux
+
+package system
+
+func Lgetxattr(path string, attr string) ([]byte, error) {
+ return nil, ErrNotSupportedPlatform
+}
+
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+ return ErrNotSupportedPlatform
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration.go
new file mode 100644
index 0000000..c219a8a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration.go
@@ -0,0 +1,33 @@
+// Package units provides helper functions to parse and print size and time units
+// in human-readable format.
+package units
+
+import (
+ "fmt"
+ "time"
+)
+
+// HumanDuration returns a human-readable approximation of a duration
+// (eg. "About a minute", "4 hours", etc.).
+func HumanDuration(d time.Duration) string {
+ if seconds := int(d.Seconds()); seconds < 1 {
+ return "Less than a second"
+ } else if seconds < 60 {
+ return fmt.Sprintf("%d seconds", seconds)
+ } else if minutes := int(d.Minutes()); minutes == 1 {
+ return "About a minute"
+ } else if minutes < 60 {
+ return fmt.Sprintf("%d minutes", minutes)
+ } else if hours := int(d.Hours()); hours == 1 {
+ return "About an hour"
+ } else if hours < 48 {
+ return fmt.Sprintf("%d hours", hours)
+ } else if hours < 24*7*2 {
+ return fmt.Sprintf("%d days", hours/24)
+ } else if hours < 24*30*3 {
+ return fmt.Sprintf("%d weeks", hours/24/7)
+ } else if hours < 24*365*2 {
+ return fmt.Sprintf("%d months", hours/24/30)
+ }
+ return fmt.Sprintf("%d years", int(d.Hours())/24/365)
+}
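The cascading thresholds above are easiest to see with a few sample inputs; a quick sketch using the vendored import path:

```go
package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/pkg/units"
)

func main() {
	fmt.Println(units.HumanDuration(47 * time.Second)) // "47 seconds"
	fmt.Println(units.HumanDuration(90 * time.Minute)) // "About an hour"
	fmt.Println(units.HumanDuration(72 * time.Hour))   // "3 days"
}
```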
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration_test.go
new file mode 100644
index 0000000..fcfb6b7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration_test.go
@@ -0,0 +1,46 @@
+package units
+
+import (
+ "testing"
+ "time"
+)
+
+func TestHumanDuration(t *testing.T) {
+ // Useful duration abstractions
+ day := 24 * time.Hour
+ week := 7 * day
+ month := 30 * day
+ year := 365 * day
+
+ assertEquals(t, "Less than a second", HumanDuration(450*time.Millisecond))
+ assertEquals(t, "47 seconds", HumanDuration(47*time.Second))
+ assertEquals(t, "About a minute", HumanDuration(1*time.Minute))
+ assertEquals(t, "3 minutes", HumanDuration(3*time.Minute))
+ assertEquals(t, "35 minutes", HumanDuration(35*time.Minute))
+ assertEquals(t, "35 minutes", HumanDuration(35*time.Minute+40*time.Second))
+ assertEquals(t, "About an hour", HumanDuration(1*time.Hour))
+ assertEquals(t, "About an hour", HumanDuration(1*time.Hour+45*time.Minute))
+ assertEquals(t, "3 hours", HumanDuration(3*time.Hour))
+ assertEquals(t, "3 hours", HumanDuration(3*time.Hour+59*time.Minute))
+ assertEquals(t, "4 hours", HumanDuration(3*time.Hour+60*time.Minute))
+ assertEquals(t, "24 hours", HumanDuration(24*time.Hour))
+ assertEquals(t, "36 hours", HumanDuration(1*day+12*time.Hour))
+ assertEquals(t, "2 days", HumanDuration(2*day))
+ assertEquals(t, "7 days", HumanDuration(7*day))
+ assertEquals(t, "13 days", HumanDuration(13*day+5*time.Hour))
+ assertEquals(t, "2 weeks", HumanDuration(2*week))
+ assertEquals(t, "2 weeks", HumanDuration(2*week+4*day))
+ assertEquals(t, "3 weeks", HumanDuration(3*week))
+ assertEquals(t, "4 weeks", HumanDuration(4*week))
+ assertEquals(t, "4 weeks", HumanDuration(4*week+3*day))
+ assertEquals(t, "4 weeks", HumanDuration(1*month))
+ assertEquals(t, "6 weeks", HumanDuration(1*month+2*week))
+ assertEquals(t, "8 weeks", HumanDuration(2*month))
+ assertEquals(t, "3 months", HumanDuration(3*month+1*week))
+ assertEquals(t, "5 months", HumanDuration(5*month+2*week))
+ assertEquals(t, "13 months", HumanDuration(13*month))
+ assertEquals(t, "23 months", HumanDuration(23*month))
+ assertEquals(t, "24 months", HumanDuration(24*month))
+ assertEquals(t, "2 years", HumanDuration(24*month+2*week))
+ assertEquals(t, "3 years", HumanDuration(3*year+2*month))
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size.go
new file mode 100644
index 0000000..2fde3b4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size.go
@@ -0,0 +1,95 @@
+package units
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// See: http://en.wikipedia.org/wiki/Binary_prefix
+const (
+ // Decimal
+
+ KB = 1000
+ MB = 1000 * KB
+ GB = 1000 * MB
+ TB = 1000 * GB
+ PB = 1000 * TB
+
+ // Binary
+
+ KiB = 1024
+ MiB = 1024 * KiB
+ GiB = 1024 * MiB
+ TiB = 1024 * GiB
+ PiB = 1024 * TiB
+)
+
+type unitMap map[string]int64
+
+var (
+ decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB}
+ binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB}
+ sizeRegex = regexp.MustCompile(`^(\d+)([kKmMgGtTpP])?[bB]?$`)
+)
+
+var decimalAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
+var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
+
+// CustomSize returns a human-readable approximation of a size
+// using a custom format.
+func CustomSize(format string, size float64, base float64, _map []string) string {
+ i := 0
+ for size >= base {
+ size = size / base
+ i++
+ }
+ return fmt.Sprintf(format, size, _map[i])
+}
+
+// HumanSize returns a human-readable approximation of a size
+// using the SI standard (eg. "44 kB", "17 MB").
+func HumanSize(size float64) string {
+ return CustomSize("%.4g %s", size, 1000.0, decimalAbbrs)
+}
+
+// BytesSize returns a human-readable size in bytes, kibibytes,
+// mebibytes, gibibytes, or tebibytes (eg. "44 KiB", "17 MiB").
+func BytesSize(size float64) string {
+ return CustomSize("%.4g %s", size, 1024.0, binaryAbbrs)
+}
+
+// FromHumanSize returns an integer from a human-readable specification of a
+// size using SI standard (eg. "44kB", "17MB").
+func FromHumanSize(size string) (int64, error) {
+ return parseSize(size, decimalMap)
+}
+
+// RAMInBytes parses a human-readable string representing an amount of RAM
+// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and
+// returns the number of bytes, or -1 if the string is unparseable.
+// Units are case-insensitive, and the 'b' suffix is optional.
+func RAMInBytes(size string) (int64, error) {
+ return parseSize(size, binaryMap)
+}
+
+// Parses the human-readable size string into the amount it represents.
+func parseSize(sizeStr string, uMap unitMap) (int64, error) {
+ matches := sizeRegex.FindStringSubmatch(sizeStr)
+ if len(matches) != 3 {
+ return -1, fmt.Errorf("invalid size: '%s'", sizeStr)
+ }
+
+ size, err := strconv.ParseInt(matches[1], 10, 0)
+ if err != nil {
+ return -1, err
+ }
+
+ unitPrefix := strings.ToLower(matches[2])
+ if mul, ok := uMap[unitPrefix]; ok {
+ size *= mul
+ }
+
+ return size, nil
+}
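The same "32Mb" string parses differently depending on the unit map, which is the point of keeping FromHumanSize (decimal) and RAMInBytes (binary) separate; a short sketch:

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/units"
)

func main() {
	// Decimal (SI): "32Mb" -> 32 * 1000 * 1000.
	n, err := units.FromHumanSize("32Mb")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(n) // 32000000

	// Binary: "32Mb" -> 32 * 1024 * 1024.
	m, err := units.RAMInBytes("32Mb")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(m) // 33554432

	// And back to human-readable form.
	fmt.Println(units.HumanSize(float64(n))) // "32 MB"
	fmt.Println(units.BytesSize(float64(m))) // "32 MiB"
}
```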
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size_test.go
new file mode 100644
index 0000000..67c3b81
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size_test.go
@@ -0,0 +1,108 @@
+package units
+
+import (
+ "reflect"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+func TestBytesSize(t *testing.T) {
+ assertEquals(t, "1 KiB", BytesSize(1024))
+ assertEquals(t, "1 MiB", BytesSize(1024*1024))
+ assertEquals(t, "1 MiB", BytesSize(1048576))
+ assertEquals(t, "2 MiB", BytesSize(2*MiB))
+ assertEquals(t, "3.42 GiB", BytesSize(3.42*GiB))
+ assertEquals(t, "5.372 TiB", BytesSize(5.372*TiB))
+ assertEquals(t, "2.22 PiB", BytesSize(2.22*PiB))
+}
+
+func TestHumanSize(t *testing.T) {
+ assertEquals(t, "1 kB", HumanSize(1000))
+ assertEquals(t, "1.024 kB", HumanSize(1024))
+ assertEquals(t, "1 MB", HumanSize(1000000))
+ assertEquals(t, "1.049 MB", HumanSize(1048576))
+ assertEquals(t, "2 MB", HumanSize(2*MB))
+ assertEquals(t, "3.42 GB", HumanSize(float64(3.42*GB)))
+ assertEquals(t, "5.372 TB", HumanSize(float64(5.372*TB)))
+ assertEquals(t, "2.22 PB", HumanSize(float64(2.22*PB)))
+}
+
+func TestFromHumanSize(t *testing.T) {
+ assertSuccessEquals(t, 32, FromHumanSize, "32")
+ assertSuccessEquals(t, 32, FromHumanSize, "32b")
+ assertSuccessEquals(t, 32, FromHumanSize, "32B")
+ assertSuccessEquals(t, 32*KB, FromHumanSize, "32k")
+ assertSuccessEquals(t, 32*KB, FromHumanSize, "32K")
+ assertSuccessEquals(t, 32*KB, FromHumanSize, "32kb")
+ assertSuccessEquals(t, 32*KB, FromHumanSize, "32Kb")
+ assertSuccessEquals(t, 32*MB, FromHumanSize, "32Mb")
+ assertSuccessEquals(t, 32*GB, FromHumanSize, "32Gb")
+ assertSuccessEquals(t, 32*TB, FromHumanSize, "32Tb")
+ assertSuccessEquals(t, 32*PB, FromHumanSize, "32Pb")
+
+ assertError(t, FromHumanSize, "")
+ assertError(t, FromHumanSize, "hello")
+ assertError(t, FromHumanSize, "-32")
+ assertError(t, FromHumanSize, "32.3")
+ assertError(t, FromHumanSize, " 32 ")
+ assertError(t, FromHumanSize, "32.3Kb")
+ assertError(t, FromHumanSize, "32 mb")
+ assertError(t, FromHumanSize, "32m b")
+ assertError(t, FromHumanSize, "32bm")
+}
+
+func TestRAMInBytes(t *testing.T) {
+ assertSuccessEquals(t, 32, RAMInBytes, "32")
+ assertSuccessEquals(t, 32, RAMInBytes, "32b")
+ assertSuccessEquals(t, 32, RAMInBytes, "32B")
+ assertSuccessEquals(t, 32*KiB, RAMInBytes, "32k")
+ assertSuccessEquals(t, 32*KiB, RAMInBytes, "32K")
+ assertSuccessEquals(t, 32*KiB, RAMInBytes, "32kb")
+ assertSuccessEquals(t, 32*KiB, RAMInBytes, "32Kb")
+ assertSuccessEquals(t, 32*MiB, RAMInBytes, "32Mb")
+ assertSuccessEquals(t, 32*GiB, RAMInBytes, "32Gb")
+ assertSuccessEquals(t, 32*TiB, RAMInBytes, "32Tb")
+ assertSuccessEquals(t, 32*PiB, RAMInBytes, "32Pb")
+ assertSuccessEquals(t, 32*PiB, RAMInBytes, "32PB")
+ assertSuccessEquals(t, 32*PiB, RAMInBytes, "32P")
+
+ assertError(t, RAMInBytes, "")
+ assertError(t, RAMInBytes, "hello")
+ assertError(t, RAMInBytes, "-32")
+ assertError(t, RAMInBytes, "32.3")
+ assertError(t, RAMInBytes, " 32 ")
+ assertError(t, RAMInBytes, "32.3Kb")
+ assertError(t, RAMInBytes, "32 mb")
+ assertError(t, RAMInBytes, "32m b")
+ assertError(t, RAMInBytes, "32bm")
+}
+
+func assertEquals(t *testing.T, expected, actual interface{}) {
+ if expected != actual {
+ t.Errorf("Expected '%v' but got '%v'", expected, actual)
+ }
+}
+
+// func that maps to the parse function signatures as testing abstraction
+type parseFn func(string) (int64, error)
+
+// Define 'String()' for pretty-print
+func (fn parseFn) String() string {
+ fnName := runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name()
+ return fnName[strings.LastIndex(fnName, ".")+1:]
+}
+
+func assertSuccessEquals(t *testing.T, expected int64, fn parseFn, arg string) {
+ res, err := fn(arg)
+ if err != nil || res != expected {
+ t.Errorf("%s(\"%s\") -> expected '%d' but got '%d' with error '%v'", fn, arg, expected, res, err)
+ }
+}
+
+func assertError(t *testing.T, fn parseFn, arg string) {
+ res, err := fn(arg)
+ if err == nil && res != -1 {
+ t.Errorf("%s(\"%s\") -> expected error but got '%d'", fn, arg, res)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/satori/go.uuid/.travis.yml b/Godeps/_workspace/src/github.com/satori/go.uuid/.travis.yml
new file mode 100644
index 0000000..0bbdc41
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/satori/go.uuid/.travis.yml
@@ -0,0 +1,10 @@
+language: go
+go:
+ - 1.0
+ - 1.1
+ - 1.2
+ - 1.3
+ - 1.4
+sudo: false
+notifications:
+ email: false
diff --git a/Godeps/_workspace/src/github.com/satori/go.uuid/LICENSE b/Godeps/_workspace/src/github.com/satori/go.uuid/LICENSE
new file mode 100644
index 0000000..6a1fb91
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/satori/go.uuid/LICENSE
@@ -0,0 +1,20 @@
+Copyright (C) 2013-2015 by Maxim Bublis
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/Godeps/_workspace/src/github.com/satori/go.uuid/README.md b/Godeps/_workspace/src/github.com/satori/go.uuid/README.md
new file mode 100644
index 0000000..759f77c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/satori/go.uuid/README.md
@@ -0,0 +1,66 @@
+# UUID package for Go language
+
+[![Build Status](https://travis-ci.org/satori/go.uuid.png?branch=master)](https://travis-ci.org/satori/go.uuid)
+[![GoDoc](http://godoc.org/github.com/satori/go.uuid?status.png)](http://godoc.org/github.com/satori/go.uuid)
+
+This package provides a pure Go implementation of the Universally Unique Identifier (UUID). Both creation and parsing of UUIDs are supported.
+
+It ships with 100% test coverage and benchmarks out of the box.
+
+Supported versions:
+* Version 1, based on timestamp and MAC address (RFC 4122)
+* Version 2, based on timestamp, MAC address and POSIX UID/GID (DCE 1.1)
+* Version 3, based on MD5 hashing (RFC 4122)
+* Version 4, based on random numbers (RFC 4122)
+* Version 5, based on SHA-1 hashing (RFC 4122)
+
+## Installation
+
+Use the `go` command:
+
+ $ go get github.com/satori/go.uuid
+
+## Requirements
+
+The UUID package requires any stable version of the Go programming language.
+
+It is tested against the following versions of Go: 1.0-1.4
+
+## Example
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/satori/go.uuid"
+)
+
+func main() {
+ // Creating UUID Version 4
+ u1 := uuid.NewV4()
+ fmt.Printf("UUIDv4: %s\n", u1)
+
+ // Parsing UUID from string input
+ u2, err := uuid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+ if err != nil {
+ fmt.Printf("Something went wrong: %s", err)
+ }
+ fmt.Printf("Successfully parsed: %s", u2)
+}
+```
+
+## Documentation
+
+[Documentation](http://godoc.org/github.com/satori/go.uuid) is hosted at the GoDoc project.
+
+## Links
+* [RFC 4122](http://tools.ietf.org/html/rfc4122)
+* [DCE 1.1: Authentication and Security Services](http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01)
+
+## Copyright
+
+Copyright (C) 2013-2015 by Maxim Bublis.
+
+The UUID package is released under the MIT License.
+See [LICENSE](https://github.com/satori/go.uuid/blob/master/LICENSE) for details.
diff --git a/Godeps/_workspace/src/github.com/satori/go.uuid/benchmarks_test.go b/Godeps/_workspace/src/github.com/satori/go.uuid/benchmarks_test.go
new file mode 100644
index 0000000..9a85f7c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/satori/go.uuid/benchmarks_test.go
@@ -0,0 +1,121 @@
+// Copyright (C) 2013-2014 by Maxim Bublis
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+package uuid
+
+import (
+ "testing"
+)
+
+func BenchmarkFromBytes(b *testing.B) {
+ bytes := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+ for i := 0; i < b.N; i++ {
+ FromBytes(bytes)
+ }
+}
+
+func BenchmarkFromString(b *testing.B) {
+ s := "6ba7b810-9dad-11d1-80b4-00c04fd430c8"
+ for i := 0; i < b.N; i++ {
+ FromString(s)
+ }
+}
+
+func BenchmarkFromStringUrn(b *testing.B) {
+ s := "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"
+ for i := 0; i < b.N; i++ {
+ FromString(s)
+ }
+}
+
+func BenchmarkFromStringWithBrackets(b *testing.B) {
+ s := "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}"
+ for i := 0; i < b.N; i++ {
+ FromString(s)
+ }
+}
+
+func BenchmarkNewV1(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ NewV1()
+ }
+}
+
+func BenchmarkNewV2(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ NewV2(DomainPerson)
+ }
+}
+
+func BenchmarkNewV3(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ NewV3(NamespaceDNS, "www.example.com")
+ }
+}
+
+func BenchmarkNewV4(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ NewV4()
+ }
+}
+
+func BenchmarkNewV5(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ NewV5(NamespaceDNS, "www.example.com")
+ }
+}
+
+func BenchmarkMarshalBinary(b *testing.B) {
+ u := NewV4()
+ for i := 0; i < b.N; i++ {
+ u.MarshalBinary()
+ }
+}
+
+func BenchmarkMarshalText(b *testing.B) {
+ u := NewV4()
+ for i := 0; i < b.N; i++ {
+ u.MarshalText()
+ }
+}
+
+func BenchmarkUnmarshalBinary(b *testing.B) {
+ bytes := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+ u := UUID{}
+ for i := 0; i < b.N; i++ {
+ u.UnmarshalBinary(bytes)
+ }
+}
+
+func BenchmarkUnmarshalText(b *testing.B) {
+ bytes := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+ u := UUID{}
+ for i := 0; i < b.N; i++ {
+ u.UnmarshalText(bytes)
+ }
+}
+
+func BenchmarkMarshalToString(b *testing.B) {
+ u := NewV4()
+ for i := 0; i < b.N; i++ {
+ u.String()
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/satori/go.uuid/uuid.go b/Godeps/_workspace/src/github.com/satori/go.uuid/uuid.go
new file mode 100644
index 0000000..b4dc4ea
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/satori/go.uuid/uuid.go
@@ -0,0 +1,429 @@
+// Copyright (C) 2013-2015 by Maxim Bublis
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+// Package uuid provides implementation of Universally Unique Identifier (UUID).
+// Supported versions are 1, 3, 4 and 5 (as specified in RFC 4122) and
+// version 2 (as specified in DCE 1.1).
+package uuid
+
+import (
+ "bytes"
+ "crypto/md5"
+ "crypto/rand"
+ "crypto/sha1"
+ "encoding/binary"
+ "encoding/hex"
+ "fmt"
+ "hash"
+ "net"
+ "os"
+ "sync"
+ "time"
+)
+
+// UUID layout variants.
+const (
+ VariantNCS = iota
+ VariantRFC4122
+ VariantMicrosoft
+ VariantFuture
+)
+
+// UUID DCE domains.
+const (
+ DomainPerson = iota
+ DomainGroup
+ DomainOrg
+)
+
+// Difference in 100-nanosecond intervals between
+// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970).
+const epochStart = 122192928000000000
+
+// Used in string method conversion
+const dash byte = '-'
+
+// UUID v1/v2 storage.
+var (
+ storageMutex sync.Mutex
+ storageOnce sync.Once
+ epochFunc = unixTimeFunc
+ clockSequence uint16
+ lastTime uint64
+ hardwareAddr [6]byte
+ posixUID = uint32(os.Getuid())
+ posixGID = uint32(os.Getgid())
+)
+
+// String parse helpers.
+var (
+ urnPrefix = []byte("urn:uuid:")
+ byteGroups = []int{8, 4, 4, 4, 12}
+)
+
+func initClockSequence() {
+ buf := make([]byte, 2)
+ safeRandom(buf)
+ clockSequence = binary.BigEndian.Uint16(buf)
+}
+
+func initHardwareAddr() {
+ interfaces, err := net.Interfaces()
+ if err == nil {
+ for _, iface := range interfaces {
+ if len(iface.HardwareAddr) >= 6 {
+ copy(hardwareAddr[:], iface.HardwareAddr)
+ return
+ }
+ }
+ }
+
+ // Initialize hardwareAddr randomly in case
+ // no real network interface is present
+ safeRandom(hardwareAddr[:])
+
+ // Set multicast bit as recommended in RFC 4122
+ hardwareAddr[0] |= 0x01
+}
+
+func initStorage() {
+ initClockSequence()
+ initHardwareAddr()
+}
+
+func safeRandom(dest []byte) {
+ if _, err := rand.Read(dest); err != nil {
+ panic(err)
+ }
+}
+
+// unixTimeFunc returns the difference in 100-nanosecond intervals between
+// the UUID epoch (October 15, 1582) and the current time.
+// This is the default epoch calculation function.
+func unixTimeFunc() uint64 {
+ return epochStart + uint64(time.Now().UnixNano()/100)
+}
+
+// UUID representation compliant with specification
+// described in RFC 4122.
+type UUID [16]byte
+
+// The Nil UUID is a special form of UUID that is specified to have all
+// 128 bits set to zero.
+var Nil = UUID{}
+
+// Predefined namespace UUIDs.
+var (
+ NamespaceDNS, _ = FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+ NamespaceURL, _ = FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8")
+ NamespaceOID, _ = FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8")
+ NamespaceX500, _ = FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8")
+)
+
+// And returns the result of a bitwise AND of two UUIDs.
+func And(u1 UUID, u2 UUID) UUID {
+ u := UUID{}
+ for i := 0; i < 16; i++ {
+ u[i] = u1[i] & u2[i]
+ }
+ return u
+}
+
+// Or returns the result of a bitwise OR of two UUIDs.
+func Or(u1 UUID, u2 UUID) UUID {
+ u := UUID{}
+ for i := 0; i < 16; i++ {
+ u[i] = u1[i] | u2[i]
+ }
+ return u
+}
+
+// Equal returns true if u1 and u2 are equal, otherwise it returns false.
+func Equal(u1 UUID, u2 UUID) bool {
+ return bytes.Equal(u1[:], u2[:])
+}
+
+// Version returns algorithm version used to generate UUID.
+func (u UUID) Version() uint {
+ return uint(u[6] >> 4)
+}
+
+// Variant returns UUID layout variant.
+func (u UUID) Variant() uint {
+ switch {
+ case (u[8] & 0x80) == 0x00:
+ return VariantNCS
+ case (u[8]&0xc0)|0x80 == 0x80:
+ return VariantRFC4122
+ case (u[8]&0xe0)|0xc0 == 0xc0:
+ return VariantMicrosoft
+ }
+ return VariantFuture
+}
+
+// Bytes returns a byte slice representation of the UUID.
+func (u UUID) Bytes() []byte {
+ return u[:]
+}
+
+// String returns the canonical string representation of the UUID:
+// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.
+func (u UUID) String() string {
+ buf := make([]byte, 36)
+
+ hex.Encode(buf[0:8], u[0:4])
+ buf[8] = dash
+ hex.Encode(buf[9:13], u[4:6])
+ buf[13] = dash
+ hex.Encode(buf[14:18], u[6:8])
+ buf[18] = dash
+ hex.Encode(buf[19:23], u[8:10])
+ buf[23] = dash
+ hex.Encode(buf[24:], u[10:])
+
+ return string(buf)
+}
+
+// SetVersion sets version bits.
+func (u *UUID) SetVersion(v byte) {
+ u[6] = (u[6] & 0x0f) | (v << 4)
+}
+
+// SetVariant sets variant bits as described in RFC 4122.
+func (u *UUID) SetVariant() {
+ u[8] = (u[8] & 0xbf) | 0x80
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+// The encoding is the same as returned by String.
+func (u UUID) MarshalText() (text []byte, err error) {
+ text = []byte(u.String())
+ return
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// The following formats are supported:
+// "6ba7b810-9dad-11d1-80b4-00c04fd430c8",
+// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
+// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"
+func (u *UUID) UnmarshalText(text []byte) (err error) {
+ if len(text) < 32 {
+ err = fmt.Errorf("uuid: invalid UUID string: %s", text)
+ return
+ }
+
+ if bytes.Equal(text[:9], urnPrefix) {
+ text = text[9:]
+ } else if text[0] == '{' {
+ text = text[1:]
+ }
+
+ b := u[:]
+
+ for _, byteGroup := range byteGroups {
+ if text[0] == '-' {
+ text = text[1:]
+ }
+
+ _, err = hex.Decode(b[:byteGroup/2], text[:byteGroup])
+
+ if err != nil {
+ return
+ }
+
+ text = text[byteGroup:]
+ b = b[byteGroup/2:]
+ }
+
+ return
+}
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (u UUID) MarshalBinary() (data []byte, err error) {
+ data = u.Bytes()
+ return
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+// It will return an error if the slice isn't 16 bytes long.
+func (u *UUID) UnmarshalBinary(data []byte) (err error) {
+ if len(data) != 16 {
+ err = fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data))
+ return
+ }
+ copy(u[:], data)
+
+ return
+}
+
+// Scan implements the sql.Scanner interface.
+// A 16-byte slice is handled by UnmarshalBinary, while
+// a longer byte slice or a string is handled by UnmarshalText.
+func (u *UUID) Scan(src interface{}) error {
+ switch src := src.(type) {
+ case []byte:
+ if len(src) == 16 {
+ return u.UnmarshalBinary(src)
+ }
+ return u.UnmarshalText(src)
+
+ case string:
+ return u.UnmarshalText([]byte(src))
+ }
+
+ return fmt.Errorf("uuid: cannot convert %T to UUID", src)
+}
+
+// FromBytes returns UUID converted from raw byte slice input.
+// It will return an error if the slice isn't 16 bytes long.
+func FromBytes(input []byte) (u UUID, err error) {
+ err = u.UnmarshalBinary(input)
+ return
+}
+
+// FromBytesOrNil returns UUID converted from raw byte slice input.
+// Same behavior as FromBytes, but returns a Nil UUID on error.
+func FromBytesOrNil(input []byte) UUID {
+ uuid, err := FromBytes(input)
+ if err != nil {
+ return Nil
+ }
+ return uuid
+}
+
+// FromString returns UUID parsed from string input.
+// Input is expected in a form accepted by UnmarshalText.
+func FromString(input string) (u UUID, err error) {
+ err = u.UnmarshalText([]byte(input))
+ return
+}
+
+// FromStringOrNil returns UUID parsed from string input.
+// Same behavior as FromString, but returns a Nil UUID on error.
+func FromStringOrNil(input string) UUID {
+ uuid, err := FromString(input)
+ if err != nil {
+ return Nil
+ }
+ return uuid
+}
+
+// getStorage returns the UUID v1/v2 storage state:
+// the epoch timestamp, the clock sequence, and the hardware address.
+func getStorage() (uint64, uint16, []byte) {
+ storageOnce.Do(initStorage)
+
+ storageMutex.Lock()
+ defer storageMutex.Unlock()
+
+ timeNow := epochFunc()
+ // The clock was set backwards since the last UUID generation;
+ // the clock sequence must be incremented.
+ if timeNow <= lastTime {
+ clockSequence++
+ }
+ lastTime = timeNow
+
+ return timeNow, clockSequence, hardwareAddr[:]
+}
+
+// NewV1 returns UUID based on current timestamp and MAC address.
+func NewV1() UUID {
+ u := UUID{}
+
+ timeNow, clockSeq, hardwareAddr := getStorage()
+
+ binary.BigEndian.PutUint32(u[0:], uint32(timeNow))
+ binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32))
+ binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48))
+ binary.BigEndian.PutUint16(u[8:], clockSeq)
+
+ copy(u[10:], hardwareAddr)
+
+ u.SetVersion(1)
+ u.SetVariant()
+
+ return u
+}
+
+// NewV2 returns DCE Security UUID based on POSIX UID/GID.
+func NewV2(domain byte) UUID {
+ u := UUID{}
+
+ timeNow, clockSeq, hardwareAddr := getStorage()
+
+ switch domain {
+ case DomainPerson:
+ binary.BigEndian.PutUint32(u[0:], posixUID)
+ case DomainGroup:
+ binary.BigEndian.PutUint32(u[0:], posixGID)
+ }
+
+ binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32))
+ binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48))
+ binary.BigEndian.PutUint16(u[8:], clockSeq)
+ u[9] = domain
+
+ copy(u[10:], hardwareAddr)
+
+ u.SetVersion(2)
+ u.SetVariant()
+
+ return u
+}
+
+// NewV3 returns UUID based on MD5 hash of namespace UUID and name.
+func NewV3(ns UUID, name string) UUID {
+ u := newFromHash(md5.New(), ns, name)
+ u.SetVersion(3)
+ u.SetVariant()
+
+ return u
+}
+
+// NewV4 returns a randomly generated UUID.
+func NewV4() UUID {
+ u := UUID{}
+ safeRandom(u[:])
+ u.SetVersion(4)
+ u.SetVariant()
+
+ return u
+}
+
+// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name.
+func NewV5(ns UUID, name string) UUID {
+ u := newFromHash(sha1.New(), ns, name)
+ u.SetVersion(5)
+ u.SetVariant()
+
+ return u
+}
+
+// Returns UUID based on hashing of namespace UUID and name.
+func newFromHash(h hash.Hash, ns UUID, name string) UUID {
+ u := UUID{}
+ h.Write(ns[:])
+ h.Write([]byte(name))
+ copy(u[:], h.Sum(nil))
+
+ return u
+}
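Beyond the README example, the practical distinction between the random and name-based generators is worth spelling out; a minimal sketch using the vendored import path:

```go
package main

import (
	"fmt"

	"github.com/satori/go.uuid"
)

func main() {
	// V4 is random: successive calls virtually never collide.
	fmt.Println(uuid.NewV4())

	// V5 is deterministic: the same namespace and name
	// always hash to the same UUID.
	a := uuid.NewV5(uuid.NamespaceDNS, "example.org")
	b := uuid.NewV5(uuid.NamespaceDNS, "example.org")
	fmt.Println(uuid.Equal(a, b)) // true
}
```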
diff --git a/Godeps/_workspace/src/github.com/satori/go.uuid/uuid_test.go b/Godeps/_workspace/src/github.com/satori/go.uuid/uuid_test.go
new file mode 100644
index 0000000..c77d2d3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/satori/go.uuid/uuid_test.go
@@ -0,0 +1,492 @@
+// Copyright (C) 2013, 2015 by Maxim Bublis
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+package uuid
+
+import (
+ "bytes"
+ "testing"
+)
+
+func TestBytes(t *testing.T) {
+ u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+
+ bytes1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+
+ if !bytes.Equal(u.Bytes(), bytes1) {
+ t.Errorf("Incorrect bytes representation for UUID: %s", u)
+ }
+}
+
+func TestString(t *testing.T) {
+ if NamespaceDNS.String() != "6ba7b810-9dad-11d1-80b4-00c04fd430c8" {
+ t.Errorf("Incorrect string representation for UUID: %s", NamespaceDNS.String())
+ }
+}
+
+func TestEqual(t *testing.T) {
+ if !Equal(NamespaceDNS, NamespaceDNS) {
+ t.Errorf("Incorrect comparison of %s and %s", NamespaceDNS, NamespaceDNS)
+ }
+
+ if Equal(NamespaceDNS, NamespaceURL) {
+ t.Errorf("Incorrect comparison of %s and %s", NamespaceDNS, NamespaceURL)
+ }
+}
+
+func TestOr(t *testing.T) {
+ u1 := UUID{0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff}
+ u2 := UUID{0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00}
+
+ u := UUID{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+
+ if !Equal(u, Or(u1, u2)) {
+ t.Errorf("Incorrect bitwise OR result %s", Or(u1, u2))
+ }
+}
+
+func TestAnd(t *testing.T) {
+ u1 := UUID{0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff}
+ u2 := UUID{0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00}
+
+ u := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+
+ if !Equal(u, And(u1, u2)) {
+ t.Errorf("Incorrect bitwise AND result %s", And(u1, u2))
+ }
+}
+
+func TestVersion(t *testing.T) {
+ u := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+
+ if u.Version() != 1 {
+ t.Errorf("Incorrect version for UUID: %d", u.Version())
+ }
+}
+
+func TestSetVersion(t *testing.T) {
+ u := UUID{}
+ u.SetVersion(4)
+
+ if u.Version() != 4 {
+ t.Errorf("Incorrect version for UUID after u.SetVersion(4): %d", u.Version())
+ }
+}
+
+func TestVariant(t *testing.T) {
+ u1 := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+
+ if u1.Variant() != VariantNCS {
+ t.Errorf("Incorrect variant for UUID variant %d: %d", VariantNCS, u1.Variant())
+ }
+
+ u2 := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+
+ if u2.Variant() != VariantRFC4122 {
+ t.Errorf("Incorrect variant for UUID variant %d: %d", VariantRFC4122, u2.Variant())
+ }
+
+ u3 := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+
+ if u3.Variant() != VariantMicrosoft {
+ t.Errorf("Incorrect variant for UUID variant %d: %d", VariantMicrosoft, u3.Variant())
+ }
+
+ u4 := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+
+ if u4.Variant() != VariantFuture {
+ t.Errorf("Incorrect variant for UUID variant %d: %d", VariantFuture, u4.Variant())
+ }
+}
+
+func TestSetVariant(t *testing.T) {
+ u := new(UUID)
+ u.SetVariant()
+
+ if u.Variant() != VariantRFC4122 {
+ t.Errorf("Incorrect variant for UUID after u.SetVariant(): %d", u.Variant())
+ }
+}
+
+func TestFromBytes(t *testing.T) {
+ u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+ b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+
+ u1, err := FromBytes(b1)
+ if err != nil {
+ t.Errorf("Error parsing UUID from bytes: %s", err)
+ }
+
+ if !Equal(u, u1) {
+ t.Errorf("UUIDs should be equal: %s and %s", u, u1)
+ }
+
+ b2 := []byte{}
+
+ _, err = FromBytes(b2)
+ if err == nil {
+ t.Errorf("Should return error parsing from empty byte slice, got %s", err)
+ }
+}
+
+func TestMarshalBinary(t *testing.T) {
+ u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+ b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+
+ b2, err := u.MarshalBinary()
+ if err != nil {
+ t.Errorf("Error marshaling UUID: %s", err)
+ }
+
+ if !bytes.Equal(b1, b2) {
+ t.Errorf("Marshaled UUID should be %s, got %s", b1, b2)
+ }
+}
+
+func TestUnmarshalBinary(t *testing.T) {
+ u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+ b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+
+ u1 := UUID{}
+ err := u1.UnmarshalBinary(b1)
+ if err != nil {
+ t.Errorf("Error unmarshaling UUID: %s", err)
+ }
+
+ if !Equal(u, u1) {
+ t.Errorf("UUIDs should be equal: %s and %s", u, u1)
+ }
+
+ b2 := []byte{}
+ u2 := UUID{}
+
+ err = u2.UnmarshalBinary(b2)
+ if err == nil {
+ t.Errorf("Should return error unmarshalling from empty byte slice, got %s", err)
+ }
+}
+
+func TestFromString(t *testing.T) {
+ u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+
+ s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c8"
+ s2 := "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}"
+ s3 := "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"
+
+ _, err := FromString("")
+ if err == nil {
+ t.Errorf("Should return error trying to parse empty string, got %s", err)
+ }
+
+ u1, err := FromString(s1)
+ if err != nil {
+ t.Errorf("Error parsing UUID from string: %s", err)
+ }
+
+ if !Equal(u, u1) {
+ t.Errorf("UUIDs should be equal: %s and %s", u, u1)
+ }
+
+ u2, err := FromString(s2)
+ if err != nil {
+ t.Errorf("Error parsing UUID from string: %s", err)
+ }
+
+ if !Equal(u, u2) {
+ t.Errorf("UUIDs should be equal: %s and %s", u, u2)
+ }
+
+ u3, err := FromString(s3)
+ if err != nil {
+ t.Errorf("Error parsing UUID from string: %s", err)
+ }
+
+ if !Equal(u, u3) {
+ t.Errorf("UUIDs should be equal: %s and %s", u, u3)
+ }
+}
+
+func TestFromStringOrNil(t *testing.T) {
+ u := FromStringOrNil("")
+ if u != Nil {
+ t.Errorf("Should return Nil UUID on parse failure, got %s", u)
+ }
+}
+
+func TestFromBytesOrNil(t *testing.T) {
+ b := []byte{}
+ u := FromBytesOrNil(b)
+ if u != Nil {
+ t.Errorf("Should return Nil UUID on parse failure, got %s", u)
+ }
+}
+
+func TestMarshalText(t *testing.T) {
+ u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+ b1 := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+
+ b2, err := u.MarshalText()
+ if err != nil {
+ t.Errorf("Error marshaling UUID: %s", err)
+ }
+
+ if !bytes.Equal(b1, b2) {
+ t.Errorf("Marshaled UUID should be %s, got %s", b1, b2)
+ }
+}
+
+func TestUnmarshalText(t *testing.T) {
+ u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+ b1 := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+
+ u1 := UUID{}
+ err := u1.UnmarshalText(b1)
+ if err != nil {
+ t.Errorf("Error unmarshaling UUID: %s", err)
+ }
+
+ if !Equal(u, u1) {
+ t.Errorf("UUIDs should be equal: %s and %s", u, u1)
+ }
+
+ b2 := []byte("")
+ u2 := UUID{}
+
+ err = u2.UnmarshalText(b2)
+ if err == nil {
+ t.Errorf("Should return error trying to unmarshal from empty string")
+ }
+}
+
+func TestScanBinary(t *testing.T) {
+ u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+ b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+
+ u1 := UUID{}
+ err := u1.Scan(b1)
+ if err != nil {
+ t.Errorf("Error unmarshaling UUID: %s", err)
+ }
+
+ if !Equal(u, u1) {
+ t.Errorf("UUIDs should be equal: %s and %s", u, u1)
+ }
+
+ b2 := []byte{}
+ u2 := UUID{}
+
+ err = u2.Scan(b2)
+ if err == nil {
+ t.Errorf("Should return error unmarshalling from empty byte slice, got %s", err)
+ }
+}
+
+func TestScanString(t *testing.T) {
+ u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+ s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c8"
+
+ u1 := UUID{}
+ err := u1.Scan(s1)
+ if err != nil {
+ t.Errorf("Error unmarshaling UUID: %s", err)
+ }
+
+ if !Equal(u, u1) {
+ t.Errorf("UUIDs should be equal: %s and %s", u, u1)
+ }
+
+ s2 := ""
+ u2 := UUID{}
+
+ err = u2.Scan(s2)
+ if err == nil {
+ t.Errorf("Should return error trying to unmarshal from empty string")
+ }
+}
+
+func TestScanText(t *testing.T) {
+ u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+ b1 := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+
+ u1 := UUID{}
+ err := u1.Scan(b1)
+ if err != nil {
+ t.Errorf("Error unmarshaling UUID: %s", err)
+ }
+
+ if !Equal(u, u1) {
+ t.Errorf("UUIDs should be equal: %s and %s", u, u1)
+ }
+
+ b2 := []byte("")
+ u2 := UUID{}
+
+ err = u2.Scan(b2)
+ if err == nil {
+ t.Errorf("Should return error trying to unmarshal from empty string")
+ }
+}
+
+func TestScanUnsupported(t *testing.T) {
+ u := UUID{}
+
+ err := u.Scan(true)
+ if err == nil {
+ t.Errorf("Should return error trying to unmarshal from bool")
+ }
+}
+
+func TestNewV1(t *testing.T) {
+ u := NewV1()
+
+ if u.Version() != 1 {
+ t.Errorf("UUIDv1 generated with incorrect version: %d", u.Version())
+ }
+
+ if u.Variant() != VariantRFC4122 {
+ t.Errorf("UUIDv1 generated with incorrect variant: %d", u.Variant())
+ }
+
+ u1 := NewV1()
+ u2 := NewV1()
+
+ if Equal(u1, u2) {
+ t.Errorf("UUIDv1 generated two equal UUIDs: %s and %s", u1, u2)
+ }
+
+ oldFunc := epochFunc
+ epochFunc = func() uint64 { return 0 }
+
+ u3 := NewV1()
+ u4 := NewV1()
+
+ if Equal(u3, u4) {
+ t.Errorf("UUIDv1 generated two equal UUIDs: %s and %s", u3, u4)
+ }
+
+ epochFunc = oldFunc
+}
+
+func TestNewV2(t *testing.T) {
+ u1 := NewV2(DomainPerson)
+
+ if u1.Version() != 2 {
+ t.Errorf("UUIDv2 generated with incorrect version: %d", u1.Version())
+ }
+
+ if u1.Variant() != VariantRFC4122 {
+ t.Errorf("UUIDv2 generated with incorrect variant: %d", u1.Variant())
+ }
+
+ u2 := NewV2(DomainGroup)
+
+ if u2.Version() != 2 {
+ t.Errorf("UUIDv2 generated with incorrect version: %d", u2.Version())
+ }
+
+ if u2.Variant() != VariantRFC4122 {
+ t.Errorf("UUIDv2 generated with incorrect variant: %d", u2.Variant())
+ }
+}
+
+func TestNewV3(t *testing.T) {
+ u := NewV3(NamespaceDNS, "www.example.com")
+
+ if u.Version() != 3 {
+ t.Errorf("UUIDv3 generated with incorrect version: %d", u.Version())
+ }
+
+ if u.Variant() != VariantRFC4122 {
+ t.Errorf("UUIDv3 generated with incorrect variant: %d", u.Variant())
+ }
+
+ if u.String() != "5df41881-3aed-3515-88a7-2f4a814cf09e" {
+ t.Errorf("UUIDv3 generated incorrectly: %s", u.String())
+ }
+
+ u = NewV3(NamespaceDNS, "python.org")
+
+ if u.String() != "6fa459ea-ee8a-3ca4-894e-db77e160355e" {
+ t.Errorf("UUIDv3 generated incorrectly: %s", u.String())
+ }
+
+ u1 := NewV3(NamespaceDNS, "golang.org")
+ u2 := NewV3(NamespaceDNS, "golang.org")
+ if !Equal(u1, u2) {
+ t.Errorf("UUIDv3 generated different UUIDs for same namespace and name: %s and %s", u1, u2)
+ }
+
+ u3 := NewV3(NamespaceDNS, "example.com")
+ if Equal(u1, u3) {
+ t.Errorf("UUIDv3 generated same UUIDs for different names in same namespace: %s and %s", u1, u3)
+ }
+
+ u4 := NewV3(NamespaceURL, "golang.org")
+ if Equal(u1, u4) {
+ t.Errorf("UUIDv3 generated same UUIDs for same names in different namespaces: %s and %s", u1, u4)
+ }
+}
+
+func TestNewV4(t *testing.T) {
+ u := NewV4()
+
+ if u.Version() != 4 {
+ t.Errorf("UUIDv4 generated with incorrect version: %d", u.Version())
+ }
+
+ if u.Variant() != VariantRFC4122 {
+ t.Errorf("UUIDv4 generated with incorrect variant: %d", u.Variant())
+ }
+}
+
+func TestNewV5(t *testing.T) {
+ u := NewV5(NamespaceDNS, "www.example.com")
+
+ if u.Version() != 5 {
+ t.Errorf("UUIDv5 generated with incorrect version: %d", u.Version())
+ }
+
+ if u.Variant() != VariantRFC4122 {
+ t.Errorf("UUIDv5 generated with incorrect variant: %d", u.Variant())
+ }
+
+ u = NewV5(NamespaceDNS, "python.org")
+
+ if u.String() != "886313e1-3b8a-5372-9b90-0c9aee199e5d" {
+ t.Errorf("UUIDv5 generated incorrectly: %s", u.String())
+ }
+
+ u1 := NewV5(NamespaceDNS, "golang.org")
+ u2 := NewV5(NamespaceDNS, "golang.org")
+ if !Equal(u1, u2) {
+ t.Errorf("UUIDv5 generated different UUIDs for same namespace and name: %s and %s", u1, u2)
+ }
+
+ u3 := NewV5(NamespaceDNS, "example.com")
+ if Equal(u1, u3) {
+ t.Errorf("UUIDv5 generated same UUIDs for different names in same namespace: %s and %s", u1, u3)
+ }
+
+ u4 := NewV5(NamespaceURL, "golang.org")
+ if Equal(u1, u4) {
+ t.Errorf("UUIDv5 generated same UUIDs for same names in different namespaces: %s and %s", u1, u4)
+ }
+}