diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index c6b5c839..539a00f3 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -24,8 +24,6 @@ jobs:
       - run: make unit_tests
       - run: make integration_tests
         timeout-minutes: 15
-        env:
-          ROUTER_MONGO_URL: 127.0.0.1
   golangci-lint:
     runs-on: ubuntu-latest
     steps:
@@ -35,4 +33,4 @@ jobs:
       - uses: actions/setup-go@v5
         with:
           go-version-file: go.mod
-      - uses: golangci/golangci-lint-action@971e284b6050e8a5849b72094c50ab08da042db8 # v6.1.1
+      - uses: golangci/golangci-lint-action@971e284b6050e8a5849b72094c50ab08da042db8 # v6.1.1
diff --git a/Makefile b/Makefile
index fc393719..35599e11 100644
--- a/Makefile
+++ b/Makefile
@@ -1,4 +1,4 @@
-.PHONY: all clean build test lint unit_tests integration_tests start_mongo stop_mongo update_deps
+.PHONY: all clean build test lint unit_tests integration_tests update_deps
 .NOTPARALLEL:
 
 TARGET_MODULE := router
@@ -27,15 +27,8 @@ lint:
 unit_tests:
	go test -race $$(go list ./... | grep -v integration_tests)
 
-integration_tests: build start_mongo
-	go test -race -v ./integration_tests
+integration_tests: build
	go test -race -v ./cs_integration_tests
 
-start_mongo:
-	./mongo.sh start
-
-stop_mongo:
-	./mongo.sh stop
-
 update_deps:
	go get -t -u ./... && go mod tidy && go mod vendor
diff --git a/cs_integration_tests/router_support.go b/cs_integration_tests/router_support.go
index 4cffe2df..b19f9adb 100644
--- a/cs_integration_tests/router_support.go
+++ b/cs_integration_tests/router_support.go
@@ -90,11 +90,9 @@ func startRouter(port, apiPort int, extraEnv []string) error {
	}
	cmd := exec.Command(bin)
 
-	cmd.Env = append(cmd.Environ(), "ROUTER_MONGO_DB=router_test")
	cmd.Env = append(cmd.Env, fmt.Sprintf("ROUTER_PUBADDR=%s", pubAddr))
	cmd.Env = append(cmd.Env, fmt.Sprintf("ROUTER_APIADDR=%s", apiAddr))
	cmd.Env = append(cmd.Env, fmt.Sprintf("ROUTER_ERROR_LOG=%s", tempLogfile.Name()))
-	cmd.Env = append(cmd.Env, "CSMUX_SAMPLE_RATE=1.0")
	cmd.Env = append(cmd.Env, "CONTENT_STORE_DATABASE_URL="+postgresContainer.MustConnectionString(context.Background()))
	cmd.Env = append(cmd.Env, extraEnv...)
 
diff --git a/docs/data-structure.md b/docs/data-structure.md
index 3ba5619d..7fb7cada 100644
--- a/docs/data-structure.md
+++ b/docs/data-structure.md
@@ -1,17 +1,14 @@
 # Data structure
 
-The Router requires two MongoDB collections: `routes` and `backends`.
-
 ## Routes
 
-The `routes` collection uses the following data structure:
+Routes use the following data structure:
 
 ```json
 {
-  "_id" : ObjectId(),
-  "route_type" : ["prefix","exact"],
-  "incoming_path" : "/url-path/here",
-  "handler" : ["backend", "redirect", "gone"],
+  "route_type": ["prefix", "exact"],
+  "incoming_path": "/url-path/here",
+  "handler": ["backend", "redirect", "gone"]
 }
 ```
@@ -28,7 +25,7 @@ The `backend` handler causes the Router to reverse proxy to a named
 
 ```json
 {
-  "backend_id" : "backend-id-corresponding-to-backends-collection"
+  "backend_id": "backend-id-corresponding-to-backends-collection"
 }
 ```
@@ -40,22 +37,10 @@ extra fields are supported:
 
 ```json
 {
-  "redirect_to" : "/target-of-redirect"
+  "redirect_to": "/target-of-redirect"
 }
 ```
 
 ### `gone` handler
 
 The `gone` handler causes the Router to return a 410 response.
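(Editorial illustration, not part of this diff: the documented route fields map naturally onto a small Go struct. Everything below — the package name, struct name, and the example route — is a hypothetical sketch derived only from the JSON shapes documented above, not code from this repository.)

```go
// Package docs is an illustrative sketch of the documented route structure.
package docs

// Route mirrors the documented route fields. RouteType is one of
// "prefix" or "exact"; Handler is one of "backend", "redirect" or "gone".
type Route struct {
	RouteType    string `json:"route_type"`
	IncomingPath string `json:"incoming_path"`
	Handler      string `json:"handler"`
	BackendID    string `json:"backend_id,omitempty"`  // "backend" handler only
	RedirectTo   string `json:"redirect_to,omitempty"` // "redirect" handler only
}

// Example: a gone route for a withdrawn page (hypothetical path).
var goneRoute = Route{RouteType: "exact", IncomingPath: "/some-withdrawn-page", Handler: "gone"}
```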
- -## Backends - -The `backends` collection uses the following data structure: - -```json -{ - "_id" : ObjectId(), - "backend_id" : "arbitrary-slug-or-name", - "backend_url" : "https://example.com:port/" -} -``` diff --git a/go.mod b/go.mod index 99887565..fa49c311 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,6 @@ go 1.22.5 require ( github.com/getsentry/sentry-go v0.30.0 - github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8 github.com/jackc/pgx/v5 v5.7.1 github.com/onsi/ginkgo/v2 v2.22.0 github.com/onsi/gomega v1.36.0 diff --git a/go.sum b/go.sum index 002fda38..0eab39b5 100644 --- a/go.sum +++ b/go.sum @@ -39,8 +39,6 @@ github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/getsentry/sentry-go v0.30.0 h1:lWUwDnY7sKHaVIoZ9wYqRHJ5iEmoc0pqcRqFkosKzBo= github.com/getsentry/sentry-go v0.30.0/go.mod h1:WU9B9/1/sHDqeV8T+3VwwbjeR5MSXs/6aqG3mqZrezA= -github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8 h1:DujepqpGd1hyOd7aW59XpK7Qymp8iy83xq74fLr21is= -github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= diff --git a/integration_tests/backend_helpers.go b/integration_tests/backend_helpers.go deleted file mode 100644 index 07b2611d..00000000 --- a/integration_tests/backend_helpers.go +++ /dev/null @@ -1,99 +0,0 @@ -package integration - -import ( - "net" - "net/http" - "net/http/httptest" - "strconv" - "time" - - // revive:disable:dot-imports - . 
"github.com/onsi/gomega" - // revive:enable:dot-imports - "github.com/onsi/gomega/ghttp" -) - -var backends = map[string]string{ - "backend-1": "127.0.0.1:6789", - "backend-2": "127.0.0.1:6790", - "outer": "127.0.0.1:6792", - "inner": "127.0.0.1:6793", - "innerer": "127.0.0.1:6794", - "root": "127.0.0.1:6795", - "other": "127.0.0.1:6796", - "fallthrough": "127.0.0.1:6797", - "down": "127.0.0.1:6798", - "slow-1": "127.0.0.1:6799", - "slow-2": "127.0.0.1:6800", - "backend": "127.0.0.1:6801", - "be": "127.0.0.1:6802", - "not-running": "127.0.0.1:6803", - "with-path": "127.0.0.1:6804", -} - -func startSimpleBackend(identifier, host string) *httptest.Server { - l, err := net.Listen("tcp", host) - Expect(err).NotTo(HaveOccurred()) - - ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - _, err := w.Write([]byte(identifier)) - Expect(err).NotTo(HaveOccurred()) - })) - ts.Listener.Close() - ts.Listener = l - ts.Start() - return ts -} - -func startTarpitBackend(host string, delays ...time.Duration) *httptest.Server { - responseDelay := 2 * time.Second - if len(delays) > 0 { - responseDelay = delays[0] - } - bodyDelay := 0 * time.Second - if len(delays) > 1 { - bodyDelay = delays[1] - } - - l, err := net.Listen("tcp", host) - Expect(err).NotTo(HaveOccurred()) - - ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - body := "Tarpit\n" - - if responseDelay > 0 { - time.Sleep(responseDelay) - } - w.Header().Add("Content-Length", strconv.Itoa(len(body))) - w.WriteHeader(http.StatusOK) - w.(http.Flusher).Flush() - - if bodyDelay > 0 { - time.Sleep(bodyDelay) - } - _, err := w.Write([]byte(body)) - Expect(err).NotTo(HaveOccurred()) - })) - ts.Listener.Close() - ts.Listener = l - ts.Start() - return ts -} - -func startRecordingBackend(tls bool, host string) *ghttp.Server { - l, err := net.Listen("tcp", host) - Expect(err).NotTo(HaveOccurred()) - - ts := ghttp.NewUnstartedServer() - ts.HTTPTestServer.Listener.Close() - ts.HTTPTestServer.Listener = l - if tls { - ts.HTTPTestServer.StartTLS() - } else { - ts.Start() - } - - ts.AllowUnhandledRequests = true - ts.UnhandledRequestStatusCode = http.StatusOK - return ts -} diff --git a/integration_tests/error_handling_test.go b/integration_tests/error_handling_test.go deleted file mode 100644 index afe7f0eb..00000000 --- a/integration_tests/error_handling_test.go +++ /dev/null @@ -1,50 +0,0 @@ -package integration - -import ( - "time" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" -) - -var _ = Describe("error handling", func() { - - Describe("handling an empty routing table", func() { - BeforeEach(func() { - reloadRoutes(apiPort) - }) - - It("should return a 503 error to the client", func() { - resp := routerRequest(routerPort, "/") - Expect(resp.StatusCode).To(Equal(503)) - - resp = routerRequest(routerPort, "/foo") - Expect(resp.StatusCode).To(Equal(503)) - }) - }) - - Describe("handling a panic", func() { - BeforeEach(func() { - addRoute("/boom", Route{Handler: "boom"}) - reloadRoutes(apiPort) - }) - - It("should return a 500 error to the client", func() { - resp := routerRequest(routerPort, "/boom") - Expect(resp.StatusCode).To(Equal(500)) - }) - - It("should log the fact", func() { - routerRequest(routerPort, "/boom") - - logDetails := lastRouterErrorLogEntry() - Expect(logDetails.Fields).To(Equal(map[string]interface{}{ - "error": "panic: Boom!!!", - "request": "GET /boom HTTP/1.1", - "request_method": "GET", - "status": float64(500), // All numbers in JSON are floating point - })) - Expect(logDetails.Timestamp).To(BeTemporally("~", time.Now(), time.Second)) - }) - }) -}) diff --git a/integration_tests/gone_test.go b/integration_tests/gone_test.go deleted file mode 100644 index 990606cd..00000000 --- a/integration_tests/gone_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package integration - -import ( - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" -) - -var _ = Describe("Gone routes", func() { - - BeforeEach(func() { - addRoute("/foo", NewGoneRoute()) - addRoute("/bar", NewGoneRoute("prefix")) - reloadRoutes(apiPort) - }) - - It("should support an exact gone route", func() { - resp := routerRequest(routerPort, "/foo") - Expect(resp.StatusCode).To(Equal(410)) - Expect(readBody(resp)).To(Equal("410 Gone\n")) - - resp = routerRequest(routerPort, "/foo/bar") - Expect(resp.StatusCode).To(Equal(404)) - Expect(readBody(resp)).To(Equal("404 page not found\n")) - }) - - It("should support a prefix gone route", func() { - resp := routerRequest(routerPort, "/bar") - Expect(resp.StatusCode).To(Equal(410)) - Expect(readBody(resp)).To(Equal("410 Gone\n")) - - resp = routerRequest(routerPort, "/bar/baz") - Expect(resp.StatusCode).To(Equal(410)) - Expect(readBody(resp)).To(Equal("410 Gone\n")) - }) -}) diff --git a/integration_tests/http_request_helpers.go b/integration_tests/http_request_helpers.go deleted file mode 100644 index 9822f2fa..00000000 --- a/integration_tests/http_request_helpers.go +++ /dev/null @@ -1,83 +0,0 @@ -package integration - -import ( - "bufio" - "context" - "encoding/json" - "fmt" - "io" - "net" - "net/http" - "net/textproto" - - // revive:disable:dot-imports - . 
"github.com/onsi/gomega" - // revive:enable:dot-imports -) - -func routerRequest(port int, path string) *http.Response { - return doRequest(newRequest("GET", routerURL(port, path))) -} - -func routerRequestWithHeaders(port int, path string, headers map[string]string) *http.Response { - return doRequest(newRequestWithHeaders("GET", routerURL(port, path), headers)) -} - -func newRequest(method, url string) *http.Request { - req, err := http.NewRequestWithContext(context.Background(), method, url, nil) - Expect(err).NotTo(HaveOccurred()) - return req -} - -func newRequestWithHeaders(method, url string, headers map[string]string) *http.Request { - req := newRequest(method, url) - for k, v := range headers { - req.Header.Set(k, v) - } - return req -} - -func doRequest(req *http.Request) *http.Response { - if _, ok := req.Header[textproto.CanonicalMIMEHeaderKey("User-Agent")]; !ok { - // Setting a blank User-Agent causes the http lib not to output one, whereas if there - // is no header, it will output a default one. - // See: https://github.com/golang/go/blob/release-branch.go1.5/src/net/http/request.go#L419 - req.Header.Set("User-Agent", "") - } - resp, err := http.DefaultTransport.RoundTrip(req) - Expect(err).NotTo(HaveOccurred()) - return resp -} - -func doHTTP10Request(req *http.Request) *http.Response { - conn, err := net.Dial("tcp", req.URL.Host) - Expect(err).NotTo(HaveOccurred()) - defer conn.Close() - - if req.Method == "" { - req.Method = "GET" - } - req.Proto = "HTTP/1.0" - req.ProtoMinor = 0 - fmt.Fprintf(conn, "%s %s %s\r\n", req.Method, req.URL.RequestURI(), req.Proto) - err = req.Header.Write(conn) - Expect(err).NotTo(HaveOccurred()) - fmt.Fprintf(conn, "\r\n") - - resp, err := http.ReadResponse(bufio.NewReader(conn), req) - Expect(err).NotTo(HaveOccurred()) - return resp -} - -func readBody(resp *http.Response) string { - bytes, err := io.ReadAll(resp.Body) - Expect(err).NotTo(HaveOccurred()) - return string(bytes) -} - -func readJSONBody(resp *http.Response, data interface{}) { - bytes, err := io.ReadAll(resp.Body) - Expect(err).NotTo(HaveOccurred()) - err = json.Unmarshal(bytes, data) - Expect(err).NotTo(HaveOccurred()) -} diff --git a/integration_tests/integration_test.go b/integration_tests/integration_test.go deleted file mode 100644 index e716ac91..00000000 --- a/integration_tests/integration_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package integration - -import ( - "runtime" - "testing" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" -) - -func TestEverything(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Integration test suite") -} - -var _ = BeforeSuite(func() { - runtime.GOMAXPROCS(runtime.NumCPU()) - var err error - err = setupTempLogfile() - if err != nil { - Fail(err.Error()) - } - - backendEnvVars := []string{} - for id, host := range backends { - envVar := "BACKEND_URL_" + id + "=http://" + host - backendEnvVars = append(backendEnvVars, envVar) - } - - err = startRouter(routerPort, apiPort, backendEnvVars) - if err != nil { - Fail(err.Error()) - } - err = initRouteHelper() - if err != nil { - Fail(err.Error()) - } -}) - -var _ = BeforeEach(func() { - resetTempLogfile() -}) - -var _ = AfterSuite(func() { - stopRouter(routerPort) - cleanupTempLogfile() -}) diff --git a/integration_tests/metrics_test.go b/integration_tests/metrics_test.go deleted file mode 100644 index ba4812e9..00000000 --- a/integration_tests/metrics_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package integration - -import ( - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" -) - -var _ = Describe("/metrics API endpoint", func() { - Context("response body", func() { - var responseBody string - - BeforeEach(func() { - resp := doRequest(newRequest("GET", routerURL(apiPort, "/metrics"))) - Expect(resp.StatusCode).To(Equal(200)) - responseBody = readBody(resp) - }) - - It("should contain at least one metric", func() { - Expect(responseBody).To(ContainSubstring("router_")) - }) - }) -}) diff --git a/integration_tests/performance_test.go b/integration_tests/performance_test.go deleted file mode 100644 index 697cb841..00000000 --- a/integration_tests/performance_test.go +++ /dev/null @@ -1,154 +0,0 @@ -package integration - -import ( - "net/http/httptest" - "time" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - vegeta "github.com/tsenart/vegeta/v12/lib" -) - -const routerLatencyThreshold = 20 * time.Millisecond - -var _ = Describe("Performance", func() { - - Context("with two healthy backends", func() { - var ( - backend1 *httptest.Server - backend2 *httptest.Server - ) - - BeforeEach(func() { - backend1 = startSimpleBackend("backend 1", backends["backend-1"]) - backend2 = startSimpleBackend("backend 2", backends["backend-2"]) - addRoute("/one", NewBackendRoute("backend-1")) - addRoute("/two", NewBackendRoute("backend-2")) - reloadRoutes(apiPort) - }) - AfterEach(func() { - backend1.Close() - backend2.Close() - }) - - It("Router should not cause errors or much latency", func() { - assertPerformantRouter(backend1, backend2, 100) - }) - - Describe("when the routes are being reloaded repeatedly", func() { - It("Router should not cause errors or much latency", func() { - stopCh := make(chan struct{}) - defer close(stopCh) - go func() { - ticker := time.NewTicker(100 * time.Millisecond) - defer ticker.Stop() - select { - case <-stopCh: - return - case <-ticker.C: - reloadRoutes(apiPort) - } - }() - - assertPerformantRouter(backend1, backend2, 100) - }) - }) - - Describe("with one slow backend hit separately", func() { - It("Router should not cause errors or much latency", func() { - slowBackend := startTarpitBackend(backends["slow-1"], time.Second) - defer slowBackend.Close() - addRoute("/slow", NewBackendRoute("slow-1")) - reloadRoutes(apiPort) - - _, gen := generateLoad([]string{routerURL(routerPort, "/slow")}, 50) - defer gen.Stop() - - assertPerformantRouter(backend1, backend2, 50) - }) - }) - - Describe("with one downed backend hit separately", func() { - It("Router should not cause errors or much latency", func() { - addRoute("/down", NewBackendRoute("down")) - reloadRoutes(apiPort) - - _, gen := generateLoad([]string{routerURL(routerPort, "/down")}, 50) - defer gen.Stop() - - assertPerformantRouter(backend1, backend2, 50) - }) - }) - - Describe("with high request throughput", func() { - It("Router should not cause errors or much latency", func() { - assertPerformantRouter(backend1, backend2, 500) - }) - }) - }) - - Describe("many concurrent slow connections", func() { - var backend1 *httptest.Server - var backend2 *httptest.Server - - BeforeEach(func() { - backend1 = startTarpitBackend(backends["slow-1"], time.Second) - backend2 = startTarpitBackend(backends["slow-2"], time.Second) - addRoute("/one", NewBackendRoute("slow-1")) - addRoute("/two", NewBackendRoute("slow-2")) - reloadRoutes(apiPort) - }) - AfterEach(func() { - backend1.Close() - backend2.Close() - }) - - It("Router should not cause errors or much latency", func() { - assertPerformantRouter(backend1, backend2, 500) - }) - }) -}) - -func 
assertPerformantRouter(backend1, backend2 *httptest.Server, rps int) { - directResultsCh, _ := generateLoad([]string{backend1.URL + "/one", backend2.URL + "/two"}, rps) - routerResultsCh, _ := generateLoad([]string{routerURL(routerPort, "/one"), routerURL(routerPort, "/two")}, rps) - - directResults := <-directResultsCh - routerResults := <-routerResultsCh - - Expect(routerResults.Requests).To(Equal(directResults.Requests)) - Expect(routerResults.Success).To(BeNumerically("~", 1.0)) - Expect(directResults.Success).To(BeNumerically("~", 1.0)) - - Expect(routerResults.Latencies.Mean).To(BeNumerically("~", directResults.Latencies.Mean, routerLatencyThreshold)) - Expect(routerResults.Latencies.P95).To(BeNumerically("~", directResults.Latencies.P95, routerLatencyThreshold)) - Expect(routerResults.Latencies.P99).To(BeNumerically("~", directResults.Latencies.P99, routerLatencyThreshold*2)) - Expect(routerResults.Latencies.Max).To(BeNumerically("~", directResults.Latencies.Max, routerLatencyThreshold*2)) -} - -func generateLoad(targetURLs []string, rps int) (chan *vegeta.Metrics, *vegeta.Attacker) { - targets := make([]vegeta.Target, 0, len(targetURLs)) - for _, url := range targetURLs { - targets = append(targets, vegeta.Target{ - Method: "GET", - URL: url, - }) - } - targeter := vegeta.NewStaticTargeter(targets...) - metrics := make(chan *vegeta.Metrics, 1) - veg := vegeta.NewAttacker() - go vegetaAttack(veg, targeter, rps, metrics) - return metrics, veg -} - -func vegetaAttack(veg *vegeta.Attacker, targets vegeta.Targeter, rps int, metrics chan *vegeta.Metrics) { - pace := vegeta.Pacer(vegeta.ConstantPacer{Freq: rps, Per: time.Second}) - - var m vegeta.Metrics - for res := range veg.Attack(targets, pace, 10*time.Second, "load") { - m.Add(res) - } - m.Close() - - metrics <- &m -} diff --git a/integration_tests/proxy_function_test.go b/integration_tests/proxy_function_test.go deleted file mode 100644 index 127d0321..00000000 --- a/integration_tests/proxy_function_test.go +++ /dev/null @@ -1,382 +0,0 @@ -package integration - -import ( - "io" - "net/http" - "net/http/httptest" - "net/textproto" - "net/url" - "strings" - "time" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - "github.com/onsi/gomega/ghttp" -) - -var _ = Describe("Functioning as a reverse proxy", func() { - var recorder *ghttp.Server - - Describe("connecting to the backend", func() { - It("should return a 502 if the connection to the backend is refused", func() { - addRoute("/not-running", NewBackendRoute("not-running")) - reloadRoutes(apiPort) - - req, err := http.NewRequest(http.MethodGet, routerURL(routerPort, "/not-running"), nil) - Expect(err).NotTo(HaveOccurred()) - - resp := doRequest(req) - Expect(resp.StatusCode).To(Equal(502)) - - logDetails := lastRouterErrorLogEntry() - Expect(logDetails.Fields).To(Equal(map[string]interface{}{ - "error": "dial tcp 127.0.0.1:6803: connect: connection refused", - "request": "GET /not-running HTTP/1.1", - "request_method": "GET", - "status": float64(502), // All numbers in JSON are floating point - "upstream_addr": "127.0.0.1:6803", - })) - Expect(logDetails.Timestamp).To(BeTemporally("~", time.Now(), time.Second)) - }) - - It("should log and return a 504 if the connection times out in the configured time", func() { - err := startRouter(3167, 3166, []string{"ROUTER_BACKEND_CONNECT_TIMEOUT=0.3s", "BACKEND_URL_black-hole=http://240.0.0.0:1234/"}) - Expect(err).NotTo(HaveOccurred()) - defer stopRouter(3167) - - addRoute("/should-time-out", NewBackendRoute("black-hole")) - reloadRoutes(3166) - - req, err := http.NewRequest(http.MethodGet, routerURL(3167, "/should-time-out"), nil) - Expect(err).NotTo(HaveOccurred()) - - start := time.Now() - resp := doRequest(req) - duration := time.Since(start) - - Expect(resp.StatusCode).To(Equal(504)) - Expect(duration).To(BeNumerically("~", 320*time.Millisecond, 20*time.Millisecond)) // 300 - 340 ms - - logDetails := lastRouterErrorLogEntry() - Expect(logDetails.Fields).To(Equal(map[string]interface{}{ - "error": "dial tcp 240.0.0.0:1234: i/o timeout", - "request": "GET /should-time-out HTTP/1.1", - "request_method": "GET", - "status": float64(504), // All numbers in JSON are floating point - "upstream_addr": "240.0.0.0:1234", - })) - Expect(logDetails.Timestamp).To(BeTemporally("~", time.Now(), time.Second)) - }) - - Describe("response header timeout", func() { - var tarpit1, tarpit2 *httptest.Server - - BeforeEach(func() { - err := startRouter(3167, 3166, []string{"ROUTER_BACKEND_HEADER_TIMEOUT=0.3s", "BACKEND_URL_slow-1=http://127.0.0.1:6256/", "BACKEND_URL_slow-2=http://127.0.0.1:6253/"}) - Expect(err).NotTo(HaveOccurred()) - tarpit1 = startTarpitBackend("127.0.0.1:6256", time.Second) - tarpit2 = startTarpitBackend("127.0.0.1:6253", 100*time.Millisecond, 500*time.Millisecond) - addRoute("/tarpit1", NewBackendRoute("slow-1")) - addRoute("/tarpit2", NewBackendRoute("slow-2")) - reloadRoutes(3166) - }) - - AfterEach(func() { - tarpit1.Close() - tarpit2.Close() - stopRouter(3167) - }) - - It("should log and return a 504 if a backend takes longer than the configured response timeout to start returning a response", func() { - req := newRequest(http.MethodGet, routerURL(3167, "/tarpit1")) - resp := doRequest(req) - Expect(resp.StatusCode).To(Equal(504)) - - logDetails := lastRouterErrorLogEntry() - tarpitURL, _ := url.Parse(tarpit1.URL) - Expect(logDetails.Fields).To(Equal(map[string]interface{}{ - "error": "net/http: timeout awaiting response headers", - "request": "GET /tarpit1 HTTP/1.1", - "request_method": "GET", - "status": float64(504), // All numbers in JSON are floating point - "upstream_addr": tarpitURL.Host, - })) - Expect(logDetails.Timestamp).To(BeTemporally("~", time.Now(), 
time.Second)) - }) - - It("should still return the response if the body takes longer than the header timeout", func() { - resp := routerRequest(3167, "/tarpit2") - Expect(resp.StatusCode).To(Equal(200)) - Expect(readBody(resp)).To(Equal("Tarpit\n")) - }) - }) - }) - - Describe("header handling", func() { - BeforeEach(func() { - recorder = startRecordingBackend(false, backends["backend"]) - addRoute("/foo", NewBackendRoute("backend", "prefix")) - reloadRoutes(apiPort) - }) - - AfterEach(func() { - recorder.Close() - }) - - It("should pass through most http headers to the backend", func() { - resp := routerRequestWithHeaders(routerPort, "/foo", map[string]string{ - "Foo": "bar", - "User-Agent": "Router test suite 2.7182", - }) - Expect(resp.StatusCode).To(Equal(200)) - - Expect(recorder.ReceivedRequests()).To(HaveLen(1)) - beReq := recorder.ReceivedRequests()[0] - Expect(beReq.Header.Get("Foo")).To(Equal("bar")) - Expect(beReq.Header.Get("User-Agent")).To(Equal("Router test suite 2.7182")) - }) - - It("should set the Host header to the backend hostname", func() { - resp := routerRequestWithHeaders(routerPort, "/foo", map[string]string{ - "Host": "www.example.com", - }) - Expect(resp.StatusCode).To(Equal(200)) - - recorderURL, err := url.Parse(recorder.URL()) - Expect(err).NotTo(HaveOccurred()) - - Expect(recorder.ReceivedRequests()).To(HaveLen(1)) - beReq := recorder.ReceivedRequests()[0] - Expect(beReq.Host).To(Equal(recorderURL.Host)) - }) - - It("should not add a default User-Agent if there isn't one in the request", func() { - // Most http libraries add a default User-Agent header. - resp := routerRequest(routerPort, "/foo") - Expect(resp.StatusCode).To(Equal(200)) - - Expect(recorder.ReceivedRequests()).To(HaveLen(1)) - beReq := recorder.ReceivedRequests()[0] - _, ok := beReq.Header[textproto.CanonicalMIMEHeaderKey("User-Agent")] - Expect(ok).To(BeFalse()) - }) - - It("should add the client IP to X-Forwarded-For", func() { - resp := routerRequest(routerPort, "/foo") - Expect(resp.StatusCode).To(Equal(200)) - - Expect(recorder.ReceivedRequests()).To(HaveLen(1)) - beReq := recorder.ReceivedRequests()[0] - Expect(beReq.Header.Get("X-Forwarded-For")).To(Equal("127.0.0.1")) - - resp = routerRequestWithHeaders(routerPort, "/foo", map[string]string{ - "X-Forwarded-For": "10.9.8.7", - }) - Expect(resp.StatusCode).To(Equal(200)) - - Expect(recorder.ReceivedRequests()).To(HaveLen(2)) - beReq = recorder.ReceivedRequests()[1] - Expect(beReq.Header.Get("X-Forwarded-For")).To(Equal("10.9.8.7, 127.0.0.1")) - }) - - Describe("setting the Via header", func() { - // See https://tools.ietf.org/html/rfc2616#section-14.45 - - It("should add itself to the Via request header for an HTTP/1.1 request", func() { - resp := routerRequest(routerPort, "/foo") - Expect(resp.StatusCode).To(Equal(200)) - - Expect(recorder.ReceivedRequests()).To(HaveLen(1)) - beReq := recorder.ReceivedRequests()[0] - Expect(beReq.Header.Get("Via")).To(Equal("1.1 router")) - - resp = routerRequestWithHeaders(routerPort, "/foo", map[string]string{ - "Via": "1.0 fred, 1.1 barney", - }) - Expect(resp.StatusCode).To(Equal(200)) - - Expect(recorder.ReceivedRequests()).To(HaveLen(2)) - beReq = recorder.ReceivedRequests()[1] - Expect(beReq.Header.Get("Via")).To(Equal("1.0 fred, 1.1 barney, 1.1 router")) - }) - - It("should add itself to the Via request header for an HTTP/1.0 request", func() { - req := newRequest(http.MethodGet, routerURL(routerPort, "/foo")) - resp := doHTTP10Request(req) - Expect(resp.StatusCode).To(Equal(200)) - - 
Expect(recorder.ReceivedRequests()).To(HaveLen(1)) - beReq := recorder.ReceivedRequests()[0] - Expect(beReq.Header.Get("Via")).To(Equal("1.0 router")) - - req = newRequestWithHeaders("GET", routerURL(routerPort, "/foo"), map[string]string{ - "Via": "1.0 fred, 1.1 barney", - }) - resp = doHTTP10Request(req) - Expect(resp.StatusCode).To(Equal(200)) - - Expect(recorder.ReceivedRequests()).To(HaveLen(2)) - beReq = recorder.ReceivedRequests()[1] - Expect(beReq.Header.Get("Via")).To(Equal("1.0 fred, 1.1 barney, 1.0 router")) - }) - - It("should add itself to the Via response heaver", func() { - resp := routerRequest(routerPort, "/foo") - Expect(resp.StatusCode).To(Equal(200)) - Expect(resp.Header.Get("Via")).To(Equal("1.1 router")) - - recorder.AppendHandlers(ghttp.RespondWith(200, "body", http.Header{ - "Via": []string{"1.0 fred, 1.1 barney"}, - })) - resp = routerRequest(routerPort, "/foo") - Expect(resp.StatusCode).To(Equal(200)) - Expect(resp.Header.Get("Via")).To(Equal("1.0 fred, 1.1 barney, 1.1 router")) - }) - }) - }) - - Describe("request verb, path, query and body handling", func() { - BeforeEach(func() { - recorder = startRecordingBackend(false, backends["backend"]) - addRoute("/foo", NewBackendRoute("backend", "prefix")) - reloadRoutes(apiPort) - }) - - AfterEach(func() { - recorder.Close() - }) - - It("should use the same verb and path when proxying", func() { - recorder.AppendHandlers( - ghttp.VerifyRequest("POST", "/foo"), - ghttp.VerifyRequest("DELETE", "/foo/bar/baz.json"), - ) - - req := newRequest("POST", routerURL(routerPort, "/foo")) - resp := doRequest(req) - Expect(resp.StatusCode).To(Equal(200)) - - req = newRequest("DELETE", routerURL(routerPort, "/foo/bar/baz.json")) - resp = doRequest(req) - Expect(resp.StatusCode).To(Equal(200)) - - Expect(recorder.ReceivedRequests()).To(HaveLen(2)) - }) - - It("should pass through the query string unmodified", func() { - recorder.AppendHandlers( - ghttp.VerifyRequest("GET", "/foo/bar", "baz=qux"), - ) - resp := routerRequest(routerPort, "/foo/bar?baz=qux") - Expect(resp.StatusCode).To(Equal(200)) - - Expect(recorder.ReceivedRequests()).To(HaveLen(1)) - }) - - It("should pass through the body unmodified", func() { - recorder.AppendHandlers(func(w http.ResponseWriter, req *http.Request) { - body, err := io.ReadAll(req.Body) - req.Body.Close() - Expect(err).NotTo(HaveOccurred()) - Expect(string(body)).To(Equal("I am the request body. Woohoo!")) - }) - - req := newRequest("POST", routerURL(routerPort, "/foo")) - req.Body = io.NopCloser(strings.NewReader("I am the request body. 
Woohoo!")) - resp := doRequest(req) - Expect(resp.StatusCode).To(Equal(200)) - - Expect(recorder.ReceivedRequests()).To(HaveLen(1)) - }) - }) - - Describe("handling a backend with a non '/' path", func() { - BeforeEach(func() { - err := startRouter(3167, 3166, []string{"ROUTER_TLS_SKIP_VERIFY=1", "BACKEND_URL_with-path=http://127.0.0.1:6804/something"}) - Expect(err).NotTo(HaveOccurred()) - recorder = startRecordingBackend(false, backends["with-path"]) - addRoute("/foo/bar", NewBackendRoute("with-path", "prefix")) - reloadRoutes(3166) - }) - - AfterEach(func() { - recorder.Close() - stopRouter(3167) - }) - - It("should merge the 2 paths", func() { - resp := routerRequest(3167, "/foo/bar") - Expect(resp.StatusCode).To(Equal(200)) - - Expect(recorder.ReceivedRequests()).To(HaveLen(1)) - beReq := recorder.ReceivedRequests()[0] - Expect(beReq.URL.RequestURI()).To(Equal("/something/foo/bar")) - }) - - It("should preserve the request query string", func() { - resp := routerRequest(3167, "/foo/bar?baz=qux") - Expect(resp.StatusCode).To(Equal(200)) - - Expect(recorder.ReceivedRequests()).To(HaveLen(1)) - beReq := recorder.ReceivedRequests()[0] - Expect(beReq.URL.RequestURI()).To(Equal("/something/foo/bar?baz=qux")) - }) - }) - - Describe("handling HTTP/1.0 requests", func() { - BeforeEach(func() { - recorder = startRecordingBackend(false, backends["backend"]) - addRoute("/foo", NewBackendRoute("backend", "prefix")) - reloadRoutes(apiPort) - }) - - AfterEach(func() { - recorder.Close() - }) - - It("should work with incoming HTTP/1.1 requests", func() { - req := newRequest("GET", routerURL(routerPort, "/foo")) - resp := doHTTP10Request(req) - Expect(resp.StatusCode).To(Equal(200)) - - Expect(recorder.ReceivedRequests()).To(HaveLen(1)) - beReq := recorder.ReceivedRequests()[0] - Expect(beReq.URL.RequestURI()).To(Equal("/foo")) - }) - - It("should proxy to the backend as HTTP/1.1 requests", func() { - req := newRequest("GET", routerURL(routerPort, "/foo")) - resp := doHTTP10Request(req) - Expect(resp.StatusCode).To(Equal(200)) - - Expect(recorder.ReceivedRequests()).To(HaveLen(1)) - beReq := recorder.ReceivedRequests()[0] - Expect(beReq.Proto).To(Equal("HTTP/1.1")) - }) - }) - - Describe("handling requests to a HTTPS backend", func() { - BeforeEach(func() { - err := startRouter(3167, 3166, []string{"ROUTER_TLS_SKIP_VERIFY=1", "BACKEND_URL_backend=https://127.0.0.1:2486"}) - Expect(err).NotTo(HaveOccurred()) - recorder = startRecordingBackend(true, "127.0.0.1:2486") - addRoute("/foo", NewBackendRoute("backend", "prefix")) - reloadRoutes(3166) - }) - - AfterEach(func() { - recorder.Close() - stopRouter(3167) - }) - - It("should correctly reverse proxy to a HTTPS backend", func() { - req := newRequest("GET", routerURL(3167, "/foo")) - resp := doRequest(req) - Expect(resp.StatusCode).To(Equal(200)) - - Expect(recorder.ReceivedRequests()).To(HaveLen(1)) - beReq := recorder.ReceivedRequests()[0] - Expect(beReq.URL.RequestURI()).To(Equal("/foo")) - }) - }) -}) diff --git a/integration_tests/redirect_test.go b/integration_tests/redirect_test.go deleted file mode 100644 index 14089021..00000000 --- a/integration_tests/redirect_test.go +++ /dev/null @@ -1,263 +0,0 @@ -package integration - -import ( - "time" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - "github.com/onsi/gomega/ghttp" -) - -var _ = Describe("Redirection", func() { - - Describe("exact redirects", func() { - BeforeEach(func() { - addRoute("/foo", NewRedirectRoute("/bar")) - addRoute("/foo-temp", NewRedirectRoute("/bar", "exact")) - addRoute("/query-temp", NewRedirectRoute("/bar?query=true", "exact")) - addRoute("/fragment", NewRedirectRoute("/bar#section", "exact")) - addRoute("/preserve-query", NewRedirectRoute("/qux", "exact", "preserve")) - reloadRoutes(apiPort) - }) - - It("should redirect", func() { - resp := routerRequest(routerPort, "/foo") - Expect(resp.StatusCode).To(Equal(301)) - }) - - It("should contain the redirect location", func() { - resp := routerRequest(routerPort, "/foo") - Expect(resp.Header.Get("Location")).To(Equal("/bar")) - }) - - It("should not preserve the query string for the source by default", func() { - resp := routerRequest(routerPort, "/foo?baz=qux") - Expect(resp.Header.Get("Location")).To(Equal("/bar")) - }) - - It("should preserve the query string for the source if specified", func() { - resp := routerRequest(routerPort, "/preserve-query?foo=bar") - Expect(resp.Header.Get("Location")).To(Equal("/qux?foo=bar")) - }) - - It("should preserve the query string for the target", func() { - resp := routerRequest(routerPort, "/query-temp") - Expect(resp.Header.Get("Location")).To(Equal("/bar?query=true")) - }) - - It("should preserve the fragment for the target", func() { - resp := routerRequest(routerPort, "/fragment") - Expect(resp.Header.Get("Location")).To(Equal("/bar#section")) - }) - - It("should contain cache headers of 30 mins", func() { - resp := routerRequest(routerPort, "/foo") - Expect(resp.Header.Get("Cache-Control")).To(Equal("max-age=1800, public")) - - Expect( - time.Parse(time.RFC1123, resp.Header.Get("Expires")), - ).To(BeTemporally( - "~", - time.Now().Add(30*time.Minute), - time.Second, - )) - }) - }) - - Describe("prefix redirects", func() { - BeforeEach(func() { - addRoute("/foo", NewRedirectRoute("/bar", "prefix")) - addRoute("/foo-temp", NewRedirectRoute("/bar-temp", "prefix")) - addRoute("/qux", NewRedirectRoute("/baz", "prefix", "ignore")) - reloadRoutes(apiPort) - }) - - It("should redirect to the destination", func() { - resp := routerRequest(routerPort, "/foo") - Expect(resp.StatusCode).To(Equal(301)) - Expect(resp.Header.Get("Location")).To(Equal("/bar")) - }) - - It("should preserve extra path sections when redirecting by default", func() { - resp := routerRequest(routerPort, "/foo/baz") - Expect(resp.Header.Get("Location")).To(Equal("/bar/baz")) - }) - - It("should ignore extra path sections when redirecting if specified", func() { - resp := routerRequest(routerPort, "/qux/quux") - Expect(resp.Header.Get("Location")).To(Equal("/baz")) - }) - - It("should preserve the query string when redirecting by default", func() { - resp := routerRequest(routerPort, "/foo?baz=qux") - Expect(resp.Header.Get("Location")).To(Equal("/bar?baz=qux")) - }) - - It("should not preserve the query string when redirecting if specified", func() { - resp := routerRequest(routerPort, "/qux/quux?foo=bar") - Expect(resp.Header.Get("Location")).To(Equal("/baz")) - }) - - It("should contain cache headers of 30 mins", func() { - resp := routerRequest(routerPort, "/foo") - Expect(resp.Header.Get("Cache-Control")).To(Equal("max-age=1800, public")) - - Expect( - time.Parse(time.RFC1123, resp.Header.Get("Expires")), - ).To(BeTemporally( - "~", - time.Now().Add(30*time.Minute), - time.Second, - )) - }) - - It("should handle 
path-preserving redirects with special characters", func() { - addRoute("/foo%20bar", NewRedirectRoute("/bar%20baz", "prefix")) - reloadRoutes(apiPort) - - resp := routerRequest(routerPort, "/foo bar/something") - Expect(resp.StatusCode).To(Equal(301)) - Expect(resp.Header.Get("Location")).To(Equal("/bar%20baz/something")) - }) - }) - - Describe("external redirects", func() { - BeforeEach(func() { - addRoute("/foo", NewRedirectRoute("http://foo.example.com/foo")) - addRoute("/baz", NewRedirectRoute("http://foo.example.com/baz", "exact", "preserve")) - addRoute("/bar", NewRedirectRoute("http://bar.example.com/bar", "prefix")) - addRoute("/qux", NewRedirectRoute("http://bar.example.com/qux", "prefix", "ignore")) - reloadRoutes(apiPort) - }) - - Describe("exact redirect", func() { - It("should redirect to the external URL", func() { - resp := routerRequest(routerPort, "/foo") - Expect(resp.Header.Get("Location")).To(Equal("http://foo.example.com/foo")) - }) - - It("should not preserve the query string by default", func() { - resp := routerRequest(routerPort, "/foo?foo=qux") - Expect(resp.Header.Get("Location")).To(Equal("http://foo.example.com/foo")) - }) - - It("should preserve the query string if specified", func() { - resp := routerRequest(routerPort, "/baz?foo=qux") - Expect(resp.Header.Get("Location")).To(Equal("http://foo.example.com/baz?foo=qux")) - }) - }) - - Describe("prefix redirect", func() { - It("should redirect to the external URL", func() { - resp := routerRequest(routerPort, "/bar") - Expect(resp.Header.Get("Location")).To(Equal("http://bar.example.com/bar")) - }) - - It("should preserve extra path sections when redirecting by default", func() { - resp := routerRequest(routerPort, "/bar/baz") - Expect(resp.Header.Get("Location")).To(Equal("http://bar.example.com/bar/baz")) - }) - - It("should ignore extra path sections when redirecting if specified", func() { - resp := routerRequest(routerPort, "/qux/baz") - Expect(resp.Header.Get("Location")).To(Equal("http://bar.example.com/qux")) - }) - - It("should preserve the query string when redirecting", func() { - resp := routerRequest(routerPort, "/bar?baz=qux") - Expect(resp.Header.Get("Location")).To(Equal("http://bar.example.com/bar?baz=qux")) - }) - }) - }) - - Describe("redirects with a _ga parameter", func() { - BeforeEach(func() { - addRoute("/foo", NewRedirectRoute("https://hmrc.service.gov.uk/pay", "prefix", "ignore")) - addRoute("/bar", NewRedirectRoute("https://bar.service.gov.uk/bar", "exact", "preserve")) - addRoute("/baz", NewRedirectRoute("https://gov.uk/baz-luhrmann", "exact", "ignore")) - addRoute("/pay-tax", NewRedirectRoute("https://tax.service.gov.uk/pay", "exact", "ignore")) - addRoute("/biz-bank", NewRedirectRoute("https://british-business-bank.co.uk", "prefix", "ignore")) - addRoute("/query-paramed", NewRedirectRoute("https://param.servicegov.uk?included-param=true", "exact", "ignore")) - reloadRoutes(apiPort) - }) - - It("should only preserve the _ga parameter when redirecting to service URLs that want to ignore query params", func() { - resp := routerRequest(routerPort, "/foo?_ga=identifier&blah=xyz") - Expect(resp.Header.Get("Location")).To(Equal("https://hmrc.service.gov.uk/pay?_ga=identifier")) - }) - - It("should retain all params when redirecting to a route that wants them", func() { - resp := routerRequest(routerPort, "/bar?wanted=param&_ga=xyz&blah=xyz") - Expect(resp.Header.Get("Location")).To(Equal("https://bar.service.gov.uk/bar?wanted=param&_ga=xyz&blah=xyz")) - }) - - It("should preserve the _ga 
parameter when redirecting to gov.uk URLs", func() { - resp := routerRequest(routerPort, "/baz?_ga=identifier") - Expect(resp.Header.Get("Location")).To(Equal("https://gov.uk/baz-luhrmann?_ga=identifier")) - }) - - It("should preserve the _ga parameter when redirecting to service.gov.uk URLs", func() { - resp := routerRequest(routerPort, "/pay-tax?_ga=12345") - Expect(resp.Header.Get("Location")).To(Equal("https://tax.service.gov.uk/pay?_ga=12345")) - }) - - It("should preserve only the first _ga parameter", func() { - resp := routerRequest(routerPort, "/pay-tax/?_ga=12345&_ga=6789") - Expect(resp.Header.Get("Location")).To(Equal("https://tax.service.gov.uk/pay?_ga=12345")) - }) - - It("should preserve the _ga param when redirecting to british business bank", func() { - resp := routerRequest(routerPort, "/biz-bank?unwanted=param&_ga=12345") - Expect(resp.Header.Get("Location")).To(Equal("https://british-business-bank.co.uk?_ga=12345")) - }) - - It("should preserve the _ga param and any existing query string that the target URL has", func() { - resp := routerRequest(routerPort, "/query-paramed?unwanted_param=blah&_ga=12345") - // https://param.servicegov.uk?included-param=true?unwanted_param=blah&_ga=12345 - Expect(resp.Header.Get("Location")).To(Equal("https://param.servicegov.uk?_ga=12345&included-param=true")) - }) - }) - - Describe("path case normalisation rule", func() { - var recorder *ghttp.Server - - BeforeEach(func() { - recorder = startRecordingBackend(false, backends["be"]) - addRoute("/guidance/keeping-a-pet-pig-or-micropig", NewBackendRoute("be", "exact")) - addRoute("/GUIDANCE/keeping-a-pet-pig-or-micropig", NewBackendRoute("be", "exact")) - reloadRoutes(apiPort) - }) - - AfterEach(func() { - recorder.Close() - }) - - It("should permanently redirect an ALL CAPS path to lowercase", func() { - resp := routerRequest(routerPort, "/GUIDANCE/KEEPING-A-PET-PIG-OR-MICROPIG") - Expect(resp.StatusCode).To(Equal(301)) - Expect(resp.Header.Get("Location")).To(Equal("/guidance/keeping-a-pet-pig-or-micropig")) - }) - - It("should preserve case in the query string", func() { - resp := routerRequest(routerPort, "/GUIDANCE/KEEPING-A-PET-PIG-OR-MICROPIG?Pig=Kunekune") - Expect(resp.StatusCode).To(Equal(301)) - Expect(resp.Header.Get("Location")).To(Equal("/guidance/keeping-a-pet-pig-or-micropig?Pig=Kunekune")) - }) - - It("should forward an all-lowercase path unchanged", func() { - resp := routerRequest(routerPort, "/guidance/keeping-a-pet-pig-or-micropig") - Expect(resp.StatusCode).To(Equal(200)) - Expect(recorder.ReceivedRequests()).To(HaveLen(1)) - beReq := recorder.ReceivedRequests()[0] - Expect(beReq.URL.RequestURI()).To(Equal("/guidance/keeping-a-pet-pig-or-micropig")) - }) - - It("should forward a mixed-case path unchanged", func() { - resp := routerRequest(routerPort, "/GUIDANCE/keeping-a-pet-pig-or-micropig") - Expect(resp.StatusCode).To(Equal(200)) - Expect(recorder.ReceivedRequests()).To(HaveLen(1)) - beReq := recorder.ReceivedRequests()[0] - Expect(beReq.URL.RequestURI()).To(Equal("/GUIDANCE/keeping-a-pet-pig-or-micropig")) - }) - }) -}) diff --git a/integration_tests/reload_api_test.go b/integration_tests/reload_api_test.go deleted file mode 100644 index 0b52cd17..00000000 --- a/integration_tests/reload_api_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package integration - -import ( - "time" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" -) - -var _ = Describe("reload API endpoint", func() { - - Describe("request handling", func() { - It("should return 202 for POST /reload", func() { - resp := doRequest(newRequest("POST", routerURL(apiPort, "/reload"))) - Expect(resp.StatusCode).To(Equal(202)) - Expect(readBody(resp)).To(Equal("Reload queued")) - }) - - It("should return 404 for POST /foo", func() { - resp := doRequest(newRequest("POST", routerURL(apiPort, "/foo"))) - Expect(resp.StatusCode).To(Equal(404)) - }) - - It("should return 404 for POST /reload/foo", func() { - resp := doRequest(newRequest("POST", routerURL(apiPort, "/reload/foo"))) - Expect(resp.StatusCode).To(Equal(404)) - }) - - It("should return 405 for GET /reload", func() { - resp := doRequest(newRequest("GET", routerURL(apiPort, "/reload"))) - Expect(resp.StatusCode).To(Equal(405)) - Expect(resp.Header.Get("Allow")).To(Equal("POST")) - }) - - It("eventually reloads the routes", func() { - addRoute("/foo", NewRedirectRoute("/qux", "prefix")) - addRoute("/bar", NewRedirectRoute("/qux", "prefix")) - doRequest(newRequest("POST", routerURL(apiPort, "/reload"))) - - Eventually(func() int { - return routerRequest(routerPort, "/foo").StatusCode - }, time.Second*3).Should(Equal(301)) - - Eventually(func() int { - return routerRequest(routerPort, "/bar").StatusCode - }, time.Second*3).Should(Equal(301)) - }) - }) - - Describe("healthcheck", func() { - It("should return HTTP 200 OK on GET", func() { - resp := doRequest(newRequest("GET", routerURL(apiPort, "/healthcheck"))) - Expect(resp.StatusCode).To(Equal(200)) - Expect(readBody(resp)).To(Equal("OK")) - }) - - It("should return HTTP 405 Method Not Allowed on POST", func() { - resp := doRequest(newRequest("POST", routerURL(apiPort, "/healthcheck"))) - Expect(resp.StatusCode).To(Equal(405)) - Expect(resp.Header.Get("Allow")).To(Equal("GET")) - }) - }) - - Describe("memory stats", func() { - It("should return memory statistics", func() { - addRoute("/foo", NewRedirectRoute("/bar", "prefix")) - addRoute("/baz", NewRedirectRoute("/qux", "prefix")) - addRoute("/foo", NewRedirectRoute("/bar/baz")) - reloadRoutes(apiPort) - - resp := doRequest(newRequest("GET", routerURL(apiPort, "/memory-stats"))) - Expect(resp.StatusCode).To(Equal(200)) - - var data map[string]interface{} - readJSONBody(resp, &data) - - Expect(data).To(HaveKey("Alloc")) - Expect(data).To(HaveKey("HeapInuse")) - }) - }) -}) diff --git a/integration_tests/route_helpers.go b/integration_tests/route_helpers.go deleted file mode 100644 index 4d2795b8..00000000 --- a/integration_tests/route_helpers.go +++ /dev/null @@ -1,103 +0,0 @@ -package integration - -import ( - "fmt" - "os" - "time" - - "github.com/globalsign/mgo" - - // revive:disable:dot-imports - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - // revive:enable:dot-imports -) - -var _ = AfterEach(func() { - clearRoutes() -}) - -var ( - routerDB *mgo.Database -) - -type Route struct { - IncomingPath string `bson:"incoming_path"` - RouteType string `bson:"route_type"` - Handler string `bson:"handler"` - BackendID string `bson:"backend_id"` - RedirectTo string `bson:"redirect_to"` - SegmentsMode string `bson:"segments_mode"` -} - -func NewBackendRoute(backendID string, extraParams ...string) Route { - route := Route{ - Handler: "backend", - BackendID: backendID, - } - - if len(extraParams) > 0 { - route.RouteType = extraParams[0] - } - - return route -} - -func NewRedirectRoute(redirectTo string, extraParams ...string) Route { - route := Route{ - Handler: "redirect", - RedirectTo: redirectTo, - RouteType: "exact", - } - - if len(extraParams) > 0 { - route.RouteType = extraParams[0] - } - if len(extraParams) > 1 { - route.SegmentsMode = extraParams[1] - } - - return route -} - -func NewGoneRoute(extraParams ...string) Route { - route := Route{ - Handler: "gone", - } - - if len(extraParams) > 0 { - route.RouteType = extraParams[0] - } - - return route -} - -func initRouteHelper() error { - databaseURL := os.Getenv("ROUTER_MONGO_URL") - - if databaseURL == "" { - databaseURL = "127.0.0.1" - } - - sess, err := mgo.Dial(databaseURL) - if err != nil { - return fmt.Errorf("failed to connect to mongo: %w", err) - } - sess.SetSyncTimeout(10 * time.Minute) - sess.SetSocketTimeout(10 * time.Minute) - - routerDB = sess.DB("router_test") - return nil -} - -func addRoute(path string, route Route) { - route.IncomingPath = path - - err := routerDB.C("routes").Insert(route) - Expect(err).NotTo(HaveOccurred()) -} - -func clearRoutes() { - _ = routerDB.C("routes").DropCollection() - _ = routerDB.C("backends").DropCollection() -} diff --git a/integration_tests/route_loading_test.go b/integration_tests/route_loading_test.go deleted file mode 100644 index 57a1154d..00000000 --- a/integration_tests/route_loading_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package integration - -import ( - "net/http/httptest" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" -) - -var _ = Describe("loading routes from the db", func() { - var ( - backend1 *httptest.Server - backend2 *httptest.Server - ) - - BeforeEach(func() { - backend1 = startSimpleBackend("backend 1", backends["backend-1"]) - backend2 = startSimpleBackend("backend 2", backends["backend-2"]) - }) - AfterEach(func() { - backend1.Close() - backend2.Close() - }) - - Context("a route with an unrecognised handler type", func() { - BeforeEach(func() { - addRoute("/foo", NewBackendRoute("backend-1")) - addRoute("/bar", Route{Handler: "fooey"}) - addRoute("/baz", NewBackendRoute("backend-2")) - reloadRoutes(apiPort) - }) - - It("should skip the invalid route", func() { - resp := routerRequest(routerPort, "/bar") - Expect(resp.StatusCode).To(Equal(404)) - }) - - It("should continue to load other routes", func() { - resp := routerRequest(routerPort, "/foo") - Expect(readBody(resp)).To(Equal("backend 1")) - - resp = routerRequest(routerPort, "/baz") - Expect(readBody(resp)).To(Equal("backend 2")) - }) - }) - - Context("a route with a non-existent backend", func() { - BeforeEach(func() { - addRoute("/foo", NewBackendRoute("backend-1")) - addRoute("/bar", NewBackendRoute("backend-non-existent")) - addRoute("/baz", NewBackendRoute("backend-2")) - addRoute("/qux", NewBackendRoute("backend-1")) - reloadRoutes(apiPort) - }) - - It("should skip the invalid route", func() { - resp := routerRequest(routerPort, "/bar") - Expect(resp.StatusCode).To(Equal(404)) - }) - - It("should continue to load other routes", func() { - resp := routerRequest(routerPort, "/foo") - Expect(readBody(resp)).To(Equal("backend 1")) - - resp = routerRequest(routerPort, "/baz") - Expect(readBody(resp)).To(Equal("backend 2")) - - resp = routerRequest(routerPort, "/qux") - Expect(readBody(resp)).To(Equal("backend 1")) - }) - }) -}) diff --git a/integration_tests/route_selection_test.go b/integration_tests/route_selection_test.go deleted file mode 100644 index dacaf226..00000000 --- a/integration_tests/route_selection_test.go +++ /dev/null @@ -1,364 +0,0 @@ -package integration - -import ( - "net/http/httptest" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - "github.com/onsi/gomega/ghttp" -) - -var _ = Describe("Route selection", func() { - - Describe("simple exact routes", func() { - var ( - backend1 *httptest.Server - backend2 *httptest.Server - ) - - BeforeEach(func() { - backend1 = startSimpleBackend("backend 1", backends["backend-1"]) - backend2 = startSimpleBackend("backend 2", backends["backend-2"]) - addRoute("/foo", NewBackendRoute("backend-1")) - addRoute("/bar", NewBackendRoute("backend-2")) - addRoute("/baz", NewBackendRoute("backend-1")) - reloadRoutes(apiPort) - }) - AfterEach(func() { - backend1.Close() - backend2.Close() - }) - - It("should route a matching request to the corresponding backend", func() { - resp := routerRequest(routerPort, "/foo") - Expect(readBody(resp)).To(Equal("backend 1")) - - resp = routerRequest(routerPort, "/bar") - Expect(readBody(resp)).To(Equal("backend 2")) - - resp = routerRequest(routerPort, "/baz") - Expect(readBody(resp)).To(Equal("backend 1")) - }) - - It("should 404 for children of the exact route", func() { - resp := routerRequest(routerPort, "/foo/bar") - Expect(resp.StatusCode).To(Equal(404)) - }) - - It("should 404 for non-matching requests", func() { - resp := routerRequest(routerPort, "/wibble") - Expect(resp.StatusCode).To(Equal(404)) - - resp = routerRequest(routerPort, "/") - Expect(resp.StatusCode).To(Equal(404)) - - resp = routerRequest(routerPort, "/foo.json") - Expect(resp.StatusCode).To(Equal(404)) - }) - }) - - Describe("simple prefix routes", func() { - var ( - backend1 *httptest.Server - backend2 *httptest.Server - ) - - BeforeEach(func() { - backend1 = startSimpleBackend("backend 1", backends["backend-1"]) - backend2 = startSimpleBackend("backend 2", backends["backend-2"]) - addRoute("/foo", NewBackendRoute("backend-1", "prefix")) - addRoute("/bar", NewBackendRoute("backend-2", "prefix")) - addRoute("/baz", NewBackendRoute("backend-1", "prefix")) - reloadRoutes(apiPort) - }) - AfterEach(func() { - backend1.Close() - backend2.Close() - }) - - It("should route requests for the prefix to the backend", func() { - resp := routerRequest(routerPort, "/foo") - Expect(readBody(resp)).To(Equal("backend 1")) - - resp = routerRequest(routerPort, "/bar") - Expect(readBody(resp)).To(Equal("backend 2")) - - resp = routerRequest(routerPort, "/baz") - Expect(readBody(resp)).To(Equal("backend 1")) - }) - - It("should route requests for the children of the prefix to the backend", func() { - resp := routerRequest(routerPort, "/foo/bar") - Expect(readBody(resp)).To(Equal("backend 1")) - - resp = routerRequest(routerPort, "/bar/foo.json") - Expect(readBody(resp)).To(Equal("backend 2")) - - resp = routerRequest(routerPort, "/baz/fooey/kablooie") - Expect(readBody(resp)).To(Equal("backend 1")) - }) - - It("should 404 for non-matching requests", func() { - resp := routerRequest(routerPort, "/wibble") - Expect(resp.StatusCode).To(Equal(404)) - - resp = routerRequest(routerPort, "/") - Expect(resp.StatusCode).To(Equal(404)) - - resp = routerRequest(routerPort, "/foo.json") - Expect(resp.StatusCode).To(Equal(404)) - }) - }) - - Describe("prefix route with children", func() { - var ( - outer *httptest.Server - inner *httptest.Server - ) - - BeforeEach(func() { - outer = startSimpleBackend("outer", backends["outer"]) - inner = startSimpleBackend("inner", backends["inner"]) - addRoute("/foo", NewBackendRoute("outer", "prefix")) - reloadRoutes(apiPort) - }) - AfterEach(func() { - outer.Close() - inner.Close() - }) - - Describe("with an exact child", func() { - BeforeEach(func() { 
- addRoute("/foo/bar", NewBackendRoute("inner")) - reloadRoutes(apiPort) - }) - - It("should route the prefix to the outer backend", func() { - resp := routerRequest(routerPort, "/foo") - Expect(readBody(resp)).To(Equal("outer")) - }) - - It("should route the exact child to the inner backend", func() { - resp := routerRequest(routerPort, "/foo/bar") - Expect(readBody(resp)).To(Equal("inner")) - }) - - It("should route the children of the exact child to the outer backend", func() { - resp := routerRequest(routerPort, "/foo/bar/baz") - Expect(readBody(resp)).To(Equal("outer")) - }) - }) - - Describe("with a prefix child", func() { - BeforeEach(func() { - addRoute("/foo/bar", NewBackendRoute("inner", "prefix")) - reloadRoutes(apiPort) - }) - - It("should route the outer prefix to the outer backend", func() { - resp := routerRequest(routerPort, "/foo") - Expect(readBody(resp)).To(Equal("outer")) - }) - - It("should route the inner prefix to the inner backend", func() { - resp := routerRequest(routerPort, "/foo/bar") - Expect(readBody(resp)).To(Equal("inner")) - }) - - It("should route the children of the inner prefix to the inner backend", func() { - resp := routerRequest(routerPort, "/foo/bar/baz") - Expect(readBody(resp)).To(Equal("inner")) - }) - - It("should route other children of the outer prefix to the outer backend", func() { - resp := routerRequest(routerPort, "/foo/baz") - Expect(readBody(resp)).To(Equal("outer")) - - resp = routerRequest(routerPort, "/foo/bar.json") - Expect(readBody(resp)).To(Equal("outer")) - }) - }) - - Describe("with an exact child and a deeper prefix child", func() { - var ( - innerer *httptest.Server - ) - BeforeEach(func() { - innerer = startSimpleBackend("innerer", backends["innerer"]) - addRoute("/foo/bar", NewBackendRoute("inner")) - addRoute("/foo/bar/baz", NewBackendRoute("innerer", "prefix")) - reloadRoutes(apiPort) - }) - AfterEach(func() { - innerer.Close() - }) - - It("should route the outer prefix to the outer backend", func() { - resp := routerRequest(routerPort, "/foo") - Expect(readBody(resp)).To(Equal("outer")) - - resp = routerRequest(routerPort, "/foo/baz") - Expect(readBody(resp)).To(Equal("outer")) - - resp = routerRequest(routerPort, "/foo/bar.json") - Expect(readBody(resp)).To(Equal("outer")) - }) - - It("should route the exact route to the inner backend", func() { - resp := routerRequest(routerPort, "/foo/bar") - Expect(readBody(resp)).To(Equal("inner")) - }) - - It("should route other children of the exact route to the outer backend", func() { - resp := routerRequest(routerPort, "/foo/bar/wibble") - Expect(readBody(resp)).To(Equal("outer")) - - resp = routerRequest(routerPort, "/foo/bar/baz.json") - Expect(readBody(resp)).To(Equal("outer")) - }) - - It("should route the inner prefix route to the innerer backend", func() { - resp := routerRequest(routerPort, "/foo/bar/baz") - Expect(readBody(resp)).To(Equal("innerer")) - }) - - It("should route children of the inner prefix route to the innerer backend", func() { - resp := routerRequest(routerPort, "/foo/bar/baz/wibble") - Expect(readBody(resp)).To(Equal("innerer")) - }) - }) - }) - - Describe("prefix and exact route at the same level", func() { - var ( - backend1 *httptest.Server - backend2 *httptest.Server - ) - - BeforeEach(func() { - backend1 = startSimpleBackend("backend 1", backends["backend-1"]) - backend2 = startSimpleBackend("backend 2", backends["backend-2"]) - addRoute("/foo", NewBackendRoute("backend-1", "prefix")) - addRoute("/foo", NewBackendRoute("backend-2")) - 
reloadRoutes(apiPort) - }) - AfterEach(func() { - backend1.Close() - backend2.Close() - }) - - It("should route the exact route to the exact backend", func() { - resp := routerRequest(routerPort, "/foo") - Expect(readBody(resp)).To(Equal("backend 2")) - }) - - It("should route children of the route to the prefix backend", func() { - resp := routerRequest(routerPort, "/foo/bar") - Expect(readBody(resp)).To(Equal("backend 1")) - }) - }) - - Describe("routes at the root level", func() { - var ( - root *httptest.Server - other *httptest.Server - ) - - BeforeEach(func() { - root = startSimpleBackend("root backend", backends["root"]) - other = startSimpleBackend("other backend", backends["other"]) - addRoute("/foo", NewBackendRoute("other")) - }) - AfterEach(func() { - root.Close() - other.Close() - }) - - It("should handle an exact route at the root level", func() { - addRoute("/", NewBackendRoute("root")) - reloadRoutes(apiPort) - - resp := routerRequest(routerPort, "/") - Expect(readBody(resp)).To(Equal("root backend")) - - resp = routerRequest(routerPort, "/foo") - Expect(readBody(resp)).To(Equal("other backend")) - - resp = routerRequest(routerPort, "/bar") - Expect(resp.StatusCode).To(Equal(404)) - }) - - It("should handle a prefix route at the root level", func() { - addRoute("/", NewBackendRoute("root", "prefix")) - reloadRoutes(apiPort) - - resp := routerRequest(routerPort, "/") - Expect(readBody(resp)).To(Equal("root backend")) - - resp = routerRequest(routerPort, "/foo") - Expect(readBody(resp)).To(Equal("other backend")) - - resp = routerRequest(routerPort, "/bar") - Expect(readBody(resp)).To(Equal("root backend")) - }) - }) - - Describe("double slashes", func() { - var ( - root *httptest.Server - recorder *ghttp.Server - ) - - BeforeEach(func() { - root = startSimpleBackend("fallthrough", backends["fallthrough"]) - recorder = startRecordingBackend(false, backends["other"]) - addRoute("/", NewBackendRoute("fallthrough", "prefix")) - addRoute("/foo/bar", NewBackendRoute("other", "prefix")) - reloadRoutes(apiPort) - }) - AfterEach(func() { - root.Close() - recorder.Close() - }) - - It("should not be redirected by our simple test backend", func() { - resp := routerRequest(routerPort, "//") - Expect(readBody(resp)).To(Equal("fallthrough")) - }) - - It("should not be redirected by our recorder backend", func() { - resp := routerRequest(routerPort, "/foo/bar/baz//qux") - Expect(resp.StatusCode).To(Equal(200)) - Expect(recorder.ReceivedRequests()).To(HaveLen(1)) - Expect(recorder.ReceivedRequests()[0].URL.Path).To(Equal("/foo/bar/baz//qux")) - }) - - It("should collapse double slashes when looking up route, but pass request as-is", func() { - resp := routerRequest(routerPort, "/foo//bar") - Expect(resp.StatusCode).To(Equal(200)) - Expect(recorder.ReceivedRequests()).To(HaveLen(1)) - Expect(recorder.ReceivedRequests()[0].URL.Path).To(Equal("/foo//bar")) - }) - }) - - Describe("special characters in paths", func() { - var recorder *ghttp.Server - - BeforeEach(func() { - recorder = startRecordingBackend(false, backends["backend"]) - }) - AfterEach(func() { - recorder.Close() - }) - - It("should handle spaces (%20) in paths", func() { - addRoute("/foo%20bar", NewBackendRoute("backend")) - reloadRoutes(apiPort) - - resp := routerRequest(routerPort, "/foo bar") - Expect(resp.StatusCode).To(Equal(200)) - Expect(recorder.ReceivedRequests()).To(HaveLen(1)) - Expect(recorder.ReceivedRequests()[0].RequestURI).To(Equal("/foo%20bar")) - }) - }) -}) diff --git a/integration_tests/router_logging.go 
b/integration_tests/router_logging.go deleted file mode 100644 index 461ff3ab..00000000 --- a/integration_tests/router_logging.go +++ /dev/null @@ -1,69 +0,0 @@ -package integration - -import ( - "bufio" - "encoding/json" - "os" - "time" - - // revive:disable:dot-imports - . "github.com/onsi/gomega" - // revive:enable:dot-imports -) - -var ( - tempLogfile *os.File -) - -func setupTempLogfile() error { - file, err := os.CreateTemp("", "router_error_log") - if err != nil { - return err - } - tempLogfile = file - return nil -} - -func resetTempLogfile() { - _, err := tempLogfile.Seek(0, 0) - Expect(err).NotTo(HaveOccurred()) - err = tempLogfile.Truncate(0) - Expect(err).NotTo(HaveOccurred()) -} - -func cleanupTempLogfile() { - if tempLogfile != nil { - tempLogfile.Close() - os.Remove(tempLogfile.Name()) - } -} - -type routerLogEntry struct { - Timestamp time.Time `json:"@timestamp"` - Fields map[string]interface{} `json:"@fields"` -} - -func lastRouterErrorLogLine() []byte { - var line []byte - - Eventually(func() ([]byte, error) { - scanner := bufio.NewScanner(tempLogfile) - for scanner.Scan() { - line = scanner.Bytes() - } - if err := scanner.Err(); err != nil { - return nil, err - } - return line, nil - }).ShouldNot(BeNil(), "No log line found after 1 second") - - return line -} - -func lastRouterErrorLogEntry() *routerLogEntry { - line := lastRouterErrorLogLine() - var entry *routerLogEntry - err := json.Unmarshal(line, &entry) - Expect(err).NotTo(HaveOccurred()) - return entry -} diff --git a/integration_tests/router_support.go b/integration_tests/router_support.go deleted file mode 100644 index d03d526f..00000000 --- a/integration_tests/router_support.go +++ /dev/null @@ -1,102 +0,0 @@ -package integration - -import ( - "context" - "fmt" - "net" - "net/http" - "os" - "os/exec" - "strconv" - "syscall" - "time" - - // revive:disable:dot-imports - . "github.com/onsi/gomega" - // revive:enable:dot-imports -) - -const ( - routerPort = 3169 - apiPort = 3168 -) - -func routerURL(port int, path string) string { - return fmt.Sprintf("http://127.0.0.1:%d%s", port, path) -} - -func reloadRoutes(port int) { - req, err := http.NewRequestWithContext( - context.Background(), - http.MethodPost, - fmt.Sprintf("http://127.0.0.1:%d/reload", port), - http.NoBody, - ) - Expect(err).NotTo(HaveOccurred()) - - resp, err := http.DefaultClient.Do(req) - Expect(err).NotTo(HaveOccurred()) - Expect(resp.StatusCode).To(Equal(202)) - resp.Body.Close() - // Now that reloading is done asynchronously, we need a small sleep to ensure - // it has actually been performed. - time.Sleep(time.Millisecond * 50) -} - -var runningRouters = make(map[int]*exec.Cmd) - -func startRouter(port, apiPort int, extraEnv []string) error { - host := "localhost" - pubAddr := net.JoinHostPort(host, strconv.Itoa(port)) - apiAddr := net.JoinHostPort(host, strconv.Itoa(apiPort)) - - bin := os.Getenv("BINARY") - if bin == "" { - bin = "../router" - } - cmd := exec.Command(bin) - - cmd.Env = append(cmd.Environ(), "ROUTER_MONGO_DB=router_test") - cmd.Env = append(cmd.Env, fmt.Sprintf("ROUTER_PUBADDR=%s", pubAddr)) - cmd.Env = append(cmd.Env, fmt.Sprintf("ROUTER_APIADDR=%s", apiAddr)) - cmd.Env = append(cmd.Env, fmt.Sprintf("ROUTER_ERROR_LOG=%s", tempLogfile.Name())) - cmd.Env = append(cmd.Env, extraEnv...) 
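The reloadRoutes test helper deleted above papers over the now-asynchronous reload with a fixed 50ms sleep. A polling wait is the usual way to avoid that kind of flakiness; here is a minimal sketch built on the suite's own routerRequest and readBody helpers (the function name and intervals are our assumptions, not code from this repo):

```go
// waitForReloadApplied polls the router until the expected backend body is
// served, instead of sleeping for a fixed interval after POSTing /reload.
func waitForReloadApplied(port int, path, want string) {
	Eventually(func() string {
		return readBody(routerRequest(port, path))
	}, "3s", "50ms").Should(Equal(want))
}
```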
- - if os.Getenv("ROUTER_DEBUG_TESTS") != "" { - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - } - - err := cmd.Start() - if err != nil { - return err - } - - waitForServerUp(pubAddr) - - runningRouters[port] = cmd - return nil -} - -func stopRouter(port int) { - cmd := runningRouters[port] - if cmd != nil && cmd.Process != nil { - err := cmd.Process.Signal(syscall.SIGINT) - Expect(err).NotTo(HaveOccurred()) - _, err = cmd.Process.Wait() - Expect(err).NotTo(HaveOccurred()) - } - delete(runningRouters, port) -} - -func waitForServerUp(addr string) { - for i := 0; i < 20; i++ { - conn, err := net.Dial("tcp", addr) - if err == nil { - conn.Close() - return - } - time.Sleep(100 * time.Millisecond) - } - panic("Server not accepting connections after 20 attempts") -} diff --git a/lib/load_routes_test.go b/lib/load_routes_test.go index 05130849..ef1f6431 100644 --- a/lib/load_routes_test.go +++ b/lib/load_routes_test.go @@ -219,8 +219,7 @@ var _ = Describe("Router", func() { Expect(err).NotTo(HaveOccurred()) router = &Router{ - ReloadChan: make(chan bool, 1), - lock: sync.RWMutex{}, + lock: sync.RWMutex{}, backends: map[string]http.Handler{ "backend1": http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { http.Redirect(w, r, "http://example.com", http.StatusFound) diff --git a/lib/logcompat.go b/lib/logcompat.go index e9a9c2f9..3d85228d 100644 --- a/lib/logcompat.go +++ b/lib/logcompat.go @@ -13,9 +13,3 @@ func logWarn(msg ...interface{}) { func logInfo(msg ...interface{}) { log.Println(msg...) } - -func logDebug(msg ...interface{}) { - if EnableDebugOutput { - log.Println(msg...) - } -} diff --git a/lib/router.go b/lib/router.go index 56aa2ab9..328958da 100644 --- a/lib/router.go +++ b/lib/router.go @@ -3,22 +3,16 @@ package router import ( "context" "fmt" - "math/rand/v2" "net/http" - "net/url" "os" - "strconv" "sync" "time" "github.com/jackc/pgx/v5/pgxpool" "github.com/prometheus/client_golang/prometheus" - "github.com/alphagov/router/handlers" "github.com/alphagov/router/logger" "github.com/alphagov/router/triemux" - "github.com/globalsign/mgo" - "github.com/globalsign/mgo/bson" ) const ( @@ -32,65 +26,25 @@ const ( ) // Router is a wrapper around an HTTP multiplexer (trie.Mux) which retrieves its -// routes from a passed mongo database. -// -// TODO: decouple Router from its database backend. Router should not know -// anything about the database backend. Its representation of the route table -// should be independent of the underlying DBMS. Route should define an -// abstract interface for some other module to be able to bulk-load and -// incrementally update routes. Since Router should not care where its routes -// come from, Route and Backend should not contain bson fields. -// MongoReplicaSet, MongoReplicaSetMember etc. should move out of this module. +// routes from a postgres database. 
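The rewritten doc comment above says routes now come from Postgres, but reloadCsRoutes itself is not shown in this diff. As a rough sketch only, loading with pgx v5 might look like the following; the table and column names are assumptions carried over from the old Mongo route fields, not the content store's real schema:

```go
// loadBackendRoutes is a hypothetical stand-in for reloadCsRoutes: it reads
// backend routes from Postgres and registers them on a fresh trie mux.
func loadBackendRoutes(ctx context.Context, pool *pgxpool.Pool, mux *triemux.Mux, backends map[string]http.Handler) error {
	rows, err := pool.Query(ctx,
		`SELECT incoming_path, route_type, backend_id FROM routes WHERE handler = 'backend'`)
	if err != nil {
		return err
	}
	defer rows.Close()
	for rows.Next() {
		var path, routeType, backendID string
		if err := rows.Scan(&path, &routeType, &backendID); err != nil {
			return err
		}
		if handler, ok := backends[backendID]; ok {
			mux.Handle(path, routeType == "prefix", handler)
		}
	}
	return rows.Err()
}
```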
type Router struct { backends map[string]http.Handler - mux *triemux.Mux csMux *triemux.Mux lock sync.RWMutex - mongoReadToOptime bson.MongoTimestamp logger logger.Logger opts Options - ReloadChan chan bool CsReloadChan chan bool - csMuxSampleRate float64 - csLastAttemptReloadTime time.Time pool *pgxpool.Pool + csLastAttemptReloadTime time.Time } type Options struct { - MongoURL string - MongoDBName string - MongoPollInterval time.Duration BackendConnTimeout time.Duration BackendHeaderTimeout time.Duration LogFileName string RouteReloadInterval time.Duration } -type Backend struct { - BackendID string `bson:"backend_id"` - BackendURL string `bson:"backend_url"` - SubdomainName string `bson:"subdomain_name"` -} - -type MongoReplicaSet struct { - Members []MongoReplicaSetMember `bson:"members"` -} - -type MongoReplicaSetMember struct { - Name string `bson:"name"` - Optime bson.MongoTimestamp `bson:"optime"` - Current bool `bson:"self"` -} - -type Route struct { - IncomingPath string `bson:"incoming_path"` - RouteType string `bson:"route_type"` - Handler string `bson:"handler"` - BackendID string `bson:"backend_id"` - RedirectTo string `bson:"redirect_to"` - SegmentsMode string `bson:"segments_mode"` -} - // RegisterMetrics registers Prometheus metrics from the router module and the // modules that it directly depends on. To use the default (global) registry, // pass prometheus.DefaultRegisterer. @@ -98,74 +52,42 @@ func RegisterMetrics(r prometheus.Registerer) { registerMetrics(r) } -// NewRouter returns a new empty router instance. You will need to call -// SelfUpdateRoutes() to initialise the self-update process for routes. func NewRouter(o Options) (rt *Router, err error) { - logInfo("router: using mongo poll interval:", o.MongoPollInterval) - logInfo("router: using backend connect timeout:", o.BackendConnTimeout) - logInfo("router: using backend header timeout:", o.BackendHeaderTimeout) - l, err := logger.New(o.LogFileName) if err != nil { return nil, err } logInfo("router: logging errors as JSON to", o.LogFileName) - mongoReadToOptime, err := bson.NewMongoTimestamp(time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC), 1) - if err != nil { - return nil, err - } - backends := loadBackendsFromEnv(o.BackendConnTimeout, o.BackendHeaderTimeout, l) - csMuxSampleRate, err := strconv.ParseFloat(os.Getenv("CSMUX_SAMPLE_RATE"), 64) - - if err != nil { - csMuxSampleRate = 0.0 - } - - logInfo("router: content store mux sample rate set at", csMuxSampleRate) - var pool *pgxpool.Pool - if csMuxSampleRate != 0.0 { - pool, err = pgxpool.New(context.Background(), os.Getenv("CONTENT_STORE_DATABASE_URL")) - if err != nil { - return nil, err - } - logInfo("router: postgres connection pool created") - } else { - logInfo("router: not using content store postgres") + pool, err = pgxpool.New(context.Background(), os.Getenv("CONTENT_STORE_DATABASE_URL")) + if err != nil { + return nil, err } + logInfo("router: postgres connection pool created") - reloadChan := make(chan bool, 1) csReloadChan := make(chan bool, 1) rt = &Router{ - backends: backends, - mux: triemux.NewMux(), - csMux: triemux.NewMux(), - mongoReadToOptime: mongoReadToOptime, - logger: l, - opts: o, - ReloadChan: reloadChan, - CsReloadChan: csReloadChan, - pool: pool, - csMuxSampleRate: csMuxSampleRate, + backends: backends, + csMux: triemux.NewMux(), + logger: l, + opts: o, + CsReloadChan: csReloadChan, + pool: pool, } - if csMuxSampleRate != 0.0 { - rt.reloadCsRoutes(pool) - - go func() { - if err := 
rt.listenForContentStoreUpdates(context.Background()); err != nil { - logWarn(fmt.Sprintf("router: error in listenForContentStoreUpdates: %v", err)) - } - }() + rt.reloadCsRoutes(pool) - go rt.waitForReload() - } + go func() { + if err := rt.listenForContentStoreUpdates(context.Background()); err != nil { + logWarn(fmt.Sprintf("router: error in listenForContentStoreUpdates: %v", err)) + } + }() - go rt.pollAndReload() + go rt.waitForReload() return rt, nil } @@ -192,216 +114,15 @@ func (rt *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { } }() - useContentStoreMux := rt.csMuxSampleRate > 0 && rand.Float64() < rt.csMuxSampleRate //nolint:gosec var mux *triemux.Mux rt.lock.RLock() - if useContentStoreMux { - mux = rt.csMux - } else { - mux = rt.mux - } + mux = rt.csMux rt.lock.RUnlock() mux.ServeHTTP(w, req) } -func (rt *Router) SelfUpdateRoutes() { - logInfo(fmt.Sprintf("router: starting self-update process, polling for route changes every %v", rt.opts.MongoPollInterval)) - - tick := time.Tick(rt.opts.MongoPollInterval) - for range tick { - logDebug("router: polling MongoDB for changes") - - rt.ReloadChan <- true - } -} - -// pollAndReload blocks until it receives a message on reloadChan, -// and will immediately reload again if another message was received -// during reload. -func (rt *Router) pollAndReload() { - for range rt.ReloadChan { - func() { - defer func() { - if r := recover(); r != nil { - logWarn(r) - } - }() - - logDebug("mgo: connecting to", rt.opts.MongoURL) - - sess, err := mgo.Dial(rt.opts.MongoURL) - if err != nil { - logWarn(fmt.Sprintf("mgo: error connecting to MongoDB, skipping update (error: %v)", err)) - return - } - - defer sess.Close() - sess.SetMode(mgo.SecondaryPreferred, true) - - currentMongoInstance, err := rt.getCurrentMongoInstance(sess.DB("admin")) - if err != nil { - logWarn(err) - return - } - - logDebug("mgo: communicating with replica set member", currentMongoInstance.Name) - - logDebug("router: polled mongo instance is ", currentMongoInstance.Name) - logDebug("router: polled mongo optime is ", currentMongoInstance.Optime) - logDebug("router: current read-to mongo optime is ", rt.mongoReadToOptime) - - if rt.shouldReload(currentMongoInstance) { - logDebug("router: updates found") - rt.reloadRoutes(sess.DB(rt.opts.MongoDBName), currentMongoInstance.Optime) - } else { - logDebug("router: no updates found") - } - }() - } -} - -type mongoDatabase interface { - Run(command interface{}, result interface{}) error -} - -// reloadRoutes reloads the routes for this Router instance on the fly. It will -// create a new proxy mux, load applications (backends) and routes into it, and -// then flip the "mux" pointer in the Router. 
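NewRouter above hands off to rt.listenForContentStoreUpdates, whose body does not appear in this diff. For orientation only, a LISTEN-based watcher in pgx v5 typically looks like this; the function name and notification channel name are assumptions:

```go
// listenForRouteChanges is a hypothetical sketch: it holds one pooled
// connection, LISTENs on a channel, and nudges the reload loop without
// blocking, mirroring the non-blocking send used in lib/router_api.go.
func listenForRouteChanges(ctx context.Context, pool *pgxpool.Pool, reload chan<- bool) error {
	conn, err := pool.Acquire(ctx)
	if err != nil {
		return err
	}
	defer conn.Release()
	if _, err := conn.Exec(ctx, "LISTEN route_changes"); err != nil {
		return err
	}
	for {
		if _, err := conn.Conn().WaitForNotification(ctx); err != nil {
			return err
		}
		select {
		case reload <- true: // coalesce: a pending reload already covers this
		default:
		}
	}
}
```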
-func (rt *Router) reloadRoutes(db *mgo.Database, currentOptime bson.MongoTimestamp) { - var success bool - timer := prometheus.NewTimer(prometheus.ObserverFunc(func(v float64) { - labels := prometheus.Labels{"success": strconv.FormatBool(success), "source": "mongo"} - routeReloadDurationMetric.With(labels).Observe(v) - })) - defer func() { - success = true - if r := recover(); r != nil { - success = false - logWarn("router: recovered from panic in reloadRoutes:", r) - logInfo("router: original routes have not been modified") - errorMessage := fmt.Sprintf("panic: %v", r) - err := logger.RecoveredError{ErrorMessage: errorMessage} - logger.NotifySentry(logger.ReportableError{Error: err}) - } else { - rt.mongoReadToOptime = currentOptime - } - timer.ObserveDuration() - }() - - logInfo("router: reloading routes") - newmux := triemux.NewMux() - - loadRoutes(db.C("routes"), newmux, rt.backends) - routeCount := newmux.RouteCount() - - rt.lock.Lock() - rt.mux = newmux - rt.lock.Unlock() - - logInfo(fmt.Sprintf("router: reloaded %d routes", routeCount)) - routesCountMetric.WithLabelValues("mongo").Set(float64(routeCount)) -} - -func (rt *Router) getCurrentMongoInstance(db mongoDatabase) (MongoReplicaSetMember, error) { - replicaSetStatus := bson.M{} - - if err := db.Run("replSetGetStatus", &replicaSetStatus); err != nil { - return MongoReplicaSetMember{}, fmt.Errorf("router: couldn't get replica set status from MongoDB, skipping update (error: %w)", err) - } - - replicaSetStatusBytes, err := bson.Marshal(replicaSetStatus) - if err != nil { - return MongoReplicaSetMember{}, fmt.Errorf("router: couldn't marshal replica set status from MongoDB, skipping update (error: %w)", err) - } - - replicaSet := MongoReplicaSet{} - err = bson.Unmarshal(replicaSetStatusBytes, &replicaSet) - if err != nil { - return MongoReplicaSetMember{}, fmt.Errorf("router: couldn't unmarshal replica set status from MongoDB, skipping update (error: %w)", err) - } - - currentInstance := make([]MongoReplicaSetMember, 0) - for _, instance := range replicaSet.Members { - if instance.Current { - currentInstance = append(currentInstance, instance) - } - } - - logDebug("router: MongoDB instances", currentInstance) - - if len(currentInstance) != 1 { - return MongoReplicaSetMember{}, fmt.Errorf("router: did not find exactly one current MongoDB instance, skipping update (current instances found: %d)", len(currentInstance)) - } - - // #nosec G602 -- not actually an out-of-bounds access. - return currentInstance[0], nil -} - -func (rt *Router) shouldReload(currentMongoInstance MongoReplicaSetMember) bool { - return currentMongoInstance.Optime > rt.mongoReadToOptime -} - -// loadRoutes is a helper function which loads routes from the passed mongo -// collection and registers them with the passed proxy mux. -func loadRoutes(c *mgo.Collection, mux *triemux.Mux, backends map[string]http.Handler) { - route := &Route{} - - iter := c.Find(nil).Iter() - - goneHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - http.Error(w, "410 Gone", http.StatusGone) - }) - - for iter.Next(&route) { - prefix := (route.RouteType == RouteTypePrefix) - - // the database contains paths with % encoded routes. - // Unescape them here because the http.Request objects we match against contain the unescaped variants. 
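The removed comment above captures a subtlety worth keeping in mind even after the Mongo loader goes away: the route table stores percent-encoded paths, while url.Parse exposes the decoded form in URL.Path, which is what incoming requests are matched against. A self-contained illustration:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// The stored route is "/foo%20bar"; route lookup uses the decoded path,
	// while the original encoding is preserved on the proxied request
	// (see the "%20" test in the deleted route_selection suite).
	u, err := url.Parse("/foo%20bar")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Path)          // "/foo bar"   (used for route lookup)
	fmt.Println(u.EscapedPath()) // "/foo%20bar" (what the backend receives)
}
```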
- incomingURL, err := url.Parse(route.IncomingPath) - if err != nil { - logWarn(fmt.Sprintf("router: found route %+v with invalid incoming path '%s', skipping!", route, route.IncomingPath)) - continue - } - - switch route.Handler { - case HandlerTypeBackend: - handler, ok := backends[route.BackendID] - if !ok { - logWarn(fmt.Sprintf("router: found route %+v which references unknown backend "+ - "%s, skipping!", route, route.BackendID)) - continue - } - mux.Handle(incomingURL.Path, prefix, handler) - logDebug(fmt.Sprintf("router: registered %s (prefix: %v) for %s", - incomingURL.Path, prefix, route.BackendID)) - case HandlerTypeRedirect: - handler := handlers.NewRedirectHandler(incomingURL.Path, route.RedirectTo, shouldPreserveSegments(route.RouteType, route.SegmentsMode)) - mux.Handle(incomingURL.Path, prefix, handler) - logDebug(fmt.Sprintf("router: registered %s (prefix: %v) -> %s", - incomingURL.Path, prefix, route.RedirectTo)) - case HandlerTypeGone: - mux.Handle(incomingURL.Path, prefix, goneHandler) - logDebug(fmt.Sprintf("router: registered %s (prefix: %v) -> Gone", incomingURL.Path, prefix)) - case "boom": - // Special handler so that we can test failure behaviour. - mux.Handle(incomingURL.Path, prefix, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - panic("Boom!!!") - })) - logDebug(fmt.Sprintf("router: registered %s (prefix: %v) -> Boom!!!", incomingURL.Path, prefix)) - default: - logWarn(fmt.Sprintf("router: found route %+v with unknown handler type "+ - "%s, skipping!", route, route.Handler)) - continue - } - } - - if err := iter.Err(); err != nil { - panic(err) - } -} - func shouldPreserveSegments(routeType, segmentsMode string) bool { switch routeType { case RouteTypeExact: diff --git a/lib/router_api.go b/lib/router_api.go index f60b6154..3c09ef46 100644 --- a/lib/router_api.go +++ b/lib/router_api.go @@ -21,11 +21,6 @@ func NewAPIHandler(rout *Router) (api http.Handler, err error) { // oplog optime and start a reload if necessary. // If the channel is already full, no message will be sent and the request // won't be blocked. - select { - case rout.ReloadChan <- true: - default: - } - select { case rout.CsReloadChan <- true: default: diff --git a/lib/router_test.go b/lib/router_test.go index 5a11ce88..d31c8528 100644 --- a/lib/router_test.go +++ b/lib/router_test.go @@ -1,171 +1,13 @@ package router import ( - "errors" "testing" - "time" - - "github.com/globalsign/mgo/bson" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" ) -type mockMongoDB struct { - result bson.M - err error -} - -func (m *mockMongoDB) Run(_ interface{}, res interface{}) error { - if m.err != nil { - return m.err - } - - bytes, err := bson.Marshal(m.result) - if err != nil { - return err - } - - err = bson.Unmarshal(bytes, res) - if err != nil { - return err - } - - return nil -} - func TestRouter(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "Router Suite") } - -var _ = Describe("Router", func() { - Context("When calling shouldReload", func() { - Context("with an up-to-date mongo instance", func() { - It("should return false", func() { - rt := Router{} - initialOptime, _ := bson.NewMongoTimestamp(time.Date(2021, time.March, 12, 8, 0, 0, 0, time.UTC), 1) - rt.mongoReadToOptime = initialOptime - - currentOptime, _ := bson.NewMongoTimestamp(time.Date(2021, time.March, 12, 8, 0, 0, 0, time.UTC), 1) - mongoInstance := MongoReplicaSetMember{} - mongoInstance.Optime = currentOptime - - Expect(rt.shouldReload(mongoInstance)).To( - BeFalse(), - "Router should determine no reload is necessary when Mongo optime hasn't changed", - ) - }) - }) - - Context("with a stale mongo instance", func() { - It("should return false when timestamp differs", func() { - rt := Router{} - initialOptime, _ := bson.NewMongoTimestamp(time.Date(2021, time.March, 12, 8, 0, 0, 0, time.UTC), 1) - rt.mongoReadToOptime = initialOptime - - currentOptime, _ := bson.NewMongoTimestamp(time.Date(2021, time.March, 12, 8, 2, 30, 0, time.UTC), 1) - mongoInstance := MongoReplicaSetMember{} - mongoInstance.Optime = currentOptime - - Expect(rt.shouldReload(mongoInstance)).To( - BeTrue(), - "Router should determine reload is necessary when Mongo optime has changed by timestamp", - ) - }) - - It("should return false when operand differs", func() { - rt := Router{} - initialOptime, _ := bson.NewMongoTimestamp(time.Date(2021, time.March, 12, 8, 0, 0, 0, time.UTC), 1) - rt.mongoReadToOptime = initialOptime - - currentOptime, _ := bson.NewMongoTimestamp(time.Date(2021, time.March, 12, 8, 0, 0, 0, time.UTC), 2) - mongoInstance := MongoReplicaSetMember{} - mongoInstance.Optime = currentOptime - - Expect(rt.shouldReload(mongoInstance)).To( - BeTrue(), - "Router should determine reload is necessary when Mongo optime has changed by operand", - ) - }) - }) - }) - - Context("When calling getCurrentMongoInstance", func() { - It("should return error when unable to get the replica set", func() { - mockMongoObj := &mockMongoDB{ - err: errors.New("Error connecting to replica set"), - } - - rt := Router{} - _, err := rt.getCurrentMongoInstance(mockMongoObj) - - Expect(err).To( - HaveOccurred(), - "Router should raise an error when it can't get replica set status from Mongo") - }) - - It("should return fail to find an instance when the replica set status schema doesn't match the expected schema", func() { - replicaSetStatusBson := bson.M{"members": []bson.M{{"unknownProperty": "unknown"}}} - mockMongoObj := &mockMongoDB{ - result: replicaSetStatusBson, - } - - rt := Router{} - _, err := rt.getCurrentMongoInstance(mockMongoObj) - - Expect(err).To( - HaveOccurred(), - "Router should raise an error when the current Mongo instance can't be found in the replica set status response") - }) - - It("should return fail to find an instance when the replica set status contains no instances marked with self:true", func() { - replicaSetStatusBson := bson.M{"members": []bson.M{{"name": "mongo1", "self": false}}} - mockMongoObj := &mockMongoDB{ - result: replicaSetStatusBson, - } - - rt 
:= Router{} - _, err := rt.getCurrentMongoInstance(mockMongoObj) - - Expect(err).To( - HaveOccurred(), - "Router should raise an error when the current Mongo instance can't be found in the replica set status response") - }) - - It("should return fail to find an instance when the replica set status contains multiple instances marked with self:true", func() { - replicaSetStatusBson := bson.M{"members": []bson.M{{"name": "mongo1", "self": true}, {"name": "mongo2", "self": true}}} - mockMongoObj := &mockMongoDB{ - result: replicaSetStatusBson, - } - - rt := Router{} - _, err := rt.getCurrentMongoInstance(mockMongoObj) - - Expect(err).To( - HaveOccurred(), - "Router should raise an error when the replica set status response contains multiple current Mongo instances") - }) - - It("should successfully return the current Mongo instance from the replica set", func() { - replicaSetStatusBson := bson.M{"members": []bson.M{{"name": "mongo1", "self": false}, {"name": "mongo2", "optime": 6945383634312364034, "self": true}}} - mockMongoObj := &mockMongoDB{ - result: replicaSetStatusBson, - } - - expectedMongoInstance := MongoReplicaSetMember{ - Name: "mongo2", - Optime: 6945383634312364034, - Current: true, - } - - rt := Router{} - currentMongoInstance, _ := rt.getCurrentMongoInstance(mockMongoObj) - - Expect(currentMongoInstance).To( - Equal(expectedMongoInstance), - "Router should get the current Mongo instance from the replica set status response", - ) - }) - }) -}) diff --git a/main.go b/main.go index a6164bf4..ecc1ee65 100644 --- a/main.go +++ b/main.go @@ -25,9 +25,6 @@ The following environment variables and defaults are available: ROUTER_PUBADDR=:8080 Address on which to serve public requests ROUTER_APIADDR=:8081 Address on which to receive reload requests -ROUTER_MONGO_URL=127.0.0.1 Address of mongo cluster (e.g. 'mongo1,mongo2,mongo3') -ROUTER_MONGO_DB=router Name of mongo database to use -ROUTER_MONGO_POLL_INTERVAL=2s Interval to poll mongo for route changes ROUTER_ERROR_LOG=STDERR File to log errors to (in JSON format) ROUTER_DEBUG= Enable debug output if non-empty @@ -90,9 +87,6 @@ func main() { var ( pubAddr = getenv("ROUTER_PUBADDR", ":8080") apiAddr = getenv("ROUTER_APIADDR", ":8081") - mongoURL = getenv("ROUTER_MONGO_URL", "127.0.0.1") - mongoDBName = getenv("ROUTER_MONGO_DB", "router") - mongoPollInterval = getenvDuration("ROUTER_MONGO_POLL_INTERVAL", "2s") errorLogFile = getenv("ROUTER_ERROR_LOG", "STDERR") tlsSkipVerify = os.Getenv("ROUTER_TLS_SKIP_VERIFY") != "" beConnTimeout = getenvDuration("ROUTER_BACKEND_CONNECT_TIMEOUT", "1s") @@ -115,9 +109,6 @@ func main() { router.RegisterMetrics(prometheus.DefaultRegisterer) rout, err := router.NewRouter(router.Options{ - MongoURL: mongoURL, - MongoDBName: mongoDBName, - MongoPollInterval: mongoPollInterval, BackendConnTimeout: beConnTimeout, BackendHeaderTimeout: beHeaderTimeout, LogFileName: errorLogFile, @@ -127,7 +118,6 @@ func main() { log.Fatal(err) } go rout.PeriodicCSRouteUpdates() - go rout.SelfUpdateRoutes() go listenAndServeOrFatal(pubAddr, rout, feReadTimeout, feWriteTimeout) log.Printf("router: listening for requests on %v", pubAddr) diff --git a/mongo.sh b/mongo.sh deleted file mode 100755 index 6e5fa4bd..00000000 --- a/mongo.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/bin/dash -set -eu - -usage() { - echo "$0 restart|start|stop" - exit 64 -} - -failure_hints() { - echo ' - Failed to start mongo. 
If using Docker Desktop: - - Go into Settings -> Features in development - - untick "Use containerd" - - tick "Use Rosetta"' - exit 1 -} - -docker_run() { - docker run --name router-mongo -dp 27017:27017 mongo:2.6 --replSet rs0 --quiet -} - -init_replicaset() { - docker exec router-mongo mongo --quiet --eval 'rs.initiate();' >/dev/null 2>&1 -} - -healthy() { - docker exec router-mongo mongo --quiet --eval \ - 'if (rs.status().members[0].health==1) print("healthy");' \ - 2>&1 | grep healthy >/dev/null -} - -# usage: retry_or_fatal description command-to-try -retry_or_fatal() { - n=60 - echo -n "Waiting up to $n s for $1"; shift - while [ "$n" -ge 0 ]; do - if "$@"; then - echo " done" - return - fi - sleep 1 && echo -n . - n=$((n-1)) - done - echo "gave up" - exit 1 -} - -stop() { - if ! docker stop router-mongo >/dev/null 2>&1; then - echo "router-mongo not running" - return - fi - echo -n Waiting for router-mongo container to exit. - docker wait router-mongo >/dev/null || true - docker rm -f router-mongo >/dev/null 2>&1 || true - echo " done" -} - -start() { - if healthy; then - echo router-mongo already running. - return - fi - stop - docker_run || failure_hints - retry_or_fatal "for successful rs.initiate()" init_replicaset - retry_or_fatal "for healthy rs.status()" healthy -} - -case $1 in - start) $1;; - stop) $1;; - *) usage -esac diff --git a/vendor/github.com/globalsign/mgo/.gitignore b/vendor/github.com/globalsign/mgo/.gitignore deleted file mode 100644 index 9a3120f6..00000000 --- a/vendor/github.com/globalsign/mgo/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -_harness -.vscode \ No newline at end of file diff --git a/vendor/github.com/globalsign/mgo/.travis.yml b/vendor/github.com/globalsign/mgo/.travis.yml deleted file mode 100644 index f1f89e96..00000000 --- a/vendor/github.com/globalsign/mgo/.travis.yml +++ /dev/null @@ -1,49 +0,0 @@ -language: go - -go_import_path: github.com/globalsign/mgo - -go: - - 1.9.x - - 1.10.x - -env: - global: - - BUCKET=https://s3.eu-west-2.amazonaws.com/globalsign-mgo - - FASTDL=https://fastdl.mongodb.org/linux - matrix: - - MONGODB=x86_64-ubuntu1404-3.0.15 - - MONGODB=x86_64-ubuntu1404-3.2.17 - - MONGODB=x86_64-ubuntu1404-3.4.10 - - MONGODB=x86_64-ubuntu1404-3.6.0 - -install: - - - wget $FASTDL/mongodb-linux-$MONGODB.tgz - - tar xzvf mongodb-linux-$MONGODB.tgz - - export PATH=$PWD/mongodb-linux-$MONGODB/bin:$PATH - - - wget $BUCKET/daemontools.tar.gz - - tar xzvf daemontools.tar.gz - - export PATH=$PWD/daemontools:$PATH - - - go get gopkg.in/check.v1 - - go get gopkg.in/yaml.v2 - - go get gopkg.in/tomb.v2 - - go get golang.org/x/lint/golint - -before_script: - - golint ./... 
| grep -v 'ID' | cat - - go vet github.com/globalsign/mgo/bson github.com/globalsign/mgo/txn github.com/globalsign/mgo - - export NOIPV6=1 - - make startdb - -script: - - (cd bson && go test -check.v) - - go test -check.v -fast - - (cd txn && go test -check.v) - - make stopdb - -git: - depth: 3 - -# vim:sw=4:ts=4:et diff --git a/vendor/github.com/globalsign/mgo/CONTRIBUTING.md b/vendor/github.com/globalsign/mgo/CONTRIBUTING.md deleted file mode 100644 index 79539955..00000000 --- a/vendor/github.com/globalsign/mgo/CONTRIBUTING.md +++ /dev/null @@ -1,14 +0,0 @@ -Contributing -------------------------- - -We really appreciate contributions, but they must meet the following requirements: - -* A PR should have a brief description of the problem/feature being proposed -* Pull requests should target the `development` branch -* Existing tests should pass and any new code should be covered with it's own test(s) (use [travis-ci](https://travis-ci.org)) -* New functions should be [documented](https://blog.golang.org/godoc-documenting-go-code) clearly -* Code should pass `golint`, `go vet` and `go fmt` - -We merge PRs into `development`, which is then tested in a sharded, replicated environment in our datacenter for regressions. Once everyone is happy, we merge to master - this is to maintain a bit of quality control past the usual PR process. - -**Thanks** for helping! diff --git a/vendor/github.com/globalsign/mgo/LICENSE b/vendor/github.com/globalsign/mgo/LICENSE deleted file mode 100644 index 770c7672..00000000 --- a/vendor/github.com/globalsign/mgo/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -mgo - MongoDB driver for Go - -Copyright (c) 2010-2013 - Gustavo Niemeyer - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/globalsign/mgo/Makefile b/vendor/github.com/globalsign/mgo/Makefile deleted file mode 100644 index d1027d45..00000000 --- a/vendor/github.com/globalsign/mgo/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -startdb: - @harness/setup.sh start - -stopdb: - @harness/setup.sh stop diff --git a/vendor/github.com/globalsign/mgo/README.md b/vendor/github.com/globalsign/mgo/README.md deleted file mode 100644 index 76fd0554..00000000 --- a/vendor/github.com/globalsign/mgo/README.md +++ /dev/null @@ -1,105 +0,0 @@ -[![Build Status](https://travis-ci.org/globalsign/mgo.svg?branch=master)](https://travis-ci.org/globalsign/mgo) [![GoDoc](https://godoc.org/github.com/globalsign/mgo?status.svg)](https://godoc.org/github.com/globalsign/mgo) - -The MongoDB driver for Go -------------------------- - -This fork has had a few improvements by ourselves as well as several PR's merged from the original mgo repo that are currently awaiting review. -Changes are mostly geared towards performance improvements and bug fixes, though a few new features have been added. - -Further PR's (with tests) are welcome, but please maintain backwards compatibility. - -Detailed documentation of the API is available at -[GoDoc](https://godoc.org/github.com/globalsign/mgo). - -A [sub-package](https://godoc.org/github.com/globalsign/mgo/bson) that implements the [BSON](http://bsonspec.org) specification is also included, and may be used independently of the driver. - -## Supported Versions - -`mgo` is known to work well on (and has integration tests against) MongoDB v3.0, 3.2, 3.4 and 3.6. - -MongoDB 4.0 is currently experimental - we would happily accept PRs to help improve support! - -## Changes -* Fixes attempting to authenticate before every query ([details](https://github.com/go-mgo/mgo/issues/254)) -* Removes bulk update / delete batch size limitations ([details](https://github.com/go-mgo/mgo/issues/288)) -* Adds native support for `time.Duration` marshalling ([details](https://github.com/go-mgo/mgo/pull/373)) -* Reduce memory footprint / garbage collection pressure by reusing buffers ([details](https://github.com/go-mgo/mgo/pull/229), [more](https://github.com/globalsign/mgo/pull/56)) -* Support majority read concerns ([details](https://github.com/globalsign/mgo/pull/2)) -* Improved connection handling ([details](https://github.com/globalsign/mgo/pull/5)) -* Hides SASL warnings ([details](https://github.com/globalsign/mgo/pull/7)) -* Support for partial indexes ([details](https://github.com/domodwyer/mgo/commit/5efe8eccb028238d93c222828cae4806aeae9f51)) -* Fixes timezone handling ([details](https://github.com/go-mgo/mgo/pull/464)) -* Integration tests run against MongoDB 3.2 & 3.4 releases ([details](https://github.com/globalsign/mgo/pull/4), [more](https://github.com/globalsign/mgo/pull/24), [more](https://github.com/globalsign/mgo/pull/35)) -* Improved multi-document transaction performance ([details](https://github.com/globalsign/mgo/pull/10), [more](https://github.com/globalsign/mgo/pull/11), [more](https://github.com/globalsign/mgo/pull/16)) -* Fixes cursor timeouts ([details](https://jira.mongodb.org/browse/SERVER-24899)) -* Support index hints and timeouts for count queries ([details](https://github.com/globalsign/mgo/pull/17)) -* Don't panic when handling indexed `int64` fields ([details](https://github.com/go-mgo/mgo/issues/475)) -* Supports dropping all indexes on a collection ([details](https://github.com/globalsign/mgo/pull/25)) -* Annotates log entries/profiler output with optional appName on 
3.4+ ([details](https://github.com/globalsign/mgo/pull/28)) -* Support for read-only [views](https://docs.mongodb.com/manual/core/views/) in 3.4+ ([details](https://github.com/globalsign/mgo/pull/33)) -* Support for [collations](https://docs.mongodb.com/manual/reference/collation/) in 3.4+ ([details](https://github.com/globalsign/mgo/pull/37), [more](https://github.com/globalsign/mgo/pull/166)) -* Provide BSON constants for convenience/sanity ([details](https://github.com/globalsign/mgo/pull/41)) -* Consistently unmarshal time.Time values as UTC ([details](https://github.com/globalsign/mgo/pull/42)) -* Enforces best practise coding guidelines ([details](https://github.com/globalsign/mgo/pull/44)) -* GetBSON correctly handles structs with both fields and pointers ([details](https://github.com/globalsign/mgo/pull/40)) -* Improved bson.Raw unmarshalling performance ([details](https://github.com/globalsign/mgo/pull/49)) -* Minimise socket connection timeouts due to excessive locking ([details](https://github.com/globalsign/mgo/pull/52)) -* Natively support X509 client authentication ([details](https://github.com/globalsign/mgo/pull/55)) -* Gracefully recover from a temporarily unreachable server ([details](https://github.com/globalsign/mgo/pull/69)) -* Use JSON tags when no explicit BSON are tags set ([details](https://github.com/globalsign/mgo/pull/91)) -* Support [$changeStream](https://docs.mongodb.com/manual/changeStreams/) tailing on 3.6+ ([details](https://github.com/globalsign/mgo/pull/97)) -* Fix deadlock in cluster synchronisation ([details](https://github.com/globalsign/mgo/issues/120)) -* Implement `maxIdleTimeout` for pooled connections ([details](https://github.com/globalsign/mgo/pull/116)) -* Connection pool waiting improvements ([details](https://github.com/globalsign/mgo/pull/115)) -* Fixes BSON encoding for `$in` and friends ([details](https://github.com/globalsign/mgo/pull/128)) -* Add BSON stream encoders ([details](https://github.com/globalsign/mgo/pull/127)) -* Add integer map key support in the BSON encoder ([details](https://github.com/globalsign/mgo/pull/140)) -* Support aggregation [collations](https://docs.mongodb.com/manual/reference/collation/) ([details](https://github.com/globalsign/mgo/pull/144)) -* Support encoding of inline struct references ([details](https://github.com/globalsign/mgo/pull/146)) -* Improved windows test harness ([details](https://github.com/globalsign/mgo/pull/158)) -* Improved type and nil handling in the BSON codec ([details](https://github.com/globalsign/mgo/pull/147/files), [more](https://github.com/globalsign/mgo/pull/181)) -* Separated network read/write timeouts ([details](https://github.com/globalsign/mgo/pull/161)) -* Expanded dial string configuration options ([details](https://github.com/globalsign/mgo/pull/162)) -* Implement MongoTimestamp ([details](https://github.com/globalsign/mgo/pull/171)) -* Support setting `writeConcern` for `findAndModify` operations ([details](https://github.com/globalsign/mgo/pull/185)) -* Add `ssl` to the dial string options ([details](https://github.com/globalsign/mgo/pull/184)) - - ---- - -### Thanks to -* @aksentyev -* @bachue -* @bozaro -* @BenLubar -* @carldunham -* @carter2000 -* @cedric-cordenier -* @cezarsa -* @DaytonG -* @ddspog -* @drichelson -* @dvic -* @eaglerayp -* @feliixx -* @fmpwizard -* @gazoon -* @gedge -* @gnawux -* @idy -* @jameinel -* @jefferickson -* @johnlawsharrison -* @KJTsanaktsidis -* @larrycinnabar -* @mapete94 -* @maxnoel -* @mcspring -* @Mei-Zhao -* @peterdeka -* @Reenjii -* 
@roobre -* @smoya -* @steve-gray -* @tbruyelle -* @wgallagher diff --git a/vendor/github.com/globalsign/mgo/auth.go b/vendor/github.com/globalsign/mgo/auth.go deleted file mode 100644 index 75d2ebc3..00000000 --- a/vendor/github.com/globalsign/mgo/auth.go +++ /dev/null @@ -1,467 +0,0 @@ -// mgo - MongoDB driver for Go -// -// Copyright (c) 2010-2012 - Gustavo Niemeyer -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package mgo - -import ( - "crypto/md5" - "crypto/sha1" - "encoding/hex" - "errors" - "fmt" - "sync" - - "github.com/globalsign/mgo/bson" - "github.com/globalsign/mgo/internal/scram" -) - -type authCmd struct { - Authenticate int - - Nonce string - User string - Key string -} - -type startSaslCmd struct { - StartSASL int `bson:"startSasl"` -} - -type authResult struct { - ErrMsg string - Ok bool -} - -type getNonceCmd struct { - GetNonce int -} - -type getNonceResult struct { - Nonce string - Err string `bson:"$err"` - Code int -} - -type logoutCmd struct { - Logout int -} - -type saslCmd struct { - Start int `bson:"saslStart,omitempty"` - Continue int `bson:"saslContinue,omitempty"` - ConversationId int `bson:"conversationId,omitempty"` - Mechanism string `bson:"mechanism,omitempty"` - Payload []byte -} - -type saslResult struct { - Ok bool `bson:"ok"` - NotOk bool `bson:"code"` // Server <= 2.3.2 returns ok=1 & code>0 on errors (WTF?) 
- Done bool - - ConversationId int `bson:"conversationId"` - Payload []byte - ErrMsg string -} - -type saslStepper interface { - Step(serverData []byte) (clientData []byte, done bool, err error) - Close() -} - -func (socket *mongoSocket) getNonce() (nonce string, err error) { - socket.Lock() - for socket.cachedNonce == "" && socket.dead == nil { - debugf("Socket %p to %s: waiting for nonce", socket, socket.addr) - socket.gotNonce.Wait() - } - if socket.cachedNonce == "mongos" { - socket.Unlock() - return "", errors.New("Can't authenticate with mongos; see http://j.mp/mongos-auth") - } - debugf("Socket %p to %s: got nonce", socket, socket.addr) - nonce, err = socket.cachedNonce, socket.dead - socket.cachedNonce = "" - socket.Unlock() - if err != nil { - nonce = "" - } - return -} - -func (socket *mongoSocket) resetNonce() { - debugf("Socket %p to %s: requesting a new nonce", socket, socket.addr) - op := &queryOp{} - op.query = &getNonceCmd{GetNonce: 1} - op.collection = "admin.$cmd" - op.limit = -1 - op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) { - if err != nil { - socket.kill(errors.New("getNonce: "+err.Error()), true) - return - } - result := &getNonceResult{} - err = bson.Unmarshal(docData, &result) - if err != nil { - socket.kill(errors.New("Failed to unmarshal nonce: "+err.Error()), true) - return - } - debugf("Socket %p to %s: nonce unmarshalled: %#v", socket, socket.addr, result) - if result.Code == 13390 { - // mongos doesn't yet support auth (see http://j.mp/mongos-auth) - result.Nonce = "mongos" - } else if result.Nonce == "" { - var msg string - if result.Err != "" { - msg = fmt.Sprintf("Got an empty nonce: %s (%d)", result.Err, result.Code) - } else { - msg = "Got an empty nonce" - } - socket.kill(errors.New(msg), true) - return - } - socket.Lock() - if socket.cachedNonce != "" { - socket.Unlock() - panic("resetNonce: nonce already cached") - } - socket.cachedNonce = result.Nonce - socket.gotNonce.Signal() - socket.Unlock() - } - err := socket.Query(op) - if err != nil { - socket.kill(errors.New("resetNonce: "+err.Error()), true) - } -} - -func (socket *mongoSocket) Login(cred Credential) error { - socket.Lock() - if cred.Mechanism == "" && socket.serverInfo.MaxWireVersion >= 3 { - cred.Mechanism = "SCRAM-SHA-1" - } - for _, sockCred := range socket.creds { - if sockCred == cred { - debugf("Socket %p to %s: login: db=%q user=%q (already logged in)", socket, socket.addr, cred.Source, cred.Username) - socket.Unlock() - return nil - } - } - if socket.dropLogout(cred) { - debugf("Socket %p to %s: login: db=%q user=%q (cached)", socket, socket.addr, cred.Source, cred.Username) - socket.creds = append(socket.creds, cred) - socket.Unlock() - return nil - } - socket.Unlock() - - debugf("Socket %p to %s: login: db=%q user=%q", socket, socket.addr, cred.Source, cred.Username) - - var err error - switch cred.Mechanism { - case "", "MONGODB-CR", "MONGO-CR": // Name changed to MONGODB-CR in SERVER-8501. - err = socket.loginClassic(cred) - case "PLAIN": - err = socket.loginPlain(cred) - case "MONGODB-X509": - err = socket.loginX509(cred) - default: - // Try SASL for everything else, if it is available. 
- err = socket.loginSASL(cred) - } - - if err != nil { - debugf("Socket %p to %s: login error: %s", socket, socket.addr, err) - } else { - debugf("Socket %p to %s: login successful", socket, socket.addr) - } - return err -} - -func (socket *mongoSocket) loginClassic(cred Credential) error { - // Note that this only works properly because this function is - // synchronous, which means the nonce won't get reset while we're - // using it and any other login requests will block waiting for a - // new nonce provided in the defer call below. - nonce, err := socket.getNonce() - if err != nil { - return err - } - defer socket.resetNonce() - - psum := md5.New() - psum.Write([]byte(cred.Username + ":mongo:" + cred.Password)) - - ksum := md5.New() - ksum.Write([]byte(nonce + cred.Username)) - ksum.Write([]byte(hex.EncodeToString(psum.Sum(nil)))) - - key := hex.EncodeToString(ksum.Sum(nil)) - - cmd := authCmd{Authenticate: 1, User: cred.Username, Nonce: nonce, Key: key} - res := authResult{} - return socket.loginRun(cred.Source, &cmd, &res, func() error { - if !res.Ok { - return errors.New(res.ErrMsg) - } - socket.Lock() - socket.dropAuth(cred.Source) - socket.creds = append(socket.creds, cred) - socket.Unlock() - return nil - }) -} - -type authX509Cmd struct { - Authenticate int - User string - Mechanism string -} - -func (socket *mongoSocket) loginX509(cred Credential) error { - cmd := authX509Cmd{Authenticate: 1, User: cred.Username, Mechanism: "MONGODB-X509"} - res := authResult{} - return socket.loginRun(cred.Source, &cmd, &res, func() error { - if !res.Ok { - return errors.New(res.ErrMsg) - } - socket.Lock() - socket.dropAuth(cred.Source) - socket.creds = append(socket.creds, cred) - socket.Unlock() - return nil - }) -} - -func (socket *mongoSocket) loginPlain(cred Credential) error { - cmd := saslCmd{Start: 1, Mechanism: "PLAIN", Payload: []byte("\x00" + cred.Username + "\x00" + cred.Password)} - res := authResult{} - return socket.loginRun(cred.Source, &cmd, &res, func() error { - if !res.Ok { - return errors.New(res.ErrMsg) - } - socket.Lock() - socket.dropAuth(cred.Source) - socket.creds = append(socket.creds, cred) - socket.Unlock() - return nil - }) -} - -func (socket *mongoSocket) loginSASL(cred Credential) error { - var sasl saslStepper - var err error - if cred.Mechanism == "SCRAM-SHA-1" { - // SCRAM is handled without external libraries. - sasl = saslNewScram(cred) - } else if len(cred.ServiceHost) > 0 { - sasl, err = saslNew(cred, cred.ServiceHost) - } else { - sasl, err = saslNew(cred, socket.Server().Addr) - } - if err != nil { - return err - } - defer sasl.Close() - - // The goal of this logic is to carry a locked socket until the - // local SASL step confirms the auth is valid; the socket needs to be - // locked so that concurrent action doesn't leave the socket in an - // auth state that doesn't reflect the operations that took place. - // As a simple case, imagine inverting login=>logout to logout=>login. - // - // The logic below works because the lock func isn't called concurrently. 
- locked := false - lock := func(b bool) { - if locked != b { - locked = b - if b { - socket.Lock() - } else { - socket.Unlock() - } - } - } - - lock(true) - defer lock(false) - - start := 1 - cmd := saslCmd{} - res := saslResult{} - for { - payload, done, err := sasl.Step(res.Payload) - if err != nil { - return err - } - if done && res.Done { - socket.dropAuth(cred.Source) - socket.creds = append(socket.creds, cred) - break - } - lock(false) - - cmd = saslCmd{ - Start: start, - Continue: 1 - start, - ConversationId: res.ConversationId, - Mechanism: cred.Mechanism, - Payload: payload, - } - start = 0 - err = socket.loginRun(cred.Source, &cmd, &res, func() error { - // See the comment on lock for why this is necessary. - lock(true) - if !res.Ok || res.NotOk { - return fmt.Errorf("server returned error on SASL authentication step: %s", res.ErrMsg) - } - return nil - }) - if err != nil { - return err - } - if done && res.Done { - socket.dropAuth(cred.Source) - socket.creds = append(socket.creds, cred) - break - } - } - - return nil -} - -func saslNewScram(cred Credential) *saslScram { - credsum := md5.New() - credsum.Write([]byte(cred.Username + ":mongo:" + cred.Password)) - client := scram.NewClient(sha1.New, cred.Username, hex.EncodeToString(credsum.Sum(nil))) - return &saslScram{cred: cred, client: client} -} - -type saslScram struct { - cred Credential - client *scram.Client -} - -func (s *saslScram) Close() {} - -func (s *saslScram) Step(serverData []byte) (clientData []byte, done bool, err error) { - more := s.client.Step(serverData) - return s.client.Out(), !more, s.client.Err() -} - -func (socket *mongoSocket) loginRun(db string, query, result interface{}, f func() error) error { - var mutex sync.Mutex - var replyErr error - mutex.Lock() - - op := queryOp{} - op.query = query - op.collection = db + ".$cmd" - op.limit = -1 - op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) { - defer mutex.Unlock() - - if err != nil { - replyErr = err - return - } - - err = bson.Unmarshal(docData, result) - if err != nil { - replyErr = err - } else { - // Must handle this within the read loop for the socket, so - // that concurrent login requests are properly ordered. - replyErr = f() - } - } - - err := socket.Query(&op) - if err != nil { - return err - } - mutex.Lock() // Wait. - return replyErr -} - -func (socket *mongoSocket) Logout(db string) { - socket.Lock() - cred, found := socket.dropAuth(db) - if found { - debugf("Socket %p to %s: logout: db=%q (flagged)", socket, socket.addr, db) - socket.logout = append(socket.logout, cred) - } - socket.Unlock() -} - -func (socket *mongoSocket) LogoutAll() { - socket.Lock() - if l := len(socket.creds); l > 0 { - debugf("Socket %p to %s: logout all (flagged %d)", socket, socket.addr, l) - socket.logout = append(socket.logout, socket.creds...) 
- socket.creds = socket.creds[0:0] - } - socket.Unlock() -} - -func (socket *mongoSocket) flushLogout() (ops []interface{}) { - socket.Lock() - if l := len(socket.logout); l > 0 { - debugf("Socket %p to %s: logout all (flushing %d)", socket, socket.addr, l) - for i := 0; i != l; i++ { - op := queryOp{} - op.query = &logoutCmd{1} - op.collection = socket.logout[i].Source + ".$cmd" - op.limit = -1 - ops = append(ops, &op) - } - socket.logout = socket.logout[0:0] - } - socket.Unlock() - return -} - -func (socket *mongoSocket) dropAuth(db string) (cred Credential, found bool) { - for i, sockCred := range socket.creds { - if sockCred.Source == db { - copy(socket.creds[i:], socket.creds[i+1:]) - socket.creds = socket.creds[:len(socket.creds)-1] - return sockCred, true - } - } - return cred, false -} - -func (socket *mongoSocket) dropLogout(cred Credential) (found bool) { - for i, sockCred := range socket.logout { - if sockCred == cred { - copy(socket.logout[i:], socket.logout[i+1:]) - socket.logout = socket.logout[:len(socket.logout)-1] - return true - } - } - return false -} diff --git a/vendor/github.com/globalsign/mgo/bson/LICENSE b/vendor/github.com/globalsign/mgo/bson/LICENSE deleted file mode 100644 index 89032601..00000000 --- a/vendor/github.com/globalsign/mgo/bson/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -BSON library for Go - -Copyright (c) 2010-2012 - Gustavo Niemeyer - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/globalsign/mgo/bson/README.md b/vendor/github.com/globalsign/mgo/bson/README.md deleted file mode 100644 index 5c5819e6..00000000 --- a/vendor/github.com/globalsign/mgo/bson/README.md +++ /dev/null @@ -1,12 +0,0 @@ -[![GoDoc](https://godoc.org/github.com/globalsign/mgo/bson?status.svg)](https://godoc.org/github.com/globalsign/mgo/bson) - -An Implementation of BSON for Go --------------------------------- - -Package bson is an implementation of the [BSON specification](http://bsonspec.org) for Go. - -While the BSON package implements the BSON spec as faithfully as possible, there -is some MongoDB specific behaviour (such as map keys `$in`, `$all`, etc) in the -`bson` package. The priority is for backwards compatibility for the `mgo` -driver, though fixes for obviously buggy behaviour is welcome (and features, etc -behind feature flags). 
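The README deleted above notes that the bson sub-package can be used independently of the driver, and the removed router tests did exactly that, round-tripping documents through bson.Marshal and bson.Unmarshal. A minimal, self-contained example of that pattern (ours, not code from this repo):

```go
package main

import (
	"fmt"

	"github.com/globalsign/mgo/bson"
)

// Route carries the same bson tags as the struct removed from lib/router.go.
type Route struct {
	IncomingPath string `bson:"incoming_path"`
	Handler      string `bson:"handler"`
}

func main() {
	raw, err := bson.Marshal(bson.M{"incoming_path": "/foo", "handler": "gone"})
	if err != nil {
		panic(err)
	}
	var r Route
	if err := bson.Unmarshal(raw, &r); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", r) // {IncomingPath:/foo Handler:gone}
}
```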
diff --git a/vendor/github.com/globalsign/mgo/bson/bson.go b/vendor/github.com/globalsign/mgo/bson/bson.go deleted file mode 100644 index eb87ef62..00000000 --- a/vendor/github.com/globalsign/mgo/bson/bson.go +++ /dev/null @@ -1,836 +0,0 @@ -// BSON library for Go -// -// Copyright (c) 2010-2012 - Gustavo Niemeyer -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Package bson is an implementation of the BSON specification for Go: -// -// http://bsonspec.org -// -// It was created as part of the mgo MongoDB driver for Go, but is standalone -// and may be used on its own without the driver. -package bson - -import ( - "bytes" - "crypto/md5" - "crypto/rand" - "encoding/binary" - "encoding/hex" - "encoding/json" - "errors" - "fmt" - "io" - "math" - "os" - "reflect" - "runtime" - "strings" - "sync" - "sync/atomic" - "time" -) - -//go:generate go run bson_corpus_spec_test_generator.go - -// -------------------------------------------------------------------------- -// The public API. - -// Element types constants from BSON specification. -const ( - ElementFloat64 byte = 0x01 - ElementString byte = 0x02 - ElementDocument byte = 0x03 - ElementArray byte = 0x04 - ElementBinary byte = 0x05 - Element06 byte = 0x06 - ElementObjectId byte = 0x07 - ElementBool byte = 0x08 - ElementDatetime byte = 0x09 - ElementNil byte = 0x0A - ElementRegEx byte = 0x0B - ElementDBPointer byte = 0x0C - ElementJavaScriptWithoutScope byte = 0x0D - ElementSymbol byte = 0x0E - ElementJavaScriptWithScope byte = 0x0F - ElementInt32 byte = 0x10 - ElementTimestamp byte = 0x11 - ElementInt64 byte = 0x12 - ElementDecimal128 byte = 0x13 - ElementMinKey byte = 0xFF - ElementMaxKey byte = 0x7F - - BinaryGeneric byte = 0x00 - BinaryFunction byte = 0x01 - BinaryBinaryOld byte = 0x02 - BinaryUUIDOld byte = 0x03 - BinaryUUID byte = 0x04 - BinaryMD5 byte = 0x05 - BinaryUserDefined byte = 0x80 -) - -// Getter interface: a value implementing the bson.Getter interface will have its GetBSON -// method called when the given value has to be marshalled, and the result -// of this method will be marshaled in place of the actual object. -// -// If GetBSON returns return a non-nil error, the marshalling procedure -// will stop and error out with the provided value. 
-type Getter interface { - GetBSON() (interface{}, error) -} - -// Setter interface: a value implementing the bson.Setter interface will receive the BSON -// value via the SetBSON method during unmarshaling, and the object -// itself will not be changed as usual. -// -// If setting the value works, the method should return nil or alternatively -// bson.ErrSetZero to set the respective field to its zero value (nil for -// pointer types). If SetBSON returns a value of type bson.TypeError, the -// BSON value will be omitted from a map or slice being decoded and the -// unmarshalling will continue. If it returns any other non-nil error, the -// unmarshalling procedure will stop and error out with the provided value. -// -// This interface is generally useful in pointer receivers, since the method -// will want to change the receiver. A type field that implements the Setter -// interface doesn't have to be a pointer, though. -// -// Unlike the usual behavior, unmarshalling onto a value that implements a -// Setter interface will NOT reset the value to its zero state. This allows -// the value to decide by itself how to be unmarshalled. -// -// For example: -// -// type MyString string -// -// func (s *MyString) SetBSON(raw bson.Raw) error { -// return raw.Unmarshal(s) -// } -// -type Setter interface { - SetBSON(raw Raw) error -} - -// ErrSetZero may be returned from a SetBSON method to have the value set to -// its respective zero value. When used in pointer values, this will set the -// field to nil rather than to the pre-allocated value. -var ErrSetZero = errors.New("set to zero") - -// M is a convenient alias for a map[string]interface{} map, useful for -// dealing with BSON in a native way. For instance: -// -// bson.M{"a": 1, "b": true} -// -// There's no special handling for this type in addition to what's done anyway -// for an equivalent map type. Elements in the map will be dumped in an -// undefined ordered. See also the bson.D type for an ordered alternative. -type M map[string]interface{} - -// D represents a BSON document containing ordered elements. For example: -// -// bson.D{{"a", 1}, {"b", true}} -// -// In some situations, such as when creating indexes for MongoDB, the order in -// which the elements are defined is important. If the order is not important, -// using a map is generally more comfortable. See bson.M and bson.RawD. -type D []DocElem - -// DocElem is an element of the bson.D document representation. -type DocElem struct { - Name string - Value interface{} -} - -// Map returns a map out of the ordered element name/value pairs in d. -func (d D) Map() (m M) { - m = make(M, len(d)) - for _, item := range d { - m[item.Name] = item.Value - } - return m -} - -// The Raw type represents raw unprocessed BSON documents and elements. -// Kind is the kind of element as defined per the BSON specification, and -// Data is the raw unprocessed data for the respective element. -// Using this type it is possible to unmarshal or marshal values partially. -// -// Relevant documentation: -// -// http://bsonspec.org/#/specification -// -type Raw struct { - Kind byte - Data []byte -} - -// RawD represents a BSON document containing raw unprocessed elements. -// This low-level representation may be useful when lazily processing -// documents of uncertain content, or when manipulating the raw content -// documents in general. -type RawD []RawDocElem - -// RawDocElem elements of RawD type. 
-type RawDocElem struct { - Name string - Value Raw -} - -// ObjectId is a unique ID identifying a BSON value. It must be exactly 12 bytes -// long. MongoDB objects by default have such a property set in their "_id" -// property. -// -// http://www.mongodb.org/display/DOCS/Object+Ids -type ObjectId string - -// ObjectIdHex returns an ObjectId from the provided hex representation. -// Calling this function with an invalid hex representation will -// cause a runtime panic. See the IsObjectIdHex function. -func ObjectIdHex(s string) ObjectId { - d, err := hex.DecodeString(s) - if err != nil || len(d) != 12 { - panic(fmt.Sprintf("invalid input to ObjectIdHex: %q", s)) - } - return ObjectId(d) -} - -// IsObjectIdHex returns whether s is a valid hex representation of -// an ObjectId. See the ObjectIdHex function. -func IsObjectIdHex(s string) bool { - if len(s) != 24 { - return false - } - _, err := hex.DecodeString(s) - return err == nil -} - -// objectIdCounter is atomically incremented when generating a new ObjectId -// using NewObjectId() function. It's used as a counter part of an id. -var objectIdCounter = readRandomUint32() - -// readRandomUint32 returns a random objectIdCounter. -func readRandomUint32() uint32 { - var b [4]byte - _, err := io.ReadFull(rand.Reader, b[:]) - if err != nil { - panic(fmt.Errorf("cannot read random object id: %v", err)) - } - return uint32((uint32(b[0]) << 0) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)) -} - -// machineId stores machine id generated once and used in subsequent calls -// to NewObjectId function. -var machineId = readMachineId() -var processId = os.Getpid() - -// readMachineId generates and returns a machine id. -// If this function fails to get the hostname it will cause a runtime error. -func readMachineId() []byte { - var sum [3]byte - id := sum[:] - hostname, err1 := os.Hostname() - if err1 != nil { - _, err2 := io.ReadFull(rand.Reader, id) - if err2 != nil { - panic(fmt.Errorf("cannot get hostname: %v; %v", err1, err2)) - } - return id - } - hw := md5.New() - hw.Write([]byte(hostname)) - copy(id, hw.Sum(nil)) - return id -} - -// NewObjectId returns a new unique ObjectId. -func NewObjectId() ObjectId { - var b [12]byte - // Timestamp, 4 bytes, big endian - binary.BigEndian.PutUint32(b[:], uint32(time.Now().Unix())) - // Machine, first 3 bytes of md5(hostname) - b[4] = machineId[0] - b[5] = machineId[1] - b[6] = machineId[2] - // Pid, 2 bytes, specs don't specify endianness, but we use big endian. - b[7] = byte(processId >> 8) - b[8] = byte(processId) - // Increment, 3 bytes, big endian - i := atomic.AddUint32(&objectIdCounter, 1) - b[9] = byte(i >> 16) - b[10] = byte(i >> 8) - b[11] = byte(i) - return ObjectId(b[:]) -} - -// NewObjectIdWithTime returns a dummy ObjectId with the timestamp part filled -// with the provided number of seconds from epoch UTC, and all other parts -// filled with zeroes. It's not safe to insert a document with an id generated -// by this method, it is useful only for queries to find documents with ids -// generated before or after the specified timestamp. -func NewObjectIdWithTime(t time.Time) ObjectId { - var b [12]byte - binary.BigEndian.PutUint32(b[:4], uint32(t.Unix())) - return ObjectId(string(b[:])) -} - -// String returns a hex string representation of the id. -// Example: ObjectIdHex("4d88e15b60f486e428412dc9"). -func (id ObjectId) String() string { - return fmt.Sprintf(`ObjectIdHex("%x")`, string(id)) -} - -// Hex returns a hex representation of the ObjectId. 
-func (id ObjectId) Hex() string { - return hex.EncodeToString([]byte(id)) -} - -// MarshalJSON turns a bson.ObjectId into a json.Marshaller. -func (id ObjectId) MarshalJSON() ([]byte, error) { - return []byte(fmt.Sprintf(`"%x"`, string(id))), nil -} - -var nullBytes = []byte("null") - -// UnmarshalJSON turns *bson.ObjectId into a json.Unmarshaller. -func (id *ObjectId) UnmarshalJSON(data []byte) error { - if len(data) > 0 && (data[0] == '{' || data[0] == 'O') { - var v struct { - Id json.RawMessage `json:"$oid"` - Func struct { - Id json.RawMessage - } `json:"$oidFunc"` - } - err := jdec(data, &v) - if err == nil { - if len(v.Id) > 0 { - data = []byte(v.Id) - } else { - data = []byte(v.Func.Id) - } - } - } - if len(data) == 2 && data[0] == '"' && data[1] == '"' || bytes.Equal(data, nullBytes) { - *id = "" - return nil - } - if len(data) != 26 || data[0] != '"' || data[25] != '"' { - return fmt.Errorf("invalid ObjectId in JSON: %s", string(data)) - } - var buf [12]byte - _, err := hex.Decode(buf[:], data[1:25]) - if err != nil { - return fmt.Errorf("invalid ObjectId in JSON: %s (%s)", string(data), err) - } - *id = ObjectId(string(buf[:])) - return nil -} - -// MarshalText turns bson.ObjectId into an encoding.TextMarshaler. -func (id ObjectId) MarshalText() ([]byte, error) { - return []byte(fmt.Sprintf("%x", string(id))), nil -} - -// UnmarshalText turns *bson.ObjectId into an encoding.TextUnmarshaler. -func (id *ObjectId) UnmarshalText(data []byte) error { - if len(data) == 1 && data[0] == ' ' || len(data) == 0 { - *id = "" - return nil - } - if len(data) != 24 { - return fmt.Errorf("invalid ObjectId: %s", data) - } - var buf [12]byte - _, err := hex.Decode(buf[:], data[:]) - if err != nil { - return fmt.Errorf("invalid ObjectId: %s (%s)", data, err) - } - *id = ObjectId(string(buf[:])) - return nil -} - -// Valid returns true if id is valid. A valid id must contain exactly 12 bytes. -func (id ObjectId) Valid() bool { - return len(id) == 12 -} - -// byteSlice returns byte slice of id from start to end. -// Calling this function with an invalid id will cause a runtime panic. -func (id ObjectId) byteSlice(start, end int) []byte { - if len(id) != 12 { - panic(fmt.Sprintf("invalid ObjectId: %q", string(id))) - } - return []byte(string(id)[start:end]) -} - -// Time returns the timestamp part of the id. -// It's a runtime error to call this method with an invalid id. -func (id ObjectId) Time() time.Time { - // First 4 bytes of ObjectId is 32-bit big-endian seconds from epoch. - secs := int64(binary.BigEndian.Uint32(id.byteSlice(0, 4))) - return time.Unix(secs, 0) -} - -// Machine returns the 3-byte machine id part of the id. -// It's a runtime error to call this method with an invalid id. -func (id ObjectId) Machine() []byte { - return id.byteSlice(4, 7) -} - -// Pid returns the process id part of the id. -// It's a runtime error to call this method with an invalid id. -func (id ObjectId) Pid() uint16 { - return binary.BigEndian.Uint16(id.byteSlice(7, 9)) -} - -// Counter returns the incrementing value part of the id. -// It's a runtime error to call this method with an invalid id. -func (id ObjectId) Counter() int32 { - b := id.byteSlice(9, 12) - // Counter is stored as big-endian 3-byte value - return int32(uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2])) -} - -// The Symbol type is similar to a string and is used in languages with a -// distinct symbol type. -type Symbol string - -// Now returns the current time with millisecond precision. 
MongoDB stores -// timestamps with the same precision, so a Time returned from this method -// will not change after a roundtrip to the database. That's the only reason -// why this function exists. Using the time.Now function also works fine -// otherwise. -func Now() time.Time { - return time.Unix(0, time.Now().UnixNano()/1e6*1e6) -} - -// MongoTimestamp is a special internal type used by MongoDB that for some -// strange reason has its own datatype defined in BSON. -type MongoTimestamp int64 - -// Time returns the time part of ts which is stored with second precision. -func (ts MongoTimestamp) Time() time.Time { - return time.Unix(int64(uint64(ts)>>32), 0) -} - -// Counter returns the counter part of ts. -func (ts MongoTimestamp) Counter() uint32 { - return uint32(ts) -} - -// NewMongoTimestamp creates a timestamp using the given -// date `t` (with second precision) and counter `c` (unique for `t`). -// -// Returns an error if time `t` is not between 1970-01-01T00:00:00Z -// and 2106-02-07T06:28:15Z (inclusive). -// -// Note that two MongoTimestamps should never have the same (time, counter) combination: -// the caller must ensure the counter `c` is increased if creating multiple MongoTimestamp -// values for the same time `t` (ignoring fractions of seconds). -func NewMongoTimestamp(t time.Time, c uint32) (MongoTimestamp, error) { - u := t.Unix() - if u < 0 || u > math.MaxUint32 { - return -1, errors.New("invalid value for time") - } - - i := int64(u<<32 | int64(c)) - - return MongoTimestamp(i), nil -} - -type orderKey int64 - -// MaxKey is a special value that compares higher than all other possible BSON -// values in a MongoDB database. -var MaxKey = orderKey(1<<63 - 1) - -// MinKey is a special value that compares lower than all other possible BSON -// values in a MongoDB database. -var MinKey = orderKey(-1 << 63) - -type undefined struct{} - -// Undefined represents the undefined BSON value. -var Undefined undefined - -// Binary is a representation for non-standard binary values. Any kind should -// work, but the following are known as of this writing: -// -// 0x00 - Generic. This is decoded as []byte(data), not Binary{0x00, data}. -// 0x01 - Function (!?) -// 0x02 - Obsolete generic. -// 0x03 - UUID -// 0x05 - MD5 -// 0x80 - User defined. -// -type Binary struct { - Kind byte - Data []byte -} - -// RegEx represents a regular expression. The Options field may contain -// individual characters defining the way in which the pattern should be -// applied, and must be sorted. Valid options as of this writing are 'i' for -// case insensitive matching, 'm' for multi-line matching, 'x' for verbose -// mode, 'l' to make \w, \W, and similar be locale-dependent, 's' for dot-all -// mode (a '.' matches everything), and 'u' to make \w, \W, and similar match -// unicode. The value of the Options parameter is not verified before being -// marshaled into the BSON format. -type RegEx struct { - Pattern string - Options string -} - -// JavaScript is a type that holds JavaScript code. If Scope is non-nil, it -// will be marshaled as a mapping from identifiers to values that may be -// used when evaluating the provided Code. -type JavaScript struct { - Code string - Scope interface{} -} - -// DBPointer refers to a document id in a namespace. -// -// This type is deprecated in the BSON specification and should not be used -// except for backwards compatibility with ancient applications. 
-type DBPointer struct { - Namespace string - Id ObjectId -} - -const initialBufferSize = 64 - -func handleErr(err *error) { - if r := recover(); r != nil { - if _, ok := r.(runtime.Error); ok { - panic(r) - } else if _, ok := r.(externalPanic); ok { - panic(r) - } else if s, ok := r.(string); ok { - *err = errors.New(s) - } else if e, ok := r.(error); ok { - *err = e - } else { - panic(r) - } - } -} - -// Marshal serializes the in value, which may be a map or a struct value. -// In the case of struct values, only exported fields will be serialized, -// and the order of serialized fields will match that of the struct itself. -// The lowercased field name is used as the key for each exported field, -// but this behavior may be changed using the respective field tag. -// The tag may also contain flags to tweak the marshalling behavior for -// the field. The tag formats accepted are: -// -// "[][,[,]]" -// -// `(...) bson:"[][,[,]]" (...)` -// -// The following flags are currently supported: -// -// omitempty Only include the field if it's not set to the zero -// value for the type or to empty slices or maps. -// -// minsize Marshal an int64 value as an int32, if that's feasible -// while preserving the numeric value. -// -// inline Inline the field, which must be a struct or a map, -// causing all of its fields or keys to be processed as if -// they were part of the outer struct. For maps, keys must -// not conflict with the bson keys of other struct fields. -// -// Some examples: -// -// type T struct { -// A bool -// B int "myb" -// C string "myc,omitempty" -// D string `bson:",omitempty" json:"jsonkey"` -// E int64 ",minsize" -// F int64 "myf,omitempty,minsize" -// } -// -func Marshal(in interface{}) (out []byte, err error) { - return MarshalBuffer(in, make([]byte, 0, initialBufferSize)) -} - -// MarshalBuffer behaves the same way as Marshal, except that instead of -// allocating a new byte slice it tries to use the received byte slice and -// only allocates more memory if necessary to fit the marshaled value. -func MarshalBuffer(in interface{}, buf []byte) (out []byte, err error) { - defer handleErr(&err) - e := &encoder{buf} - e.addDoc(reflect.ValueOf(in)) - return e.out, nil -} - -// Unmarshal deserializes data from in into the out value. The out value -// must be a map, a pointer to a struct, or a pointer to a bson.D value. -// In the case of struct values, only exported fields will be deserialized. -// The lowercased field name is used as the key for each exported field, -// but this behavior may be changed using the respective field tag. -// The tag may also contain flags to tweak the marshalling behavior for -// the field. The tag formats accepted are: -// -// "[][,[,]]" -// -// `(...) bson:"[][,[,]]" (...)` -// -// The following flags are currently supported during unmarshal (see the -// Marshal method for other flags): -// -// inline Inline the field, which must be a struct or a map. -// Inlined structs are handled as if its fields were part -// of the outer struct. An inlined map causes keys that do -// not match any other struct field to be inserted in the -// map rather than being discarded as usual. -// -// The target field or element types of out may not necessarily match -// the BSON values of the provided data. 
The following conversions are -// made automatically: -// -// - Numeric types are converted if at least the integer part of the -// value would be preserved correctly -// - Bools are converted to numeric types as 1 or 0 -// - Numeric types are converted to bools as true if not 0 or false otherwise -// - Binary and string BSON data is converted to a string, array or byte slice -// -// If the value would not fit the type and cannot be converted, it's -// silently skipped. -// -// Pointer values are initialized when necessary. -func Unmarshal(in []byte, out interface{}) (err error) { - if raw, ok := out.(*Raw); ok { - raw.Kind = 3 - raw.Data = in - return nil - } - defer handleErr(&err) - v := reflect.ValueOf(out) - switch v.Kind() { - case reflect.Ptr: - fallthrough - case reflect.Map: - d := newDecoder(in) - d.readDocTo(v) - if d.i < len(d.in) { - return errors.New("document is corrupted") - } - case reflect.Struct: - return errors.New("unmarshal can't deal with struct values. Use a pointer") - default: - return errors.New("unmarshal needs a map or a pointer to a struct") - } - return nil -} - -// Unmarshal deserializes raw into the out value. If the out value type -// is not compatible with raw, a *bson.TypeError is returned. -// -// See the Unmarshal function documentation for more details on the -// unmarshalling process. -func (raw Raw) Unmarshal(out interface{}) (err error) { - defer handleErr(&err) - v := reflect.ValueOf(out) - switch v.Kind() { - case reflect.Ptr: - v = v.Elem() - fallthrough - case reflect.Map: - d := newDecoder(raw.Data) - good := d.readElemTo(v, raw.Kind) - if !good { - return &TypeError{v.Type(), raw.Kind} - } - case reflect.Struct: - return errors.New("raw Unmarshal can't deal with struct values. Use a pointer") - default: - return errors.New("raw Unmarshal needs a map or a valid pointer") - } - return nil -} - -// TypeError store details for type error occuring -// during unmarshaling -type TypeError struct { - Type reflect.Type - Kind byte -} - -func (e *TypeError) Error() string { - return fmt.Sprintf("BSON kind 0x%02x isn't compatible with type %s", e.Kind, e.Type.String()) -} - -// -------------------------------------------------------------------------- -// Maintain a mapping of keys to structure field indexes - -type structInfo struct { - FieldsMap map[string]fieldInfo - FieldsList []fieldInfo - InlineMap int - Zero reflect.Value -} - -type fieldInfo struct { - Key string - Num int - OmitEmpty bool - MinSize bool - Inline []int -} - -var structMap = make(map[reflect.Type]*structInfo) -var structMapMutex sync.RWMutex - -type externalPanic string - -func (e externalPanic) String() string { - return string(e) -} - -func getStructInfo(st reflect.Type) (*structInfo, error) { - structMapMutex.RLock() - sinfo, found := structMap[st] - structMapMutex.RUnlock() - if found { - return sinfo, nil - } - n := st.NumField() - fieldsMap := make(map[string]fieldInfo) - fieldsList := make([]fieldInfo, 0, n) - inlineMap := -1 - for i := 0; i != n; i++ { - field := st.Field(i) - if field.PkgPath != "" && !field.Anonymous { - continue // Private field - } - - info := fieldInfo{Num: i} - - tag := field.Tag.Get("bson") - - // Fall-back to JSON struct tag, if feature flag is set. - if tag == "" && useJSONTagFallback { - tag = field.Tag.Get("json") - } - - // If there's no bson/json tag available. - if tag == "" { - // If there's no tag, and also no tag: value splits (i.e. 
no colon) - // then assume the entire tag is the value - if strings.Index(string(field.Tag), ":") < 0 { - tag = string(field.Tag) - } - } - - if tag == "-" { - continue - } - - inline := false - fields := strings.Split(tag, ",") - if len(fields) > 1 { - for _, flag := range fields[1:] { - switch flag { - case "omitempty": - info.OmitEmpty = true - case "minsize": - info.MinSize = true - case "inline": - inline = true - default: - msg := fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st) - panic(externalPanic(msg)) - } - } - tag = fields[0] - } - - if inline { - switch field.Type.Kind() { - case reflect.Map: - if inlineMap >= 0 { - return nil, errors.New("Multiple ,inline maps in struct " + st.String()) - } - if field.Type.Key() != reflect.TypeOf("") { - return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) - } - inlineMap = info.Num - case reflect.Ptr: - // allow only pointer to struct - if kind := field.Type.Elem().Kind(); kind != reflect.Struct { - return nil, errors.New("Option ,inline allows a pointer only to a struct, was given pointer to " + kind.String()) - } - - field.Type = field.Type.Elem() - fallthrough - case reflect.Struct: - sinfo, err := getStructInfo(field.Type) - if err != nil { - return nil, err - } - for _, finfo := range sinfo.FieldsList { - if _, found := fieldsMap[finfo.Key]; found { - msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - if finfo.Inline == nil { - finfo.Inline = []int{i, finfo.Num} - } else { - finfo.Inline = append([]int{i}, finfo.Inline...) - } - fieldsMap[finfo.Key] = finfo - fieldsList = append(fieldsList, finfo) - } - default: - panic("Option ,inline needs a struct value or a pointer to a struct or map field") - } - continue - } - - if tag != "" { - info.Key = tag - } else { - info.Key = strings.ToLower(field.Name) - } - - if _, found = fieldsMap[info.Key]; found { - msg := "Duplicated key '" + info.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - - fieldsList = append(fieldsList, info) - fieldsMap[info.Key] = info - } - sinfo = &structInfo{ - fieldsMap, - fieldsList, - inlineMap, - reflect.New(st).Elem(), - } - structMapMutex.Lock() - structMap[st] = sinfo - structMapMutex.Unlock() - return sinfo, nil -} diff --git a/vendor/github.com/globalsign/mgo/bson/compatibility.go b/vendor/github.com/globalsign/mgo/bson/compatibility.go deleted file mode 100644 index 66efd465..00000000 --- a/vendor/github.com/globalsign/mgo/bson/compatibility.go +++ /dev/null @@ -1,29 +0,0 @@ -package bson - -// Current state of the JSON tag fallback option. -var useJSONTagFallback = false -var useRespectNilValues = false - -// SetJSONTagFallback enables or disables the JSON-tag fallback for structure tagging. When this is enabled, structures -// without BSON tags on a field will fall-back to using the JSON tag (if present). -func SetJSONTagFallback(state bool) { - useJSONTagFallback = state -} - -// JSONTagFallbackState returns the current status of the JSON tag fallback compatability option. See SetJSONTagFallback -// for more information. -func JSONTagFallbackState() bool { - return useJSONTagFallback -} - -// SetRespectNilValues enables or disables serializing nil slices or maps to `null` values. -// In other words it enables `encoding/json` compatible behaviour. 
-func SetRespectNilValues(state bool) { - useRespectNilValues = state -} - -// RespectNilValuesState returns the current status of the JSON nil slices and maps fallback compatibility option. -// See SetRespectNilValues for more information. -func RespectNilValuesState() bool { - return useRespectNilValues -} diff --git a/vendor/github.com/globalsign/mgo/bson/decimal.go b/vendor/github.com/globalsign/mgo/bson/decimal.go deleted file mode 100644 index 672ba182..00000000 --- a/vendor/github.com/globalsign/mgo/bson/decimal.go +++ /dev/null @@ -1,312 +0,0 @@ -// BSON library for Go -// -// Copyright (c) 2010-2012 - Gustavo Niemeyer -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package bson - -import ( - "fmt" - "strconv" - "strings" -) - -// Decimal128 holds decimal128 BSON values. -type Decimal128 struct { - h, l uint64 -} - -func (d Decimal128) String() string { - var pos int // positive sign - var e int // exponent - var h, l uint64 // significand high/low - - if d.h>>63&1 == 0 { - pos = 1 - } - - switch d.h >> 58 & (1<<5 - 1) { - case 0x1F: - return "NaN" - case 0x1E: - return "-Inf"[pos:] - } - - l = d.l - if d.h>>61&3 == 3 { - // Bits: 1*sign 2*ignored 14*exponent 111*significand. - // Implicit 0b100 prefix in significand. - e = int(d.h>>47&(1<<14-1)) - 6176 - //h = 4<<47 | d.h&(1<<47-1) - // Spec says all of these values are out of range. - h, l = 0, 0 - } else { - // Bits: 1*sign 14*exponent 113*significand - e = int(d.h>>49&(1<<14-1)) - 6176 - h = d.h & (1<<49 - 1) - } - - // Would be handled by the logic below, but that's trivial and common. - if h == 0 && l == 0 && e == 0 { - return "-0"[pos:] - } - - var repr [48]byte // Loop 5 times over 9 digits plus dot, negative sign, and leading zero. - var last = len(repr) - var i = len(repr) - var dot = len(repr) + e - var rem uint32 -Loop: - for d9 := 0; d9 < 5; d9++ { - h, l, rem = divmod(h, l, 1e9) - for d1 := 0; d1 < 9; d1++ { - // Handle "-0.0", "0.00123400", "-1.00E-6", "1.050E+3", etc. - if i < len(repr) && (dot == i || l == 0 && h == 0 && rem > 0 && rem < 10 && (dot < i-6 || e > 0)) { - e += len(repr) - i - i-- - repr[i] = '.' - last = i - 1 - dot = len(repr) // Unmark. 
- } - c := '0' + byte(rem%10) - rem /= 10 - i-- - repr[i] = c - // Handle "0E+3", "1E+3", etc. - if l == 0 && h == 0 && rem == 0 && i == len(repr)-1 && (dot < i-5 || e > 0) { - last = i - break Loop - } - if c != '0' { - last = i - } - // Break early. Works without it, but why. - if dot > i && l == 0 && h == 0 && rem == 0 { - break Loop - } - } - } - repr[last-1] = '-' - last-- - - if e > 0 { - return string(repr[last+pos:]) + "E+" + strconv.Itoa(e) - } - if e < 0 { - return string(repr[last+pos:]) + "E" + strconv.Itoa(e) - } - return string(repr[last+pos:]) -} - -func divmod(h, l uint64, div uint32) (qh, ql uint64, rem uint32) { - div64 := uint64(div) - a := h >> 32 - aq := a / div64 - ar := a % div64 - b := ar<<32 + h&(1<<32-1) - bq := b / div64 - br := b % div64 - c := br<<32 + l>>32 - cq := c / div64 - cr := c % div64 - d := cr<<32 + l&(1<<32-1) - dq := d / div64 - dr := d % div64 - return (aq<<32 | bq), (cq<<32 | dq), uint32(dr) -} - -var dNaN = Decimal128{0x1F << 58, 0} -var dPosInf = Decimal128{0x1E << 58, 0} -var dNegInf = Decimal128{0x3E << 58, 0} - -func dErr(s string) (Decimal128, error) { - return dNaN, fmt.Errorf("cannot parse %q as a decimal128", s) -} - -// ParseDecimal128 parse a string and return the corresponding value as -// a decimal128 -func ParseDecimal128(s string) (Decimal128, error) { - orig := s - if s == "" { - return dErr(orig) - } - neg := s[0] == '-' - if neg || s[0] == '+' { - s = s[1:] - } - - if (len(s) == 3 || len(s) == 8) && (s[0] == 'N' || s[0] == 'n' || s[0] == 'I' || s[0] == 'i') { - if s == "NaN" || s == "nan" || strings.EqualFold(s, "nan") { - return dNaN, nil - } - if s == "Inf" || s == "inf" || strings.EqualFold(s, "inf") || strings.EqualFold(s, "infinity") { - if neg { - return dNegInf, nil - } - return dPosInf, nil - } - return dErr(orig) - } - - var h, l uint64 - var e int - - var add, ovr uint32 - var mul uint32 = 1 - var dot = -1 - var digits = 0 - var i = 0 - for i < len(s) { - c := s[i] - if mul == 1e9 { - h, l, ovr = muladd(h, l, mul, add) - mul, add = 1, 0 - if ovr > 0 || h&((1<<15-1)<<49) > 0 { - return dErr(orig) - } - } - if c >= '0' && c <= '9' { - i++ - if c > '0' || digits > 0 { - digits++ - } - if digits > 34 { - if c == '0' { - // Exact rounding. - e++ - continue - } - return dErr(orig) - } - mul *= 10 - add *= 10 - add += uint32(c - '0') - continue - } - if c == '.' { - i++ - if dot >= 0 || i == 1 && len(s) == 1 { - return dErr(orig) - } - if i == len(s) { - break - } - if s[i] < '0' || s[i] > '9' || e > 0 { - return dErr(orig) - } - dot = i - continue - } - break - } - if i == 0 { - return dErr(orig) - } - if mul > 1 { - h, l, ovr = muladd(h, l, mul, add) - if ovr > 0 || h&((1<<15-1)<<49) > 0 { - return dErr(orig) - } - } - if dot >= 0 { - e += dot - i - } - if i+1 < len(s) && (s[i] == 'E' || s[i] == 'e') { - i++ - eneg := s[i] == '-' - if eneg || s[i] == '+' { - i++ - if i == len(s) { - return dErr(orig) - } - } - n := 0 - for i < len(s) && n < 1e4 { - c := s[i] - i++ - if c < '0' || c > '9' { - return dErr(orig) - } - n *= 10 - n += int(c - '0') - } - if eneg { - n = -n - } - e += n - for e < -6176 { - // Subnormal. - var div uint32 = 1 - for div < 1e9 && e < -6176 { - div *= 10 - e++ - } - var rem uint32 - h, l, rem = divmod(h, l, div) - if rem > 0 { - return dErr(orig) - } - } - for e > 6111 { - // Clamped. 
- var mul uint32 = 1 - for mul < 1e9 && e > 6111 { - mul *= 10 - e-- - } - h, l, ovr = muladd(h, l, mul, 0) - if ovr > 0 || h&((1<<15-1)<<49) > 0 { - return dErr(orig) - } - } - if e < -6176 || e > 6111 { - return dErr(orig) - } - } - - if i < len(s) { - return dErr(orig) - } - - h |= uint64(e+6176) & uint64(1<<14-1) << 49 - if neg { - h |= 1 << 63 - } - return Decimal128{h, l}, nil -} - -func muladd(h, l uint64, mul uint32, add uint32) (resh, resl uint64, overflow uint32) { - mul64 := uint64(mul) - a := mul64 * (l & (1<<32 - 1)) - b := a>>32 + mul64*(l>>32) - c := b>>32 + mul64*(h&(1<<32-1)) - d := c>>32 + mul64*(h>>32) - - a = a&(1<<32-1) + uint64(add) - b = b&(1<<32-1) + a>>32 - c = c&(1<<32-1) + b>>32 - d = d&(1<<32-1) + c>>32 - - return (d<<32 | c&(1<<32-1)), (b<<32 | a&(1<<32-1)), uint32(d >> 32) -} diff --git a/vendor/github.com/globalsign/mgo/bson/decode.go b/vendor/github.com/globalsign/mgo/bson/decode.go deleted file mode 100644 index 658856ad..00000000 --- a/vendor/github.com/globalsign/mgo/bson/decode.go +++ /dev/null @@ -1,1055 +0,0 @@ -// BSON library for Go -// -// Copyright (c) 2010-2012 - Gustavo Niemeyer -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// gobson - BSON library for Go. - -package bson - -import ( - "errors" - "fmt" - "io" - "math" - "net/url" - "reflect" - "strconv" - "sync" - "time" -) - -type decoder struct { - in []byte - i int - docType reflect.Type -} - -var typeM = reflect.TypeOf(M{}) - -func newDecoder(in []byte) *decoder { - return &decoder{in, 0, typeM} -} - -// -------------------------------------------------------------------------- -// Some helper functions. - -func corrupted() { - panic("Document is corrupted") -} - -// -------------------------------------------------------------------------- -// Unmarshaling of documents. 
- -const ( - setterUnknown = iota - setterNone - setterType - setterAddr -) - -var setterStyles map[reflect.Type]int -var setterIface reflect.Type -var setterMutex sync.RWMutex - -func init() { - var iface Setter - setterIface = reflect.TypeOf(&iface).Elem() - setterStyles = make(map[reflect.Type]int) -} - -func setterStyle(outt reflect.Type) int { - setterMutex.RLock() - style := setterStyles[outt] - setterMutex.RUnlock() - if style != setterUnknown { - return style - } - - setterMutex.Lock() - defer setterMutex.Unlock() - if outt.Implements(setterIface) { - style = setterType - } else if reflect.PtrTo(outt).Implements(setterIface) { - style = setterAddr - } else { - style = setterNone - } - setterStyles[outt] = style - return style -} - -func getSetter(outt reflect.Type, out reflect.Value) Setter { - style := setterStyle(outt) - if style == setterNone { - return nil - } - if style == setterAddr { - if !out.CanAddr() { - return nil - } - out = out.Addr() - } else if outt.Kind() == reflect.Ptr && out.IsNil() { - out.Set(reflect.New(outt.Elem())) - } - return out.Interface().(Setter) -} - -func clearMap(m reflect.Value) { - var none reflect.Value - for _, k := range m.MapKeys() { - m.SetMapIndex(k, none) - } -} - -func (d *decoder) readDocTo(out reflect.Value) { - var elemType reflect.Type - outt := out.Type() - outk := outt.Kind() - - for { - if outk == reflect.Ptr && out.IsNil() { - out.Set(reflect.New(outt.Elem())) - } - if setter := getSetter(outt, out); setter != nil { - raw := d.readRaw(ElementDocument) - err := setter.SetBSON(raw) - if _, ok := err.(*TypeError); err != nil && !ok { - panic(err) - } - return - } - if outk == reflect.Ptr { - out = out.Elem() - outt = out.Type() - outk = out.Kind() - continue - } - break - } - - var fieldsMap map[string]fieldInfo - var inlineMap reflect.Value - if outt == typeRaw { - out.Set(reflect.ValueOf(d.readRaw(ElementDocument))) - return - } - - origout := out - if outk == reflect.Interface { - if d.docType.Kind() == reflect.Map { - mv := reflect.MakeMap(d.docType) - out.Set(mv) - out = mv - } else { - dv := reflect.New(d.docType).Elem() - out.Set(dv) - out = dv - } - outt = out.Type() - outk = outt.Kind() - } - - docType := d.docType - keyType := typeString - convertKey := false - switch outk { - case reflect.Map: - keyType = outt.Key() - if keyType != typeString { - convertKey = true - } - elemType = outt.Elem() - if elemType == typeIface { - d.docType = outt - } - if out.IsNil() { - out.Set(reflect.MakeMap(out.Type())) - } else if out.Len() > 0 { - clearMap(out) - } - case reflect.Struct: - sinfo, err := getStructInfo(out.Type()) - if err != nil { - panic(err) - } - fieldsMap = sinfo.FieldsMap - out.Set(sinfo.Zero) - if sinfo.InlineMap != -1 { - inlineMap = out.Field(sinfo.InlineMap) - if !inlineMap.IsNil() && inlineMap.Len() > 0 { - clearMap(inlineMap) - } - elemType = inlineMap.Type().Elem() - if elemType == typeIface { - d.docType = inlineMap.Type() - } - } - case reflect.Slice: - switch outt.Elem() { - case typeDocElem: - origout.Set(d.readDocElems(outt)) - return - case typeRawDocElem: - origout.Set(d.readRawDocElems(outt)) - return - } - fallthrough - default: - panic("Unsupported document type for unmarshalling: " + out.Type().String()) - } - - end := int(d.readInt32()) - end += d.i - 4 - if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' { - corrupted() - } - for d.in[d.i] != '\x00' { - kind := d.readByte() - name := d.readCStr() - if d.i >= end { - corrupted() - } - - switch outk { - case reflect.Map: - e := 
reflect.New(elemType).Elem() - if d.readElemTo(e, kind) { - k := reflect.ValueOf(name) - if convertKey { - mapKeyType := out.Type().Key() - mapKeyKind := mapKeyType.Kind() - - switch mapKeyKind { - case reflect.Int: - fallthrough - case reflect.Int8: - fallthrough - case reflect.Int16: - fallthrough - case reflect.Int32: - fallthrough - case reflect.Int64: - fallthrough - case reflect.Uint: - fallthrough - case reflect.Uint8: - fallthrough - case reflect.Uint16: - fallthrough - case reflect.Uint32: - fallthrough - case reflect.Uint64: - fallthrough - case reflect.Float32: - fallthrough - case reflect.Float64: - parsed := d.parseMapKeyAsFloat(k, mapKeyKind) - k = reflect.ValueOf(parsed) - case reflect.String: - mapKeyType = keyType - default: - panic("BSON map must have string or decimal keys. Got: " + outt.String()) - } - - k = k.Convert(mapKeyType) - } - out.SetMapIndex(k, e) - } - case reflect.Struct: - if info, ok := fieldsMap[name]; ok { - if info.Inline == nil { - d.readElemTo(out.Field(info.Num), kind) - } else { - d.readElemTo(out.FieldByIndex(info.Inline), kind) - } - } else if inlineMap.IsValid() { - if inlineMap.IsNil() { - inlineMap.Set(reflect.MakeMap(inlineMap.Type())) - } - e := reflect.New(elemType).Elem() - if d.readElemTo(e, kind) { - inlineMap.SetMapIndex(reflect.ValueOf(name), e) - } - } else { - d.dropElem(kind) - } - case reflect.Slice: - } - - if d.i >= end { - corrupted() - } - } - d.i++ // '\x00' - if d.i != end { - corrupted() - } - d.docType = docType -} - -func (decoder) parseMapKeyAsFloat(k reflect.Value, mapKeyKind reflect.Kind) float64 { - parsed, err := strconv.ParseFloat(k.String(), 64) - if err != nil { - panic("Map key is defined to be a decimal type (" + mapKeyKind.String() + ") but got error " + - err.Error()) - } - - return parsed -} - -func (d *decoder) readArrayDocTo(out reflect.Value) { - end := int(d.readInt32()) - end += d.i - 4 - if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' { - corrupted() - } - i := 0 - l := out.Len() - for d.in[d.i] != '\x00' { - if i >= l { - panic("Length mismatch on array field") - } - kind := d.readByte() - for d.i < end && d.in[d.i] != '\x00' { - d.i++ - } - if d.i >= end { - corrupted() - } - d.i++ - d.readElemTo(out.Index(i), kind) - if d.i >= end { - corrupted() - } - i++ - } - if i != l { - panic("Length mismatch on array field") - } - d.i++ // '\x00' - if d.i != end { - corrupted() - } -} - -func (d *decoder) readSliceDoc(t reflect.Type) interface{} { - tmp := make([]reflect.Value, 0, 8) - elemType := t.Elem() - if elemType == typeRawDocElem { - d.dropElem(ElementArray) - return reflect.Zero(t).Interface() - } - if elemType == typeRaw { - return d.readSliceOfRaw() - } - - end := int(d.readInt32()) - end += d.i - 4 - if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' { - corrupted() - } - for d.in[d.i] != '\x00' { - kind := d.readByte() - for d.i < end && d.in[d.i] != '\x00' { - d.i++ - } - if d.i >= end { - corrupted() - } - d.i++ - e := reflect.New(elemType).Elem() - if d.readElemTo(e, kind) { - tmp = append(tmp, e) - } - if d.i >= end { - corrupted() - } - } - d.i++ // '\x00' - if d.i != end { - corrupted() - } - - n := len(tmp) - slice := reflect.MakeSlice(t, n, n) - for i := 0; i != n; i++ { - slice.Index(i).Set(tmp[i]) - } - return slice.Interface() -} - -func BSONElementSize(kind byte, offset int, buffer []byte) (int, error) { - switch kind { - case ElementFloat64: // Float64 - return 8, nil - case ElementJavaScriptWithoutScope: // JavaScript without scope - fallthrough - case ElementSymbol: 
// Symbol - fallthrough - case ElementString: // UTF-8 string - size, err := getSize(offset, buffer) - if err != nil { - return 0, err - } - if size < 1 { - return 0, errors.New("String size can't be less then one byte") - } - size += 4 - if offset+size > len(buffer) { - return 0, io.ErrUnexpectedEOF - } - if buffer[offset+size-1] != 0 { - return 0, errors.New("Invalid string: non zero-terminated") - } - return size, nil - case ElementArray: // Array - fallthrough - case ElementDocument: // Document - size, err := getSize(offset, buffer) - if err != nil { - return 0, err - } - if size < 5 { - return 0, errors.New("Declared document size is too small") - } - return size, nil - case ElementBinary: // Binary - size, err := getSize(offset, buffer) - if err != nil { - return 0, err - } - if size < 0 { - return 0, errors.New("Binary data size can't be negative") - } - return size + 5, nil - case Element06: // Undefined (obsolete, but still seen in the wild) - return 0, nil - case ElementObjectId: // ObjectId - return 12, nil - case ElementBool: // Bool - return 1, nil - case ElementDatetime: // Timestamp - return 8, nil - case ElementNil: // Nil - return 0, nil - case ElementRegEx: // RegEx - end := offset - for i := 0; i < 2; i++ { - for end < len(buffer) && buffer[end] != '\x00' { - end++ - } - end++ - } - if end > len(buffer) { - return 0, io.ErrUnexpectedEOF - } - return end - offset, nil - case ElementDBPointer: // DBPointer - size, err := getSize(offset, buffer) - if err != nil { - return 0, err - } - if size < 1 { - return 0, errors.New("String size can't be less then one byte") - } - return size + 12 + 4, nil - case ElementJavaScriptWithScope: // JavaScript with scope - size, err := getSize(offset, buffer) - if err != nil { - return 0, err - } - if size < 4+5+5 { - return 0, errors.New("Declared document element is too small") - } - return size, nil - case ElementInt32: // Int32 - return 4, nil - case ElementTimestamp: // Mongo-specific timestamp - return 8, nil - case ElementInt64: // Int64 - return 8, nil - case ElementDecimal128: // Decimal128 - return 16, nil - case ElementMaxKey: // Max key - return 0, nil - case ElementMinKey: // Min key - return 0, nil - default: - return 0, errors.New(fmt.Sprintf("Unknown element kind (0x%02X)", kind)) - } -} - -func (d *decoder) readRaw(kind byte) Raw { - size, err := BSONElementSize(kind, d.i, d.in) - if err != nil { - corrupted() - } - if d.i+size > len(d.in) { - corrupted() - } - d.i += size - return Raw{ - Kind: kind, - Data: d.in[d.i-size : d.i], - } -} - -func (d *decoder) readSliceOfRaw() interface{} { - tmp := make([]Raw, 0, 8) - end := int(d.readInt32()) - end += d.i - 4 - if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' { - corrupted() - } - for d.in[d.i] != '\x00' { - kind := d.readByte() - for d.i < end && d.in[d.i] != '\x00' { - d.i++ - } - if d.i >= end { - corrupted() - } - d.i++ - e := d.readRaw(kind) - tmp = append(tmp, e) - if d.i >= end { - corrupted() - } - } - d.i++ // '\x00' - if d.i != end { - corrupted() - } - return tmp -} - -var typeSlice = reflect.TypeOf([]interface{}{}) -var typeIface = typeSlice.Elem() - -func (d *decoder) readDocElems(typ reflect.Type) reflect.Value { - docType := d.docType - d.docType = typ - slice := make([]DocElem, 0, 8) - d.readDocWith(func(kind byte, name string) { - e := DocElem{Name: name} - v := reflect.ValueOf(&e.Value) - if d.readElemTo(v.Elem(), kind) { - slice = append(slice, e) - } - }) - slicev := reflect.New(typ).Elem() - slicev.Set(reflect.ValueOf(slice)) - d.docType = 
docType - return slicev -} - -func (d *decoder) readRawDocElems(typ reflect.Type) reflect.Value { - docType := d.docType - d.docType = typ - slice := make([]RawDocElem, 0, 8) - d.readDocWith(func(kind byte, name string) { - e := RawDocElem{Name: name, Value: d.readRaw(kind)} - slice = append(slice, e) - }) - slicev := reflect.New(typ).Elem() - slicev.Set(reflect.ValueOf(slice)) - d.docType = docType - return slicev -} - -func (d *decoder) readDocWith(f func(kind byte, name string)) { - end := int(d.readInt32()) - end += d.i - 4 - if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' { - corrupted() - } - for d.in[d.i] != '\x00' { - kind := d.readByte() - name := d.readCStr() - if d.i >= end { - corrupted() - } - f(kind, name) - if d.i >= end { - corrupted() - } - } - d.i++ // '\x00' - if d.i != end { - corrupted() - } -} - -// -------------------------------------------------------------------------- -// Unmarshaling of individual elements within a document. -func (d *decoder) dropElem(kind byte) { - size, err := BSONElementSize(kind, d.i, d.in) - if err != nil { - corrupted() - } - if d.i+size > len(d.in) { - corrupted() - } - d.i += size -} - -// Attempt to decode an element from the document and put it into out. -// If the types are not compatible, the returned ok value will be -// false and out will be unchanged. -func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) { - outt := out.Type() - - if outt == typeRaw { - out.Set(reflect.ValueOf(d.readRaw(kind))) - return true - } - - if outt == typeRawPtr { - raw := d.readRaw(kind) - out.Set(reflect.ValueOf(&raw)) - return true - } - - if kind == ElementDocument { - // Delegate unmarshaling of documents. - outt := out.Type() - outk := out.Kind() - switch outk { - case reflect.Interface, reflect.Ptr, reflect.Struct, reflect.Map: - d.readDocTo(out) - return true - } - if setterStyle(outt) != setterNone { - d.readDocTo(out) - return true - } - if outk == reflect.Slice { - switch outt.Elem() { - case typeDocElem: - out.Set(d.readDocElems(outt)) - case typeRawDocElem: - out.Set(d.readRawDocElems(outt)) - default: - d.dropElem(kind) - } - return true - } - d.dropElem(kind) - return true - } - - if setter := getSetter(outt, out); setter != nil { - err := setter.SetBSON(d.readRaw(kind)) - if err == ErrSetZero { - out.Set(reflect.Zero(outt)) - return true - } - if err == nil { - return true - } - if _, ok := err.(*TypeError); !ok { - panic(err) - } - return false - } - - var in interface{} - - switch kind { - case ElementFloat64: - in = d.readFloat64() - case ElementString: - in = d.readStr() - case ElementDocument: - panic("Can't happen. Handled above.") - case ElementArray: - outt := out.Type() - if setterStyle(outt) != setterNone { - // Skip the value so its data is handed to the setter below. - d.dropElem(kind) - break - } - for outt.Kind() == reflect.Ptr { - outt = outt.Elem() - } - switch outt.Kind() { - case reflect.Array: - d.readArrayDocTo(out) - return true - case reflect.Slice: - in = d.readSliceDoc(outt) - default: - in = d.readSliceDoc(typeSlice) - } - case ElementBinary: - b := d.readBinary() - if b.Kind == BinaryGeneric || b.Kind == BinaryBinaryOld { - in = b.Data - } else { - in = b - } - case Element06: // Undefined (obsolete, but still seen in the wild) - in = Undefined - case ElementObjectId: - in = ObjectId(d.readBytes(12)) - case ElementBool: - in = d.readBool() - case ElementDatetime: // Timestamp - // MongoDB handles timestamps as milliseconds. 
- i := d.readInt64() - if i == -62135596800000 { - in = time.Time{} // In UTC for convenience. - } else { - in = time.Unix(i/1e3, i%1e3*1e6).UTC() - } - case ElementNil: - in = nil - case ElementRegEx: - in = d.readRegEx() - case ElementDBPointer: - in = DBPointer{Namespace: d.readStr(), Id: ObjectId(d.readBytes(12))} - case ElementJavaScriptWithoutScope: - in = JavaScript{Code: d.readStr()} - case ElementSymbol: - in = Symbol(d.readStr()) - case ElementJavaScriptWithScope: - start := d.i - l := int(d.readInt32()) - js := JavaScript{d.readStr(), make(M)} - d.readDocTo(reflect.ValueOf(js.Scope)) - if d.i != start+l { - corrupted() - } - in = js - case ElementInt32: - in = int(d.readInt32()) - case ElementTimestamp: // Mongo-specific timestamp - in = MongoTimestamp(d.readInt64()) - case ElementInt64: - switch out.Type() { - case typeTimeDuration: - in = time.Duration(time.Duration(d.readInt64()) * time.Millisecond) - default: - in = d.readInt64() - } - case ElementDecimal128: - in = Decimal128{ - l: uint64(d.readInt64()), - h: uint64(d.readInt64()), - } - case ElementMaxKey: - in = MaxKey - case ElementMinKey: - in = MinKey - default: - panic(fmt.Sprintf("Unknown element kind (0x%02X)", kind)) - } - - if in == nil { - out.Set(reflect.Zero(outt)) - return true - } - - outk := outt.Kind() - - // Dereference and initialize pointer if necessary. - first := true - for outk == reflect.Ptr { - if !out.IsNil() { - out = out.Elem() - } else { - elem := reflect.New(outt.Elem()) - if first { - // Only set if value is compatible. - first = false - defer func(out, elem reflect.Value) { - if good { - out.Set(elem) - } - }(out, elem) - } else { - out.Set(elem) - } - out = elem - } - outt = out.Type() - outk = outt.Kind() - } - - inv := reflect.ValueOf(in) - if outt == inv.Type() { - out.Set(inv) - return true - } - - switch outk { - case reflect.Interface: - out.Set(inv) - return true - case reflect.String: - switch inv.Kind() { - case reflect.String: - out.SetString(inv.String()) - return true - case reflect.Slice: - if b, ok := in.([]byte); ok { - out.SetString(string(b)) - return true - } - case reflect.Int, reflect.Int64: - if outt == typeJSONNumber { - out.SetString(strconv.FormatInt(inv.Int(), 10)) - return true - } - case reflect.Float64: - if outt == typeJSONNumber { - out.SetString(strconv.FormatFloat(inv.Float(), 'f', -1, 64)) - return true - } - } - case reflect.Slice, reflect.Array: - // Remember, array (0x04) slices are built with the correct - // element type. If we are here, must be a cross BSON kind - // conversion (e.g. 0x05 unmarshalling on string). 
- if outt.Elem().Kind() != reflect.Uint8 { - break - } - switch inv.Kind() { - case reflect.String: - slice := []byte(inv.String()) - out.Set(reflect.ValueOf(slice)) - return true - case reflect.Slice: - switch outt.Kind() { - case reflect.Array: - reflect.Copy(out, inv) - case reflect.Slice: - out.SetBytes(inv.Bytes()) - } - return true - } - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - switch inv.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - out.SetInt(inv.Int()) - return true - case reflect.Float32, reflect.Float64: - out.SetInt(int64(inv.Float())) - return true - case reflect.Bool: - if inv.Bool() { - out.SetInt(1) - } else { - out.SetInt(0) - } - return true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - panic("can't happen: no uint types in BSON (!?)") - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - switch inv.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - out.SetUint(uint64(inv.Int())) - return true - case reflect.Float32, reflect.Float64: - out.SetUint(uint64(inv.Float())) - return true - case reflect.Bool: - if inv.Bool() { - out.SetUint(1) - } else { - out.SetUint(0) - } - return true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - panic("Can't happen. No uint types in BSON.") - } - case reflect.Float32, reflect.Float64: - switch inv.Kind() { - case reflect.Float32, reflect.Float64: - out.SetFloat(inv.Float()) - return true - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - out.SetFloat(float64(inv.Int())) - return true - case reflect.Bool: - if inv.Bool() { - out.SetFloat(1) - } else { - out.SetFloat(0) - } - return true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - panic("Can't happen. No uint types in BSON?") - } - case reflect.Bool: - switch inv.Kind() { - case reflect.Bool: - out.SetBool(inv.Bool()) - return true - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - out.SetBool(inv.Int() != 0) - return true - case reflect.Float32, reflect.Float64: - out.SetBool(inv.Float() != 0) - return true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - panic("Can't happen. No uint types in BSON?") - } - case reflect.Struct: - if outt == typeURL && inv.Kind() == reflect.String { - u, err := url.Parse(inv.String()) - if err != nil { - panic(err) - } - out.Set(reflect.ValueOf(u).Elem()) - return true - } - if outt == typeBinary { - if b, ok := in.([]byte); ok { - out.Set(reflect.ValueOf(Binary{Data: b})) - return true - } - } - } - - return false -} - -// -------------------------------------------------------------------------- -// Parsers of basic types. - -func (d *decoder) readRegEx() RegEx { - re := RegEx{} - re.Pattern = d.readCStr() - re.Options = d.readCStr() - return re -} - -func (d *decoder) readBinary() Binary { - l := d.readInt32() - b := Binary{} - b.Kind = d.readByte() - if b.Kind == BinaryBinaryOld && l > 4 { - // Weird obsolete format with redundant length. 
- rl := d.readInt32() - if rl != l-4 { - corrupted() - } - l = rl - } - b.Data = d.readBytes(l) - return b -} - -func (d *decoder) readStr() string { - l := d.readInt32() - b := d.readBytes(l - 1) - if d.readByte() != '\x00' { - corrupted() - } - return string(b) -} - -func (d *decoder) readCStr() string { - start := d.i - end := start - l := len(d.in) - for ; end != l; end++ { - if d.in[end] == '\x00' { - break - } - } - d.i = end + 1 - if d.i > l { - corrupted() - } - return string(d.in[start:end]) -} - -func (d *decoder) readBool() bool { - b := d.readByte() - if b == 0 { - return false - } - if b == 1 { - return true - } - panic(fmt.Sprintf("encoded boolean must be 1 or 0, found %d", b)) -} - -func (d *decoder) readFloat64() float64 { - return math.Float64frombits(uint64(d.readInt64())) -} - -func (d *decoder) readInt32() int32 { - b := d.readBytes(4) - return int32((uint32(b[0]) << 0) | - (uint32(b[1]) << 8) | - (uint32(b[2]) << 16) | - (uint32(b[3]) << 24)) -} - -func getSize(offset int, b []byte) (int, error) { - if offset+4 > len(b) { - return 0, io.ErrUnexpectedEOF - } - return int((uint32(b[offset]) << 0) | - (uint32(b[offset+1]) << 8) | - (uint32(b[offset+2]) << 16) | - (uint32(b[offset+3]) << 24)), nil -} - -func (d *decoder) readInt64() int64 { - b := d.readBytes(8) - return int64((uint64(b[0]) << 0) | - (uint64(b[1]) << 8) | - (uint64(b[2]) << 16) | - (uint64(b[3]) << 24) | - (uint64(b[4]) << 32) | - (uint64(b[5]) << 40) | - (uint64(b[6]) << 48) | - (uint64(b[7]) << 56)) -} - -func (d *decoder) readByte() byte { - i := d.i - d.i++ - if d.i > len(d.in) { - corrupted() - } - return d.in[i] -} - -func (d *decoder) readBytes(length int32) []byte { - if length < 0 { - corrupted() - } - start := d.i - d.i += int(length) - if d.i < start || d.i > len(d.in) { - corrupted() - } - return d.in[start : start+int(length)] -} diff --git a/vendor/github.com/globalsign/mgo/bson/encode.go b/vendor/github.com/globalsign/mgo/bson/encode.go deleted file mode 100644 index d0c6b2a8..00000000 --- a/vendor/github.com/globalsign/mgo/bson/encode.go +++ /dev/null @@ -1,645 +0,0 @@ -// BSON library for Go -// -// Copyright (c) 2010-2012 - Gustavo Niemeyer -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// gobson - BSON library for Go. 
- -package bson - -import ( - "encoding/json" - "fmt" - "math" - "net/url" - "reflect" - "sort" - "strconv" - "sync" - "time" -) - -// -------------------------------------------------------------------------- -// Some internal infrastructure. - -var ( - typeBinary = reflect.TypeOf(Binary{}) - typeObjectId = reflect.TypeOf(ObjectId("")) - typeDBPointer = reflect.TypeOf(DBPointer{"", ObjectId("")}) - typeSymbol = reflect.TypeOf(Symbol("")) - typeMongoTimestamp = reflect.TypeOf(MongoTimestamp(0)) - typeOrderKey = reflect.TypeOf(MinKey) - typeDocElem = reflect.TypeOf(DocElem{}) - typeRawDocElem = reflect.TypeOf(RawDocElem{}) - typeRaw = reflect.TypeOf(Raw{}) - typeRawPtr = reflect.PtrTo(reflect.TypeOf(Raw{})) - typeURL = reflect.TypeOf(url.URL{}) - typeTime = reflect.TypeOf(time.Time{}) - typeString = reflect.TypeOf("") - typeJSONNumber = reflect.TypeOf(json.Number("")) - typeTimeDuration = reflect.TypeOf(time.Duration(0)) -) - -var ( - // spec for []uint8 or []byte encoding - arrayOps = map[string]bool{ - "$in": true, - "$nin": true, - "$all": true, - } -) - -const itoaCacheSize = 32 - -const ( - getterUnknown = iota - getterNone - getterTypeVal - getterTypePtr - getterAddr -) - -var itoaCache []string - -var getterStyles map[reflect.Type]int -var getterIface reflect.Type -var getterMutex sync.RWMutex - -func init() { - itoaCache = make([]string, itoaCacheSize) - for i := 0; i != itoaCacheSize; i++ { - itoaCache[i] = strconv.Itoa(i) - } - var iface Getter - getterIface = reflect.TypeOf(&iface).Elem() - getterStyles = make(map[reflect.Type]int) -} - -func itoa(i int) string { - if i < itoaCacheSize { - return itoaCache[i] - } - return strconv.Itoa(i) -} - -func getterStyle(outt reflect.Type) int { - getterMutex.RLock() - style := getterStyles[outt] - getterMutex.RUnlock() - if style != getterUnknown { - return style - } - - getterMutex.Lock() - defer getterMutex.Unlock() - if outt.Implements(getterIface) { - vt := outt - for vt.Kind() == reflect.Ptr { - vt = vt.Elem() - } - if vt.Implements(getterIface) { - style = getterTypeVal - } else { - style = getterTypePtr - } - } else if reflect.PtrTo(outt).Implements(getterIface) { - style = getterAddr - } else { - style = getterNone - } - getterStyles[outt] = style - return style -} - -func getGetter(outt reflect.Type, out reflect.Value) Getter { - style := getterStyle(outt) - if style == getterNone { - return nil - } - if style == getterAddr { - if !out.CanAddr() { - return nil - } - return out.Addr().Interface().(Getter) - } - if style == getterTypeVal && out.Kind() == reflect.Ptr && out.IsNil() { - return nil - } - return out.Interface().(Getter) -} - -// -------------------------------------------------------------------------- -// Marshaling of the document value itself. - -type encoder struct { - out []byte -} - -func (e *encoder) addDoc(v reflect.Value) { - for { - if vi, ok := v.Interface().(Getter); ok { - getv, err := vi.GetBSON() - if err != nil { - panic(err) - } - v = reflect.ValueOf(getv) - continue - } - if v.Kind() == reflect.Ptr { - v = v.Elem() - continue - } - break - } - - if v.Type() == typeRaw { - raw := v.Interface().(Raw) - if raw.Kind != 0x03 && raw.Kind != 0x00 { - panic("Attempted to marshal Raw kind " + strconv.Itoa(int(raw.Kind)) + " as a document") - } - if len(raw.Data) == 0 { - panic("Attempted to marshal empty Raw document") - } - e.addBytes(raw.Data...) 
- return - } - - start := e.reserveInt32() - - switch v.Kind() { - case reflect.Map: - e.addMap(v) - case reflect.Struct: - e.addStruct(v) - case reflect.Array, reflect.Slice: - e.addSlice(v) - default: - panic("Can't marshal " + v.Type().String() + " as a BSON document") - } - - e.addBytes(0) - e.setInt32(start, int32(len(e.out)-start)) -} - -func (e *encoder) addMap(v reflect.Value) { - for _, k := range v.MapKeys() { - e.addElem(fmt.Sprint(k), v.MapIndex(k), false) - } -} - -func (e *encoder) addStruct(v reflect.Value) { - sinfo, err := getStructInfo(v.Type()) - if err != nil { - panic(err) - } - var value reflect.Value - if sinfo.InlineMap >= 0 { - m := v.Field(sinfo.InlineMap) - if m.Len() > 0 { - for _, k := range m.MapKeys() { - ks := k.String() - if _, found := sinfo.FieldsMap[ks]; found { - panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", ks)) - } - e.addElem(ks, m.MapIndex(k), false) - } - } - } - for _, info := range sinfo.FieldsList { - if info.Inline == nil { - value = v.Field(info.Num) - } else { - // as pointers to struct are allowed here, - // there is no guarantee that pointer won't be nil. - // - // It is expected allowed behaviour - // so info.Inline MAY consist index to a nil pointer - // and that is why we safely call v.FieldByIndex and just continue on panic - field, errField := safeFieldByIndex(v, info.Inline) - if errField != nil { - continue - } - - value = field - } - if info.OmitEmpty && isZero(value) { - continue - } - if useRespectNilValues && - (value.Kind() == reflect.Slice || value.Kind() == reflect.Map) && - value.IsNil() { - e.addElem(info.Key, reflect.ValueOf(nil), info.MinSize) - continue - } - e.addElem(info.Key, value, info.MinSize) - } -} - -func safeFieldByIndex(v reflect.Value, index []int) (result reflect.Value, err error) { - defer func() { - if recovered := recover(); recovered != nil { - switch r := recovered.(type) { - case string: - err = fmt.Errorf("%s", r) - case error: - err = r - } - } - }() - - result = v.FieldByIndex(index) - return -} - -func isZero(v reflect.Value) bool { - switch v.Kind() { - case reflect.String: - return len(v.String()) == 0 - case reflect.Ptr, reflect.Interface: - return v.IsNil() - case reflect.Slice: - return v.Len() == 0 - case reflect.Map: - return v.Len() == 0 - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Struct: - vt := v.Type() - if vt == typeTime { - return v.Interface().(time.Time).IsZero() - } - for i := 0; i < v.NumField(); i++ { - if vt.Field(i).PkgPath != "" && !vt.Field(i).Anonymous { - continue // Private field - } - if !isZero(v.Field(i)) { - return false - } - } - return true - } - return false -} - -func (e *encoder) addSlice(v reflect.Value) { - vi := v.Interface() - if d, ok := vi.(D); ok { - for _, elem := range d { - e.addElem(elem.Name, reflect.ValueOf(elem.Value), false) - } - return - } - if d, ok := vi.(RawD); ok { - for _, elem := range d { - e.addElem(elem.Name, reflect.ValueOf(elem.Value), false) - } - return - } - l := v.Len() - et := v.Type().Elem() - if et == typeDocElem { - for i := 0; i < l; i++ { - elem := v.Index(i).Interface().(DocElem) - e.addElem(elem.Name, reflect.ValueOf(elem.Value), false) - } - return - } - if et == typeRawDocElem { - for i := 0; 
i < l; i++ { - elem := v.Index(i).Interface().(RawDocElem) - e.addElem(elem.Name, reflect.ValueOf(elem.Value), false) - } - return - } - for i := 0; i < l; i++ { - e.addElem(itoa(i), v.Index(i), false) - } -} - -// -------------------------------------------------------------------------- -// Marshaling of elements in a document. - -func (e *encoder) addElemName(kind byte, name string) { - e.addBytes(kind) - e.addBytes([]byte(name)...) - e.addBytes(0) -} - -func (e *encoder) addElem(name string, v reflect.Value, minSize bool) { - - if !v.IsValid() { - e.addElemName(0x0A, name) - return - } - - if getter := getGetter(v.Type(), v); getter != nil { - getv, err := getter.GetBSON() - if err != nil { - panic(err) - } - e.addElem(name, reflect.ValueOf(getv), minSize) - return - } - - switch v.Kind() { - - case reflect.Interface: - e.addElem(name, v.Elem(), minSize) - - case reflect.Ptr: - e.addElem(name, v.Elem(), minSize) - - case reflect.String: - s := v.String() - switch v.Type() { - case typeObjectId: - if len(s) != 12 { - panic("ObjectIDs must be exactly 12 bytes long (got " + - strconv.Itoa(len(s)) + ")") - } - e.addElemName(0x07, name) - e.addBytes([]byte(s)...) - case typeSymbol: - e.addElemName(0x0E, name) - e.addStr(s) - case typeJSONNumber: - n := v.Interface().(json.Number) - if i, err := n.Int64(); err == nil { - e.addElemName(0x12, name) - e.addInt64(i) - } else if f, err := n.Float64(); err == nil { - e.addElemName(0x01, name) - e.addFloat64(f) - } else { - panic("failed to convert json.Number to a number: " + s) - } - default: - e.addElemName(0x02, name) - e.addStr(s) - } - - case reflect.Float32, reflect.Float64: - e.addElemName(0x01, name) - e.addFloat64(v.Float()) - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - u := v.Uint() - if int64(u) < 0 { - panic("BSON has no uint64 type, and value is too large to fit correctly in an int64") - } else if u <= math.MaxInt32 && (minSize || v.Kind() <= reflect.Uint32) { - e.addElemName(0x10, name) - e.addInt32(int32(u)) - } else { - e.addElemName(0x12, name) - e.addInt64(int64(u)) - } - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - switch v.Type() { - case typeMongoTimestamp: - e.addElemName(0x11, name) - e.addInt64(v.Int()) - - case typeOrderKey: - if v.Int() == int64(MaxKey) { - e.addElemName(0x7F, name) - } else { - e.addElemName(0xFF, name) - } - case typeTimeDuration: - // Stored as int64 - e.addElemName(0x12, name) - - e.addInt64(int64(v.Int() / 1e6)) - default: - i := v.Int() - if (minSize || v.Type().Kind() != reflect.Int64) && i >= math.MinInt32 && i <= math.MaxInt32 { - // It fits into an int32, encode as such. 
- e.addElemName(0x10, name) - e.addInt32(int32(i)) - } else { - e.addElemName(0x12, name) - e.addInt64(i) - } - } - - case reflect.Bool: - e.addElemName(0x08, name) - if v.Bool() { - e.addBytes(1) - } else { - e.addBytes(0) - } - - case reflect.Map: - e.addElemName(0x03, name) - e.addDoc(v) - - case reflect.Slice: - vt := v.Type() - et := vt.Elem() - if et.Kind() == reflect.Uint8 { - if arrayOps[name] { - e.addElemName(0x04, name) - e.addDoc(v) - } else { - e.addElemName(0x05, name) - e.addBinary(0x00, v.Bytes()) - } - } else if et == typeDocElem || et == typeRawDocElem { - e.addElemName(0x03, name) - e.addDoc(v) - } else { - e.addElemName(0x04, name) - e.addDoc(v) - } - - case reflect.Array: - et := v.Type().Elem() - if et.Kind() == reflect.Uint8 { - if arrayOps[name] { - e.addElemName(0x04, name) - e.addDoc(v) - } else { - e.addElemName(0x05, name) - if v.CanAddr() { - e.addBinary(0x00, v.Slice(0, v.Len()).Interface().([]byte)) - } else { - n := v.Len() - e.addInt32(int32(n)) - e.addBytes(0x00) - for i := 0; i < n; i++ { - el := v.Index(i) - e.addBytes(byte(el.Uint())) - } - } - } - } else { - e.addElemName(0x04, name) - e.addDoc(v) - } - - case reflect.Struct: - switch s := v.Interface().(type) { - - case Raw: - kind := s.Kind - if kind == 0x00 { - kind = 0x03 - } - if len(s.Data) == 0 && kind != 0x06 && kind != 0x0A && kind != 0xFF && kind != 0x7F { - panic("Attempted to marshal empty Raw document") - } - e.addElemName(kind, name) - e.addBytes(s.Data...) - - case Binary: - e.addElemName(0x05, name) - e.addBinary(s.Kind, s.Data) - - case Decimal128: - e.addElemName(0x13, name) - e.addInt64(int64(s.l)) - e.addInt64(int64(s.h)) - - case DBPointer: - e.addElemName(0x0C, name) - e.addStr(s.Namespace) - if len(s.Id) != 12 { - panic("ObjectIDs must be exactly 12 bytes long (got " + - strconv.Itoa(len(s.Id)) + ")") - } - e.addBytes([]byte(s.Id)...) - - case RegEx: - e.addElemName(0x0B, name) - e.addCStr(s.Pattern) - options := runes(s.Options) - sort.Sort(options) - e.addCStr(string(options)) - - case JavaScript: - if s.Scope == nil { - e.addElemName(0x0D, name) - e.addStr(s.Code) - } else { - e.addElemName(0x0F, name) - start := e.reserveInt32() - e.addStr(s.Code) - e.addDoc(reflect.ValueOf(s.Scope)) - e.setInt32(start, int32(len(e.out)-start)) - } - - case time.Time: - // MongoDB handles timestamps as milliseconds. - e.addElemName(0x09, name) - e.addInt64(s.Unix()*1000 + int64(s.Nanosecond()/1e6)) - - case url.URL: - e.addElemName(0x02, name) - e.addStr(s.String()) - - case undefined: - e.addElemName(0x06, name) - - default: - e.addElemName(0x03, name) - e.addDoc(v) - } - - default: - panic("Can't marshal " + v.Type().String() + " in a BSON document") - } -} - -// ------------- -// Helper method for sorting regex options -type runes []rune - -func (a runes) Len() int { return len(a) } -func (a runes) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a runes) Less(i, j int) bool { return a[i] < a[j] } - -// -------------------------------------------------------------------------- -// Marshaling of base types. - -func (e *encoder) addBinary(subtype byte, v []byte) { - if subtype == 0x02 { - // Wonder how that brilliant idea came to life. Obsolete, luckily. - e.addInt32(int32(len(v) + 4)) - e.addBytes(subtype) - e.addInt32(int32(len(v))) - } else { - e.addInt32(int32(len(v))) - e.addBytes(subtype) - } - e.addBytes(v...) -} - -func (e *encoder) addStr(v string) { - e.addInt32(int32(len(v) + 1)) - e.addCStr(v) -} - -func (e *encoder) addCStr(v string) { - e.addBytes([]byte(v)...) 
- e.addBytes(0) -} - -func (e *encoder) reserveInt32() (pos int) { - pos = len(e.out) - e.addBytes(0, 0, 0, 0) - return pos -} - -func (e *encoder) setInt32(pos int, v int32) { - e.out[pos+0] = byte(v) - e.out[pos+1] = byte(v >> 8) - e.out[pos+2] = byte(v >> 16) - e.out[pos+3] = byte(v >> 24) -} - -func (e *encoder) addInt32(v int32) { - u := uint32(v) - e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24)) -} - -func (e *encoder) addInt64(v int64) { - u := uint64(v) - e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24), - byte(u>>32), byte(u>>40), byte(u>>48), byte(u>>56)) -} - -func (e *encoder) addFloat64(v float64) { - e.addInt64(int64(math.Float64bits(v))) -} - -func (e *encoder) addBytes(v ...byte) { - e.out = append(e.out, v...) -} diff --git a/vendor/github.com/globalsign/mgo/bson/json.go b/vendor/github.com/globalsign/mgo/bson/json.go deleted file mode 100644 index 045c7130..00000000 --- a/vendor/github.com/globalsign/mgo/bson/json.go +++ /dev/null @@ -1,384 +0,0 @@ -package bson - -import ( - "bytes" - "encoding/base64" - "fmt" - "strconv" - "strings" - "time" - - "github.com/globalsign/mgo/internal/json" -) - -// UnmarshalJSON unmarshals a JSON value that may hold non-standard -// syntax as defined in BSON's extended JSON specification. -func UnmarshalJSON(data []byte, value interface{}) error { - d := json.NewDecoder(bytes.NewBuffer(data)) - d.Extend(&jsonExt) - return d.Decode(value) -} - -// MarshalJSON marshals a JSON value that may hold non-standard -// syntax as defined in BSON's extended JSON specification. -func MarshalJSON(value interface{}) ([]byte, error) { - var buf bytes.Buffer - e := json.NewEncoder(&buf) - e.Extend(&jsonExt) - err := e.Encode(value) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// jdec is used internally by the JSON decoding functions -// so they may unmarshal functions without getting into endless -// recursion due to keyed objects. 
-func jdec(data []byte, value interface{}) error { - d := json.NewDecoder(bytes.NewBuffer(data)) - d.Extend(&funcExt) - return d.Decode(value) -} - -var jsonExt json.Extension -var funcExt json.Extension - -// TODO -// - Shell regular expressions ("/regexp/opts") - -func init() { - jsonExt.DecodeUnquotedKeys(true) - jsonExt.DecodeTrailingCommas(true) - - funcExt.DecodeFunc("BinData", "$binaryFunc", "$type", "$binary") - jsonExt.DecodeKeyed("$binary", jdecBinary) - jsonExt.DecodeKeyed("$binaryFunc", jdecBinary) - jsonExt.EncodeType([]byte(nil), jencBinarySlice) - jsonExt.EncodeType(Binary{}, jencBinaryType) - - funcExt.DecodeFunc("ISODate", "$dateFunc", "S") - funcExt.DecodeFunc("new Date", "$dateFunc", "S") - jsonExt.DecodeKeyed("$date", jdecDate) - jsonExt.DecodeKeyed("$dateFunc", jdecDate) - jsonExt.EncodeType(time.Time{}, jencDate) - - funcExt.DecodeFunc("Timestamp", "$timestamp", "t", "i") - jsonExt.DecodeKeyed("$timestamp", jdecTimestamp) - jsonExt.EncodeType(MongoTimestamp(0), jencTimestamp) - - funcExt.DecodeConst("undefined", Undefined) - - jsonExt.DecodeKeyed("$regex", jdecRegEx) - jsonExt.EncodeType(RegEx{}, jencRegEx) - - funcExt.DecodeFunc("ObjectId", "$oidFunc", "Id") - jsonExt.DecodeKeyed("$oid", jdecObjectId) - jsonExt.DecodeKeyed("$oidFunc", jdecObjectId) - jsonExt.EncodeType(ObjectId(""), jencObjectId) - - funcExt.DecodeFunc("DBRef", "$dbrefFunc", "$ref", "$id") - jsonExt.DecodeKeyed("$dbrefFunc", jdecDBRef) - - funcExt.DecodeFunc("NumberLong", "$numberLongFunc", "N") - jsonExt.DecodeKeyed("$numberLong", jdecNumberLong) - jsonExt.DecodeKeyed("$numberLongFunc", jdecNumberLong) - jsonExt.EncodeType(int64(0), jencNumberLong) - jsonExt.EncodeType(int(0), jencInt) - - funcExt.DecodeConst("MinKey", MinKey) - funcExt.DecodeConst("MaxKey", MaxKey) - jsonExt.DecodeKeyed("$minKey", jdecMinKey) - jsonExt.DecodeKeyed("$maxKey", jdecMaxKey) - jsonExt.EncodeType(orderKey(0), jencMinMaxKey) - - jsonExt.DecodeKeyed("$undefined", jdecUndefined) - jsonExt.EncodeType(Undefined, jencUndefined) - - jsonExt.Extend(&funcExt) -} - -func fbytes(format string, args ...interface{}) []byte { - var buf bytes.Buffer - fmt.Fprintf(&buf, format, args...) 
- return buf.Bytes() -} - -func jdecBinary(data []byte) (interface{}, error) { - var v struct { - Binary []byte `json:"$binary"` - Type string `json:"$type"` - Func struct { - Binary []byte `json:"$binary"` - Type int64 `json:"$type"` - } `json:"$binaryFunc"` - } - err := jdec(data, &v) - if err != nil { - return nil, err - } - - var binData []byte - var binKind int64 - if v.Type == "" && v.Binary == nil { - binData = v.Func.Binary - binKind = v.Func.Type - } else if v.Type == "" { - return v.Binary, nil - } else { - binData = v.Binary - binKind, err = strconv.ParseInt(v.Type, 0, 64) - if err != nil { - binKind = -1 - } - } - - if binKind == 0 { - return binData, nil - } - if binKind < 0 || binKind > 255 { - return nil, fmt.Errorf("invalid type in binary object: %s", data) - } - - return Binary{Kind: byte(binKind), Data: binData}, nil -} - -func jencBinarySlice(v interface{}) ([]byte, error) { - in := v.([]byte) - out := make([]byte, base64.StdEncoding.EncodedLen(len(in))) - base64.StdEncoding.Encode(out, in) - return fbytes(`{"$binary":"%s","$type":"0x0"}`, out), nil -} - -func jencBinaryType(v interface{}) ([]byte, error) { - in := v.(Binary) - out := make([]byte, base64.StdEncoding.EncodedLen(len(in.Data))) - base64.StdEncoding.Encode(out, in.Data) - return fbytes(`{"$binary":"%s","$type":"0x%x"}`, out, in.Kind), nil -} - -const jdateFormat = "2006-01-02T15:04:05.999Z07:00" - -func jdecDate(data []byte) (interface{}, error) { - var v struct { - S string `json:"$date"` - Func struct { - S string - } `json:"$dateFunc"` - } - _ = jdec(data, &v) - if v.S == "" { - v.S = v.Func.S - } - if v.S != "" { - var errs []string - for _, format := range []string{jdateFormat, "2006-01-02"} { - t, err := time.Parse(format, v.S) - if err == nil { - return t, nil - } - errs = append(errs, err.Error()) - } - return nil, fmt.Errorf("cannot parse date: %q [%s]", v.S, strings.Join(errs, ", ")) - } - - var vn struct { - Date struct { - N int64 `json:"$numberLong,string"` - } `json:"$date"` - Func struct { - S int64 - } `json:"$dateFunc"` - } - err := jdec(data, &vn) - if err != nil { - return nil, fmt.Errorf("cannot parse date: %q", data) - } - n := vn.Date.N - if n == 0 { - n = vn.Func.S - } - return time.Unix(n/1000, n%1000*1e6).UTC(), nil -} - -func jencDate(v interface{}) ([]byte, error) { - t := v.(time.Time) - return fbytes(`{"$date":%q}`, t.Format(jdateFormat)), nil -} - -func jdecTimestamp(data []byte) (interface{}, error) { - var v struct { - Func struct { - T int32 `json:"t"` - I int32 `json:"i"` - } `json:"$timestamp"` - } - err := jdec(data, &v) - if err != nil { - return nil, err - } - return MongoTimestamp(uint64(v.Func.T)<<32 | uint64(uint32(v.Func.I))), nil -} - -func jencTimestamp(v interface{}) ([]byte, error) { - ts := uint64(v.(MongoTimestamp)) - return fbytes(`{"$timestamp":{"t":%d,"i":%d}}`, ts>>32, uint32(ts)), nil -} - -func jdecRegEx(data []byte) (interface{}, error) { - var v struct { - Regex string `json:"$regex"` - Options string `json:"$options"` - } - err := jdec(data, &v) - if err != nil { - return nil, err - } - return RegEx{v.Regex, v.Options}, nil -} - -func jencRegEx(v interface{}) ([]byte, error) { - re := v.(RegEx) - type regex struct { - Regex string `json:"$regex"` - Options string `json:"$options"` - } - return json.Marshal(regex{re.Pattern, re.Options}) -} - -func jdecObjectId(data []byte) (interface{}, error) { - var v struct { - Id string `json:"$oid"` - Func struct { - Id string - } `json:"$oidFunc"` - } - err := jdec(data, &v) - if err != nil { - return nil, err - 
} - if v.Id == "" { - v.Id = v.Func.Id - } - return ObjectIdHex(v.Id), nil -} - -func jencObjectId(v interface{}) ([]byte, error) { - return fbytes(`{"$oid":"%s"}`, v.(ObjectId).Hex()), nil -} - -func jdecDBRef(data []byte) (interface{}, error) { - // TODO Support unmarshaling $ref and $id into the input value. - var v struct { - Obj map[string]interface{} `json:"$dbrefFunc"` - } - // TODO Fix this. Must not be required. - v.Obj = make(map[string]interface{}) - err := jdec(data, &v) - if err != nil { - return nil, err - } - return v.Obj, nil -} - -func jdecNumberLong(data []byte) (interface{}, error) { - var v struct { - N int64 `json:"$numberLong,string"` - Func struct { - N int64 `json:",string"` - } `json:"$numberLongFunc"` - } - var vn struct { - N int64 `json:"$numberLong"` - Func struct { - N int64 - } `json:"$numberLongFunc"` - } - err := jdec(data, &v) - if err != nil { - err = jdec(data, &vn) - v.N = vn.N - v.Func.N = vn.Func.N - } - if err != nil { - return nil, err - } - if v.N != 0 { - return v.N, nil - } - return v.Func.N, nil -} - -func jencNumberLong(v interface{}) ([]byte, error) { - n := v.(int64) - f := `{"$numberLong":"%d"}` - if n <= 1<<53 { - f = `{"$numberLong":%d}` - } - return fbytes(f, n), nil -} - -func jencInt(v interface{}) ([]byte, error) { - n := v.(int) - f := `{"$numberLong":"%d"}` - if int64(n) <= 1<<53 { - f = `%d` - } - return fbytes(f, n), nil -} - -func jdecMinKey(data []byte) (interface{}, error) { - var v struct { - N int64 `json:"$minKey"` - } - err := jdec(data, &v) - if err != nil { - return nil, err - } - if v.N != 1 { - return nil, fmt.Errorf("invalid $minKey object: %s", data) - } - return MinKey, nil -} - -func jdecMaxKey(data []byte) (interface{}, error) { - var v struct { - N int64 `json:"$maxKey"` - } - err := jdec(data, &v) - if err != nil { - return nil, err - } - if v.N != 1 { - return nil, fmt.Errorf("invalid $maxKey object: %s", data) - } - return MaxKey, nil -} - -func jencMinMaxKey(v interface{}) ([]byte, error) { - switch v.(orderKey) { - case MinKey: - return []byte(`{"$minKey":1}`), nil - case MaxKey: - return []byte(`{"$maxKey":1}`), nil - } - panic(fmt.Sprintf("invalid $minKey/$maxKey value: %d", v)) -} - -func jdecUndefined(data []byte) (interface{}, error) { - var v struct { - B bool `json:"$undefined"` - } - err := jdec(data, &v) - if err != nil { - return nil, err - } - if !v.B { - return nil, fmt.Errorf("invalid $undefined object: %s", data) - } - return Undefined, nil -} - -func jencUndefined(v interface{}) ([]byte, error) { - return []byte(`{"$undefined":true}`), nil -} diff --git a/vendor/github.com/globalsign/mgo/bson/stream.go b/vendor/github.com/globalsign/mgo/bson/stream.go deleted file mode 100644 index 46652845..00000000 --- a/vendor/github.com/globalsign/mgo/bson/stream.go +++ /dev/null @@ -1,90 +0,0 @@ -package bson - -import ( - "bytes" - "encoding/binary" - "fmt" - "io" -) - -const ( - // MinDocumentSize is the size of the smallest possible valid BSON document: - // an int32 size header + 0x00 (end of document). - MinDocumentSize = 5 - - // MaxDocumentSize is the largest possible size for a BSON document allowed by MongoDB, - // that is, 16 MiB (see https://docs.mongodb.com/manual/reference/limits/). - MaxDocumentSize = 16777216 -) - -// ErrInvalidDocumentSize is an error returned when a BSON document's header -// contains a size smaller than MinDocumentSize or greater than MaxDocumentSize. 
-type ErrInvalidDocumentSize struct { - DocumentSize int32 -} - -func (e ErrInvalidDocumentSize) Error() string { - return fmt.Sprintf("invalid document size %d", e.DocumentSize) -} - -// A Decoder reads and decodes BSON values from an input stream. -type Decoder struct { - source io.Reader -} - -// NewDecoder returns a new Decoder that reads from source. -// It does not add any extra buffering, and may not read data from source beyond the BSON values requested. -func NewDecoder(source io.Reader) *Decoder { - return &Decoder{source: source} -} - -// Decode reads the next BSON-encoded value from its input and stores it in the value pointed to by v. -// See the documentation for Unmarshal for details about the conversion of BSON into a Go value. -func (dec *Decoder) Decode(v interface{}) (err error) { - // BSON documents start with their size as a *signed* int32. - var docSize int32 - if err = binary.Read(dec.source, binary.LittleEndian, &docSize); err != nil { - return - } - - if docSize < MinDocumentSize || docSize > MaxDocumentSize { - return ErrInvalidDocumentSize{DocumentSize: docSize} - } - - docBuffer := bytes.NewBuffer(make([]byte, 0, docSize)) - if err = binary.Write(docBuffer, binary.LittleEndian, docSize); err != nil { - return - } - - // docSize is the *full* document's size (including the 4-byte size header, - // which has already been read). - if _, err = io.CopyN(docBuffer, dec.source, int64(docSize-4)); err != nil { - return - } - - // Let Unmarshal handle the rest. - defer handleErr(&err) - return Unmarshal(docBuffer.Bytes(), v) -} - -// An Encoder encodes and writes BSON values to an output stream. -type Encoder struct { - target io.Writer -} - -// NewEncoder returns a new Encoder that writes to target. -func NewEncoder(target io.Writer) *Encoder { - return &Encoder{target: target} -} - -// Encode encodes v to BSON, and if successful writes it to the Encoder's output stream. -// See the documentation for Marshal for details about the conversion of Go values to BSON. -func (enc *Encoder) Encode(v interface{}) error { - data, err := Marshal(v) - if err != nil { - return err - } - - _, err = enc.target.Write(data) - return err -} diff --git a/vendor/github.com/globalsign/mgo/bulk.go b/vendor/github.com/globalsign/mgo/bulk.go deleted file mode 100644 index c234fcce..00000000 --- a/vendor/github.com/globalsign/mgo/bulk.go +++ /dev/null @@ -1,366 +0,0 @@ -package mgo - -import ( - "bytes" - "sort" - "sync" - - "github.com/globalsign/mgo/bson" -) - -// Bulk represents an operation that can be prepared with several -// orthogonal changes before being delivered to the server. -// -// MongoDB servers older than version 2.6 do not have proper support for bulk -// operations, so the driver attempts to map its API as much as possible into -// the functionality that works. In particular, in those releases updates and -// removals are sent individually, and inserts are sent in bulk but have -// suboptimal error reporting compared to more recent versions of the server. -// See the documentation of BulkErrorCase for details on that. 
-
-// Relevant documentation:
-//
-// http://blog.mongodb.org/post/84922794768/mongodbs-new-bulk-api
-//
-type Bulk struct {
- c *Collection
- opcount int
- actions []bulkAction
- ordered bool
-}
-
-type bulkOp int
-
-const (
- bulkInsert bulkOp = iota + 1
- bulkUpdate
- bulkUpdateAll
- bulkRemove
-)
-
-type bulkAction struct {
- op bulkOp
- docs []interface{}
- idxs []int
-}
-
-type bulkUpdateOp []interface{}
-type bulkDeleteOp []interface{}
-
-// BulkResult holds the results for a bulk operation.
-type BulkResult struct {
- Matched int
- Modified int // Available only for MongoDB 2.6+
-
- // Be conservative while we understand exactly how to report these
- // results in a useful and convenient way, and also how to emulate
- // them with prior servers.
- private bool
-}
-
-// BulkError holds an error returned from running a Bulk operation.
-// Individual errors may be obtained and inspected via the Cases method.
-type BulkError struct {
- ecases []BulkErrorCase
-}
-
-func (e *BulkError) Error() string {
- if len(e.ecases) == 0 {
- return "invalid BulkError instance: no errors"
- }
- if len(e.ecases) == 1 {
- return e.ecases[0].Err.Error()
- }
- msgs := make([]string, 0, len(e.ecases))
- seen := make(map[string]bool)
- for _, ecase := range e.ecases {
- msg := ecase.Err.Error()
- if !seen[msg] {
- seen[msg] = true
- msgs = append(msgs, msg)
- }
- }
- if len(msgs) == 1 {
- return msgs[0]
- }
- var buf bytes.Buffer
- buf.WriteString("multiple errors in bulk operation:\n")
- for _, msg := range msgs {
- buf.WriteString(" - ")
- buf.WriteString(msg)
- buf.WriteByte('\n')
- }
- return buf.String()
-}
-
-type bulkErrorCases []BulkErrorCase
-
-func (slice bulkErrorCases) Len() int { return len(slice) }
-func (slice bulkErrorCases) Less(i, j int) bool { return slice[i].Index < slice[j].Index }
-func (slice bulkErrorCases) Swap(i, j int) { slice[i], slice[j] = slice[j], slice[i] }
-
-// BulkErrorCase holds an individual error found while attempting a single change
-// within a bulk operation, and the position in which it was enqueued.
-//
-// MongoDB servers older than version 2.6 do not have proper support for bulk
-// operations, so the driver attempts to map its API as much as possible into
-// the functionality that works. In particular, only the last error is reported
-// for bulk inserts and without any positional information, so the Index
-// field is set to -1 in these cases.
-type BulkErrorCase struct {
- Index int // Position of operation that failed, or -1 if unknown.
- Err error
-}
-
-// Cases returns all individual errors found while attempting the requested changes.
-//
-// See the documentation of BulkErrorCase for limitations in older MongoDB releases.
-func (e *BulkError) Cases() []BulkErrorCase {
- return e.ecases
-}
-
-var actionPool = sync.Pool{
- New: func() interface{} {
- return &bulkAction{
- docs: make([]interface{}, 0),
- idxs: make([]int, 0),
- }
- },
-}
-
-// Bulk returns a value to prepare the execution of a bulk operation.
-func (c *Collection) Bulk() *Bulk {
- return &Bulk{c: c, ordered: true}
-}
-
-// Unordered puts the bulk operation in unordered mode.
-//
-// In unordered mode the individual operations may be sent
-// out of order, which means later operations may proceed
-// even if prior ones have failed.
-func (b *Bulk) Unordered() { - b.ordered = false -} - -func (b *Bulk) action(op bulkOp, opcount int) *bulkAction { - var action *bulkAction - if len(b.actions) > 0 && b.actions[len(b.actions)-1].op == op { - action = &b.actions[len(b.actions)-1] - } else if !b.ordered { - for i := range b.actions { - if b.actions[i].op == op { - action = &b.actions[i] - break - } - } - } - if action == nil { - a := actionPool.Get().(*bulkAction) - a.op = op - b.actions = append(b.actions, *a) - action = &b.actions[len(b.actions)-1] - } - for i := 0; i < opcount; i++ { - action.idxs = append(action.idxs, b.opcount) - b.opcount++ - } - return action -} - -// Insert queues up the provided documents for insertion. -func (b *Bulk) Insert(docs ...interface{}) { - action := b.action(bulkInsert, len(docs)) - action.docs = append(action.docs, docs...) -} - -// Remove queues up the provided selectors for removing matching documents. -// Each selector will remove only a single matching document. -func (b *Bulk) Remove(selectors ...interface{}) { - action := b.action(bulkRemove, len(selectors)) - for _, selector := range selectors { - if selector == nil { - selector = bson.D{} - } - action.docs = append(action.docs, &deleteOp{ - Collection: b.c.FullName, - Selector: selector, - Flags: 1, - Limit: 1, - }) - } -} - -// RemoveAll queues up the provided selectors for removing all matching documents. -// Each selector will remove all matching documents. -func (b *Bulk) RemoveAll(selectors ...interface{}) { - action := b.action(bulkRemove, len(selectors)) - for _, selector := range selectors { - if selector == nil { - selector = bson.D{} - } - action.docs = append(action.docs, &deleteOp{ - Collection: b.c.FullName, - Selector: selector, - Flags: 0, - Limit: 0, - }) - } -} - -// Update queues up the provided pairs of updating instructions. -// The first element of each pair selects which documents must be -// updated, and the second element defines how to update it. -// Each pair matches exactly one document for updating at most. -func (b *Bulk) Update(pairs ...interface{}) { - if len(pairs)%2 != 0 { - panic("Bulk.Update requires an even number of parameters") - } - action := b.action(bulkUpdate, len(pairs)/2) - for i := 0; i < len(pairs); i += 2 { - selector := pairs[i] - if selector == nil { - selector = bson.D{} - } - action.docs = append(action.docs, &updateOp{ - Collection: b.c.FullName, - Selector: selector, - Update: pairs[i+1], - }) - } -} - -// UpdateAll queues up the provided pairs of updating instructions. -// The first element of each pair selects which documents must be -// updated, and the second element defines how to update it. -// Each pair updates all documents matching the selector. -func (b *Bulk) UpdateAll(pairs ...interface{}) { - if len(pairs)%2 != 0 { - panic("Bulk.UpdateAll requires an even number of parameters") - } - action := b.action(bulkUpdate, len(pairs)/2) - for i := 0; i < len(pairs); i += 2 { - selector := pairs[i] - if selector == nil { - selector = bson.D{} - } - action.docs = append(action.docs, &updateOp{ - Collection: b.c.FullName, - Selector: selector, - Update: pairs[i+1], - Flags: 2, - Multi: true, - }) - } -} - -// Upsert queues up the provided pairs of upserting instructions. -// The first element of each pair selects which documents must be -// updated, and the second element defines how to update it. -// Each pair matches exactly one document for updating at most. 
-func (b *Bulk) Upsert(pairs ...interface{}) { - if len(pairs)%2 != 0 { - panic("Bulk.Update requires an even number of parameters") - } - action := b.action(bulkUpdate, len(pairs)/2) - for i := 0; i < len(pairs); i += 2 { - selector := pairs[i] - if selector == nil { - selector = bson.D{} - } - action.docs = append(action.docs, &updateOp{ - Collection: b.c.FullName, - Selector: selector, - Update: pairs[i+1], - Flags: 1, - Upsert: true, - }) - } -} - -// Run runs all the operations queued up. -// -// If an error is reported on an unordered bulk operation, the error value may -// be an aggregation of all issues observed. As an exception to that, Insert -// operations running on MongoDB versions prior to 2.6 will report the last -// error only due to a limitation in the wire protocol. -func (b *Bulk) Run() (*BulkResult, error) { - var result BulkResult - var berr BulkError - var failed bool - for i := range b.actions { - action := &b.actions[i] - var ok bool - switch action.op { - case bulkInsert: - ok = b.runInsert(action, &result, &berr) - case bulkUpdate: - ok = b.runUpdate(action, &result, &berr) - case bulkRemove: - ok = b.runRemove(action, &result, &berr) - default: - panic("unknown bulk operation") - } - action.idxs = action.idxs[0:0] - action.docs = action.docs[0:0] - actionPool.Put(action) - if !ok { - failed = true - if b.ordered { - break - } - } - } - if failed { - sort.Sort(bulkErrorCases(berr.ecases)) - return nil, &berr - } - return &result, nil -} - -func (b *Bulk) runInsert(action *bulkAction, result *BulkResult, berr *BulkError) bool { - op := &insertOp{b.c.FullName, action.docs, 0} - if !b.ordered { - op.flags = 1 // ContinueOnError - } - lerr, err := b.c.writeOp(op, b.ordered) - return b.checkSuccess(action, berr, lerr, err) -} - -func (b *Bulk) runUpdate(action *bulkAction, result *BulkResult, berr *BulkError) bool { - lerr, err := b.c.writeOp(bulkUpdateOp(action.docs), b.ordered) - if lerr != nil { - result.Matched += lerr.N - result.Modified += lerr.modified - } - return b.checkSuccess(action, berr, lerr, err) -} - -func (b *Bulk) runRemove(action *bulkAction, result *BulkResult, berr *BulkError) bool { - lerr, err := b.c.writeOp(bulkDeleteOp(action.docs), b.ordered) - if lerr != nil { - result.Matched += lerr.N - result.Modified += lerr.modified - } - return b.checkSuccess(action, berr, lerr, err) -} - -func (b *Bulk) checkSuccess(action *bulkAction, berr *BulkError, lerr *LastError, err error) bool { - if lerr != nil && len(lerr.ecases) > 0 { - for i := 0; i < len(lerr.ecases); i++ { - // Map back from the local error index into the visible one. 
- ecase := lerr.ecases[i]
- idx := ecase.Index
- if idx >= 0 {
- idx = action.idxs[idx]
- }
- berr.ecases = append(berr.ecases, BulkErrorCase{idx, ecase.Err})
- }
- return false
- } else if err != nil {
- for i := 0; i < len(action.idxs); i++ {
- berr.ecases = append(berr.ecases, BulkErrorCase{action.idxs[i], err})
- }
- return false
- }
- return true
-}
diff --git a/vendor/github.com/globalsign/mgo/changestreams.go b/vendor/github.com/globalsign/mgo/changestreams.go
deleted file mode 100644
index 5c2279c6..00000000
--- a/vendor/github.com/globalsign/mgo/changestreams.go
+++ /dev/null
@@ -1,357 +0,0 @@
-package mgo
-
-import (
- "errors"
- "fmt"
- "reflect"
- "sync"
- "time"
-
- "github.com/globalsign/mgo/bson"
-)
-
-type FullDocument string
-
-const (
- Default = "default"
- UpdateLookup = "updateLookup"
-)
-
-type ChangeStream struct {
- iter *Iter
- isClosed bool
- options ChangeStreamOptions
- pipeline interface{}
- resumeToken *bson.Raw
- collection *Collection
- readPreference *ReadPreference
- err error
- m sync.Mutex
- sessionCopied bool
-}
-
-type ChangeStreamOptions struct {
-
- // FullDocument controls the amount of data that the server will return when
- // returning a changes document.
- FullDocument FullDocument
-
- // ResumeAfter specifies the logical starting point for the new change stream.
- ResumeAfter *bson.Raw
-
- // MaxAwaitTimeMS specifies the maximum amount of time for the server to wait
- // on new documents to satisfy a change stream query.
- MaxAwaitTimeMS time.Duration
-
- // BatchSize specifies the number of documents to return per batch.
- BatchSize int
-
- // Collation specifies the way the server should collate returned data.
- //TODO Collation *Collation
-}
-
-var errMissingResumeToken = errors.New("resume token missing from result")
-
-// Watch constructs a new ChangeStream capable of receiving continuing data
-// from the database.
-func (coll *Collection) Watch(pipeline interface{},
- options ChangeStreamOptions) (*ChangeStream, error) {
-
- if pipeline == nil {
- pipeline = []bson.M{}
- }
-
- csPipe := constructChangeStreamPipeline(pipeline, options)
- pipe := coll.Pipe(&csPipe)
- if options.MaxAwaitTimeMS > 0 {
- pipe.SetMaxTime(options.MaxAwaitTimeMS)
- }
- if options.BatchSize > 0 {
- pipe.Batch(options.BatchSize)
- }
- pIter := pipe.Iter()
-
- // check that there was no issue creating the iterator.
- // this will fail immediately with an error from the server if running against
- // a standalone.
- if err := pIter.Err(); err != nil {
- return nil, err
- }
-
- pIter.isChangeStream = true
- return &ChangeStream{
- iter: pIter,
- collection: coll,
- resumeToken: nil,
- options: options,
- pipeline: pipeline,
- }, nil
-}
-
-// Next retrieves the next document from the change stream, blocking if necessary.
-// Next returns true if a document was successfully unmarshalled into result,
-// and false if an error occurred. When Next returns false, the Err method should
-// be called to check what error occurred during iteration. If there were no events
-// available (ErrNotFound), the Err method returns nil so the user can retry the invocation.
-// -// For example: -// -// pipeline := []bson.M{} -// -// changeStream := collection.Watch(pipeline, ChangeStreamOptions{}) -// for changeStream.Next(&changeDoc) { -// fmt.Printf("Change: %v\n", changeDoc) -// } -// -// if err := changeStream.Close(); err != nil { -// return err -// } -// -// If the pipeline used removes the _id field from the result, Next will error -// because the _id field is needed to resume iteration when an error occurs. -// -func (changeStream *ChangeStream) Next(result interface{}) bool { - // the err field is being constantly overwritten and we don't want the user to - // attempt to read it at this point so we lock. - changeStream.m.Lock() - - defer changeStream.m.Unlock() - - // if we are in a state of error, then don't continue. - if changeStream.err != nil { - return false - } - - if changeStream.isClosed { - changeStream.err = fmt.Errorf("illegal use of a closed ChangeStream") - return false - } - - var err error - - // attempt to fetch the change stream result. - err = changeStream.fetchResultSet(result) - if err == nil { - return true - } - - // if we get no results we return false with no errors so the user can call Next - // again, resuming is not needed as the iterator is simply timed out as no events happened. - // The user will call Timeout in order to understand if this was the case. - if err == ErrNotFound { - return false - } - - // check if the error is resumable - if !isResumableError(err) { - // error is not resumable, give up and return it to the user. - changeStream.err = err - return false - } - - // try to resume. - err = changeStream.resume() - if err != nil { - // we've not been able to successfully resume and should only try once, - // so we give up. - changeStream.err = err - return false - } - - // we've successfully resumed the changestream. - // try to fetch the next result. - err = changeStream.fetchResultSet(result) - if err != nil { - changeStream.err = err - return false - } - - return true -} - -// Err returns nil if no errors happened during iteration, or the actual -// error otherwise. -func (changeStream *ChangeStream) Err() error { - changeStream.m.Lock() - defer changeStream.m.Unlock() - return changeStream.err -} - -// Close kills the server cursor used by the iterator, if any, and returns -// nil if no errors happened during iteration, or the actual error otherwise. -func (changeStream *ChangeStream) Close() error { - changeStream.m.Lock() - defer changeStream.m.Unlock() - changeStream.isClosed = true - err := changeStream.iter.Close() - if err != nil { - changeStream.err = err - } - if changeStream.sessionCopied { - changeStream.iter.session.Close() - changeStream.sessionCopied = false - } - return err -} - -// ResumeToken returns a copy of the current resume token held by the change stream. -// This token should be treated as an opaque token that can be provided to instantiate -// a new change stream. -func (changeStream *ChangeStream) ResumeToken() *bson.Raw { - changeStream.m.Lock() - defer changeStream.m.Unlock() - if changeStream.resumeToken == nil { - return nil - } - var tokenCopy = *changeStream.resumeToken - return &tokenCopy -} - -// Timeout returns true if the last call of Next returned false because of an iterator timeout. -func (changeStream *ChangeStream) Timeout() bool { - return changeStream.iter.Timeout() -} - -func constructChangeStreamPipeline(pipeline interface{}, - options ChangeStreamOptions) interface{} { - pipelinev := reflect.ValueOf(pipeline) - - // ensure that the pipeline passed in is a slice. 
- if pipelinev.Kind() != reflect.Slice {
- panic("pipeline argument must be a slice")
- }
-
- // construct the options to be used by the change notification
- // pipeline stage.
- changeStreamStageOptions := bson.M{}
-
- if options.FullDocument != "" {
- changeStreamStageOptions["fullDocument"] = options.FullDocument
- }
- if options.ResumeAfter != nil {
- changeStreamStageOptions["resumeAfter"] = options.ResumeAfter
- }
-
- changeStreamStage := bson.M{"$changeStream": changeStreamStageOptions}
-
- pipeOfInterfaces := make([]interface{}, pipelinev.Len()+1)
-
- // insert the change notification pipeline stage at the beginning of the
- // aggregation.
- pipeOfInterfaces[0] = changeStreamStage
-
- // convert the passed in slice to a slice of interfaces.
- for i := 0; i < pipelinev.Len(); i++ {
- pipeOfInterfaces[1+i] = pipelinev.Index(i).Addr().Interface()
- }
- var pipelineAsInterface interface{} = pipeOfInterfaces
- return pipelineAsInterface
-}
-
-func (changeStream *ChangeStream) resume() error {
- // copy the information for the new socket.
-
- // Thanks to Copy() future uses will acquire a new socket against the newly selected DB.
- newSession := changeStream.iter.session.Copy()
-
- // fetch the cursor from the iterator and use it to run a killCursors
- // on the connection.
- cursorId := changeStream.iter.op.cursorId
- err := runKillCursorsOnSession(newSession, cursorId)
- if err != nil {
- return err
- }
-
- // change out the old connection to the database with the new connection.
- if changeStream.sessionCopied {
- changeStream.collection.Database.Session.Close()
- }
- changeStream.collection.Database.Session = newSession
- changeStream.sessionCopied = true
-
- opts := changeStream.options
- if changeStream.resumeToken != nil {
- opts.ResumeAfter = changeStream.resumeToken
- }
- // make a new pipeline containing the resume token.
- changeStreamPipeline := constructChangeStreamPipeline(changeStream.pipeline, opts)
-
- // generate the new iterator with the new connection.
- newPipe := changeStream.collection.Pipe(changeStreamPipeline)
- changeStream.iter = newPipe.Iter()
- if err := changeStream.iter.Err(); err != nil {
- return err
- }
- changeStream.iter.isChangeStream = true
- return nil
-}
-
-// fetchResumeToken unmarshals the _id field from the document, setting an error
-// on the changeStream if it is unable to.
-func (changeStream *ChangeStream) fetchResumeToken(rawResult *bson.Raw) error {
- changeStreamResult := struct {
- ResumeToken *bson.Raw `bson:"_id,omitempty"`
- }{}
-
- err := rawResult.Unmarshal(&changeStreamResult)
- if err != nil {
- return err
- }
-
- if changeStreamResult.ResumeToken == nil {
- return errMissingResumeToken
- }
-
- changeStream.resumeToken = changeStreamResult.ResumeToken
- return nil
-}
-
-func (changeStream *ChangeStream) fetchResultSet(result interface{}) error {
- rawResult := bson.Raw{}
-
- // fetch the next set of documents from the cursor.
- gotNext := changeStream.iter.Next(&rawResult)
- err := changeStream.iter.Err()
- if err != nil {
- return err
- }
-
- if !gotNext && err == nil {
- // If the iter.Err() method returns nil despite us not getting a next batch,
- // it is because iter.Err() silences this case.
- return ErrNotFound
- }
-
- // grab the resumeToken from the results
- if err := changeStream.fetchResumeToken(&rawResult); err != nil {
- return err
- }
-
- // put the raw results into the data structure the user provided.
- if err := rawResult.Unmarshal(result); err != nil { - return err - } - return nil -} - -func isResumableError(err error) bool { - _, isQueryError := err.(*QueryError) - // if it is not a database error OR it is a database error, - // but the error is a notMaster error - //and is not a missingResumeToken error (caused by the user provided pipeline) - return (!isQueryError || isNotMasterError(err)) && (err != errMissingResumeToken) -} - -func runKillCursorsOnSession(session *Session, cursorId int64) error { - socket, err := session.acquireSocket(true) - if err != nil { - return err - } - err = socket.Query(&killCursorsOp{[]int64{cursorId}}) - if err != nil { - return err - } - socket.Release() - - return nil -} diff --git a/vendor/github.com/globalsign/mgo/cluster.go b/vendor/github.com/globalsign/mgo/cluster.go deleted file mode 100644 index ff431cac..00000000 --- a/vendor/github.com/globalsign/mgo/cluster.go +++ /dev/null @@ -1,704 +0,0 @@ -// mgo - MongoDB driver for Go -// -// Copyright (c) 2010-2012 - Gustavo Niemeyer -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package mgo - -import ( - "errors" - "fmt" - "net" - "runtime" - "strconv" - "strings" - "sync" - "time" - - "github.com/globalsign/mgo/bson" -) - -// --------------------------------------------------------------------------- -// Mongo cluster encapsulation. -// -// A cluster enables the communication with one or more servers participating -// in a mongo cluster. This works with individual servers, a replica set, -// a replica pair, one or multiple mongos routers, etc. 
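The change-stream machinery deleted above (`Watch`, `Next`, `resume`, `fetchResumeToken`) revolves around one contract: every event carries an opaque `_id` resume token, and handing that token back via `ChangeStreamOptions.ResumeAfter` reopens the stream at the same logical position. A sketch of that usage against the removed mgo API; the `tailOnce` helper, its wiring, and the collection setup are assumptions for illustration only:

```go
package example

import (
	"fmt"

	"github.com/globalsign/mgo"
	"github.com/globalsign/mgo/bson"
)

// tailOnce drains whatever events are currently available, then returns the
// opaque resume token so a caller can persist it and continue later by
// passing it back in as resumeAfter.
func tailOnce(coll *mgo.Collection, resumeAfter *bson.Raw) (*bson.Raw, error) {
	cs, err := coll.Watch(nil, mgo.ChangeStreamOptions{ResumeAfter: resumeAfter})
	if err != nil {
		return nil, err
	}
	defer cs.Close()

	var ev bson.M
	for cs.Next(&ev) {
		fmt.Println("change:", ev["operationType"])
	}
	// After Next returns false, a nil Err() just means the iterator timed
	// out with no new events; the token still marks our position.
	return cs.ResumeToken(), cs.Err()
}
```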
-
-type mongoCluster struct {
- sync.RWMutex
- serverSynced sync.Cond
- userSeeds []string
- dynaSeeds []string
- servers mongoServers
- masters mongoServers
- references int
- syncing bool
- syncCount uint
- cachedIndex map[string]bool
- sync chan bool
- dial dialer
- dialInfo *DialInfo
-}
-
-func newCluster(userSeeds []string, info *DialInfo) *mongoCluster {
- cluster := &mongoCluster{
- userSeeds: userSeeds,
- references: 1,
- dial: dialer{info.Dial, info.DialServer},
- dialInfo: info,
- }
- cluster.serverSynced.L = cluster.RWMutex.RLocker()
- cluster.sync = make(chan bool, 1)
- stats.cluster(+1)
- go cluster.syncServersLoop()
- return cluster
-}
-
-// Acquire increases the reference count for the cluster.
-func (cluster *mongoCluster) Acquire() {
- cluster.Lock()
- cluster.references++
- debugf("Cluster %p acquired (refs=%d)", cluster, cluster.references)
- cluster.Unlock()
-}
-
-// Release decreases the reference count for the cluster. Once
-// it reaches zero, all servers will be closed.
-func (cluster *mongoCluster) Release() {
- cluster.Lock()
- if cluster.references == 0 {
- panic("cluster.Release() with references == 0")
- }
- cluster.references--
- debugf("Cluster %p released (refs=%d)", cluster, cluster.references)
- if cluster.references == 0 {
- for _, server := range cluster.servers.Slice() {
- server.Close()
- }
- // Wake up the sync loop so it can die.
- cluster.syncServers()
- stats.cluster(-1)
- }
- cluster.Unlock()
-}
-
-func (cluster *mongoCluster) LiveServers() (servers []string) {
- cluster.RLock()
- for _, serv := range cluster.servers.Slice() {
- servers = append(servers, serv.Addr)
- }
- cluster.RUnlock()
- return servers
-}
-
-func (cluster *mongoCluster) removeServer(server *mongoServer) {
- cluster.Lock()
- cluster.masters.Remove(server)
- other := cluster.servers.Remove(server)
- cluster.Unlock()
- if other != nil {
- other.CloseIdle()
- log("Removed server ", server.Addr, " from cluster.")
- }
- server.CloseIdle()
-}
-
-type isMasterResult struct {
- IsMaster bool
- Secondary bool
- Primary string
- Hosts []string
- Passives []string
- Tags bson.D
- Msg string
- SetName string `bson:"setName"`
- MaxWireVersion int `bson:"maxWireVersion"`
-}
-
-func (cluster *mongoCluster) isMaster(socket *mongoSocket, result *isMasterResult) error {
- // Monotonic lets it talk to a slave and still hold the socket.
- session := newSession(Monotonic, cluster, cluster.dialInfo)
- session.setSocket(socket)
-
- var cmd = bson.D{{Name: "isMaster", Value: 1}}
-
- // Send client metadata to the server to identify this socket if this is
- // the first isMaster call only.
- // - // isMaster commands issued after the initial connection handshake MUST NOT contain handshake arguments - // https://github.com/mongodb/specifications/blob/master/source/mongodb-handshake/handshake.rst#connection-handshake - // - socket.sendMeta.Do(func() { - var meta = bson.M{ - "driver": bson.M{ - "name": "mgo", - "version": "globalsign", - }, - "os": bson.M{ - "type": runtime.GOOS, - "architecture": runtime.GOARCH, - }, - } - - // Include the application name if set - if cluster.dialInfo.AppName != "" { - meta["application"] = bson.M{"name": cluster.dialInfo.AppName} - } - - cmd = append(cmd, bson.DocElem{ - Name: "client", - Value: meta, - }) - }) - - err := session.runOnSocket(socket, cmd, result) - session.Close() - return err -} - -type possibleTimeout interface { - Timeout() bool -} - -func (cluster *mongoCluster) syncServer(server *mongoServer) (info *mongoServerInfo, hosts []string, err error) { - addr := server.Addr - log("SYNC Processing ", addr, "...") - - // Retry a few times to avoid knocking a server down for a hiccup. - var result isMasterResult - var tryerr error - for retry := 0; ; retry++ { - if retry == 3 || retry == 1 && cluster.dialInfo.FailFast { - return nil, nil, tryerr - } - if retry > 0 { - // Don't abuse the server needlessly if there's something actually wrong. - if err, ok := tryerr.(possibleTimeout); ok && err.Timeout() { - // Give a chance for waiters to timeout as well. - cluster.serverSynced.Broadcast() - } - time.Sleep(syncShortDelay) - } - - // Don't ever hit the pool limit for syncing - config := cluster.dialInfo.Copy() - config.PoolLimit = 0 - - socket, _, err := server.AcquireSocket(config) - if err != nil { - tryerr = err - logf("SYNC Failed to get socket to %s: %v", addr, err) - continue - } - err = cluster.isMaster(socket, &result) - - // Restore the correct dial config before returning it to the pool - socket.dialInfo = cluster.dialInfo - socket.Release() - - if err != nil { - tryerr = err - logf("SYNC Command 'ismaster' to %s failed: %v", addr, err) - continue - } - debugf("SYNC Result of 'ismaster' from %s: %#v", addr, result) - break - } - - if cluster.dialInfo.ReplicaSetName != "" && result.SetName != cluster.dialInfo.ReplicaSetName { - logf("SYNC Server %s is not a member of replica set %q", addr, cluster.dialInfo.ReplicaSetName) - return nil, nil, fmt.Errorf("server %s is not a member of replica set %q", addr, cluster.dialInfo.ReplicaSetName) - } - - if result.IsMaster { - debugf("SYNC %s is a master.", addr) - if !server.info.Master { - // Made an incorrect assumption above, so fix stats. - stats.conn(-1, false) - stats.conn(+1, true) - } - } else if result.Secondary { - debugf("SYNC %s is a slave.", addr) - } else if cluster.dialInfo.Direct { - logf("SYNC %s in unknown state. Pretending it's a slave due to direct connection.", addr) - } else { - logf("SYNC %s is neither a master nor a slave.", addr) - // Let stats track it as whatever was known before. - return nil, nil, errors.New(addr + " is not a master nor slave") - } - - info = &mongoServerInfo{ - Master: result.IsMaster, - Mongos: result.Msg == "isdbgrid", - Tags: result.Tags, - SetName: result.SetName, - MaxWireVersion: result.MaxWireVersion, - } - - hosts = make([]string, 0, 1+len(result.Hosts)+len(result.Passives)) - if result.Primary != "" { - // First in the list to speed up master discovery. - hosts = append(hosts, result.Primary) - } - hosts = append(hosts, result.Hosts...) - hosts = append(hosts, result.Passives...) 
- - debugf("SYNC %s knows about the following peers: %#v", addr, hosts) - return info, hosts, nil -} - -type syncKind bool - -const ( - completeSync syncKind = true - partialSync syncKind = false -) - -func (cluster *mongoCluster) addServer(server *mongoServer, info *mongoServerInfo, syncKind syncKind) { - cluster.Lock() - current := cluster.servers.Search(server.ResolvedAddr) - if current == nil { - if syncKind == partialSync { - cluster.Unlock() - server.Close() - log("SYNC Discarding unknown server ", server.Addr, " due to partial sync.") - return - } - cluster.servers.Add(server) - if info.Master { - cluster.masters.Add(server) - log("SYNC Adding ", server.Addr, " to cluster as a master.") - } else { - log("SYNC Adding ", server.Addr, " to cluster as a slave.") - } - } else { - if server != current { - panic("addServer attempting to add duplicated server") - } - if server.Info().Master != info.Master { - if info.Master { - log("SYNC Server ", server.Addr, " is now a master.") - cluster.masters.Add(server) - } else { - log("SYNC Server ", server.Addr, " is now a slave.") - cluster.masters.Remove(server) - } - } - } - server.SetInfo(info) - debugf("SYNC Broadcasting availability of server %s", server.Addr) - cluster.serverSynced.Broadcast() - cluster.Unlock() -} - -func (cluster *mongoCluster) getKnownAddrs() []string { - cluster.RLock() - max := len(cluster.userSeeds) + len(cluster.dynaSeeds) + cluster.servers.Len() - seen := make(map[string]bool, max) - known := make([]string, 0, max) - - add := func(addr string) { - if _, found := seen[addr]; !found { - seen[addr] = true - known = append(known, addr) - } - } - - for _, addr := range cluster.userSeeds { - add(addr) - } - for _, addr := range cluster.dynaSeeds { - add(addr) - } - for _, serv := range cluster.servers.Slice() { - add(serv.Addr) - } - cluster.RUnlock() - - return known -} - -// syncServers injects a value into the cluster.sync channel to force -// an iteration of the syncServersLoop function. -func (cluster *mongoCluster) syncServers() { - select { - case cluster.sync <- true: - default: - } -} - -// How long to wait for a checkup of the cluster topology if nothing -// else kicks a synchronization before that. -const syncServersDelay = 30 * time.Second -const syncShortDelay = 500 * time.Millisecond - -// syncServersLoop loops while the cluster is alive to keep its idea of -// the server topology up-to-date. It must be called just once from -// newCluster. The loop iterates once syncServersDelay has passed, or -// if somebody injects a value into the cluster.sync channel to force a -// synchronization. A loop iteration will contact all servers in -// parallel, ask them about known peers and their own role within the -// cluster, and then attempt to do the same with all the peers -// retrieved. -func (cluster *mongoCluster) syncServersLoop() { - for { - debugf("SYNC Cluster %p is starting a sync loop iteration.", cluster) - - cluster.Lock() - if cluster.references == 0 { - cluster.Unlock() - break - } - cluster.references++ // Keep alive while syncing. - direct := cluster.dialInfo.Direct - cluster.Unlock() - - cluster.syncServersIteration(direct) - - // We just synchronized, so consume any outstanding requests. - select { - case <-cluster.sync: - default: - } - - cluster.Release() - - // Hold off before allowing another sync. No point in - // burning CPU looking for down servers. 
- if !cluster.dialInfo.FailFast { - time.Sleep(syncShortDelay) - } - - cluster.Lock() - if cluster.references == 0 { - cluster.Unlock() - break - } - cluster.syncCount++ - // Poke all waiters so they have a chance to timeout or - // restart syncing if they wish to. - cluster.serverSynced.Broadcast() - // Check if we have to restart immediately either way. - restart := !direct && cluster.masters.Empty() || cluster.servers.Empty() - cluster.Unlock() - - if restart { - log("SYNC No masters found. Will synchronize again.") - time.Sleep(syncShortDelay) - continue - } - - debugf("SYNC Cluster %p waiting for next requested or scheduled sync.", cluster) - - // Hold off until somebody explicitly requests a synchronization - // or it's time to check for a cluster topology change again. - select { - case <-cluster.sync: - case <-time.After(syncServersDelay): - } - } - debugf("SYNC Cluster %p is stopping its sync loop.", cluster) -} - -func (cluster *mongoCluster) server(addr string, tcpaddr *net.TCPAddr) *mongoServer { - cluster.RLock() - server := cluster.servers.Search(tcpaddr.String()) - cluster.RUnlock() - if server != nil { - return server - } - return newServer(addr, tcpaddr, cluster.sync, cluster.dial, cluster.dialInfo) -} - -func resolveAddr(addr string) (*net.TCPAddr, error) { - // Simple cases that do not need actual resolution. Works with IPv4 and v6. - if host, port, err := net.SplitHostPort(addr); err == nil { - if port, _ := strconv.Atoi(port); port > 0 { - zone := "" - if i := strings.LastIndex(host, "%"); i >= 0 { - zone = host[i+1:] - host = host[:i] - } - ip := net.ParseIP(host) - if ip != nil { - return &net.TCPAddr{IP: ip, Port: port, Zone: zone}, nil - } - } - } - - // Attempt to resolve IPv4 and v6 concurrently. - addrChan := make(chan *net.TCPAddr, 2) - for _, network := range []string{"udp4", "udp6"} { - network := network - go func() { - // The unfortunate UDP dialing hack allows having a timeout on address resolution. - conn, err := net.DialTimeout(network, addr, 10*time.Second) - if err != nil { - addrChan <- nil - } else { - addrChan <- (*net.TCPAddr)(conn.RemoteAddr().(*net.UDPAddr)) - conn.Close() - } - }() - } - - // Wait for the result of IPv4 and v6 resolution. Use IPv4 if available. - tcpaddr := <-addrChan - if tcpaddr == nil || len(tcpaddr.IP) != 4 { - var timeout <-chan time.Time - if tcpaddr != nil { - // Don't wait too long if an IPv6 address is known. - timeout = time.After(50 * time.Millisecond) - } - select { - case <-timeout: - case tcpaddr2 := <-addrChan: - if tcpaddr == nil || tcpaddr2 != nil { - // It's an IPv4 address or the only known address. Use it. 
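resolveAddr's fast path above skips DNS whenever the host is already an IP literal, including zoned IPv6 such as fe80::1%eth0. The same logic extracted into a free-standing helper (the function name is mine):

```go
package main

import (
	"fmt"
	"net"
	"strconv"
	"strings"
)

// parseLiteral returns a TCPAddr without any resolution, or nil when
// the host part is not an IP literal, mirroring resolveAddr's fast path.
func parseLiteral(addr string) *net.TCPAddr {
	host, portStr, err := net.SplitHostPort(addr)
	if err != nil {
		return nil
	}
	port, err := strconv.Atoi(portStr)
	if err != nil || port <= 0 {
		return nil
	}
	zone := ""
	if i := strings.LastIndex(host, "%"); i >= 0 {
		zone, host = host[i+1:], host[:i]
	}
	ip := net.ParseIP(host)
	if ip == nil {
		return nil
	}
	return &net.TCPAddr{IP: ip, Port: port, Zone: zone}
}

func main() {
	fmt.Println(parseLiteral("127.0.0.1:27017"))
	fmt.Println(parseLiteral("[fe80::1%eth0]:27017"))
	fmt.Println(parseLiteral("db.example.com:27017")) // nil: needs DNS
}
```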
- tcpaddr = tcpaddr2 - } - } - } - - if tcpaddr == nil { - log("SYNC Failed to resolve server address: ", addr) - return nil, errors.New("failed to resolve server address: " + addr) - } - if tcpaddr.String() != addr { - debug("SYNC Address ", addr, " resolved as ", tcpaddr.String()) - } - return tcpaddr, nil -} - -type pendingAdd struct { - server *mongoServer - info *mongoServerInfo -} - -func (cluster *mongoCluster) syncServersIteration(direct bool) { - log("SYNC Starting full topology synchronization...") - - var wg sync.WaitGroup - var m sync.Mutex - notYetAdded := make(map[string]pendingAdd) - addIfFound := make(map[string]bool) - seen := make(map[string]bool) - syncKind := partialSync - - var spawnSync func(addr string, byMaster bool) - spawnSync = func(addr string, byMaster bool) { - wg.Add(1) - go func() { - defer wg.Done() - - tcpaddr, err := resolveAddr(addr) - if err != nil { - log("SYNC Failed to start sync of ", addr, ": ", err.Error()) - return - } - resolvedAddr := tcpaddr.String() - - m.Lock() - if byMaster { - if pending, ok := notYetAdded[resolvedAddr]; ok { - delete(notYetAdded, resolvedAddr) - m.Unlock() - cluster.addServer(pending.server, pending.info, completeSync) - return - } - addIfFound[resolvedAddr] = true - } - if seen[resolvedAddr] { - m.Unlock() - return - } - seen[resolvedAddr] = true - m.Unlock() - - server := cluster.server(addr, tcpaddr) - info, hosts, err := cluster.syncServer(server) - if err != nil { - cluster.removeServer(server) - return - } - - m.Lock() - add := direct || info.Master || addIfFound[resolvedAddr] - if add { - syncKind = completeSync - } else { - notYetAdded[resolvedAddr] = pendingAdd{server, info} - } - m.Unlock() - if add { - cluster.addServer(server, info, completeSync) - } - if !direct { - for _, addr := range hosts { - spawnSync(addr, info.Master) - } - } - }() - } - - knownAddrs := cluster.getKnownAddrs() - for _, addr := range knownAddrs { - spawnSync(addr, false) - } - wg.Wait() - - if syncKind == completeSync { - logf("SYNC Synchronization was complete (got data from primary).") - for _, pending := range notYetAdded { - cluster.removeServer(pending.server) - } - } else { - logf("SYNC Synchronization was partial (cannot talk to primary).") - for _, pending := range notYetAdded { - cluster.addServer(pending.server, pending.info, partialSync) - } - } - - cluster.Lock() - mastersLen := cluster.masters.Len() - logf("SYNC Synchronization completed: %d master(s) and %d slave(s) alive.", mastersLen, cluster.servers.Len()-mastersLen) - - // Update dynamic seeds, but only if we have any good servers. Otherwise, - // leave them alone for better chances of a successful sync in the future. - if syncKind == completeSync { - dynaSeeds := make([]string, cluster.servers.Len()) - for i, server := range cluster.servers.Slice() { - dynaSeeds[i] = server.Addr - } - cluster.dynaSeeds = dynaSeeds - debugf("SYNC New dynamic seeds: %#v\n", dynaSeeds) - } - cluster.Unlock() -} - -// AcquireSocketWithPoolTimeout returns a socket to a server in the cluster. If slaveOk is -// true, it will attempt to return a socket to a slave server. If it is -// false, the socket will necessarily be to a master server. 
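The syncServersIteration routine removed above crawls the topology: it fans out a goroutine per address, deduplicates through a mutex-guarded seen set, and recursively visits the peers each server reports. A toy reduction of the crawl over a hypothetical static topology:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	// Hypothetical topology: each server reports the peers it knows.
	peers := map[string][]string{
		"a:27017": {"b:27017", "c:27017"},
		"b:27017": {"a:27017", "c:27017"},
		"c:27017": {"a:27017"},
	}

	var (
		wg   sync.WaitGroup
		mu   sync.Mutex
		seen = map[string]bool{}
	)

	var spawn func(addr string)
	spawn = func(addr string) {
		wg.Add(1)
		go func() {
			defer wg.Done()
			mu.Lock()
			if seen[addr] {
				mu.Unlock()
				return
			}
			seen[addr] = true
			mu.Unlock()
			for _, next := range peers[addr] { // recurse over reported peers
				spawn(next)
			}
		}()
	}

	spawn("a:27017")
	wg.Wait()
	fmt.Println(len(seen), "servers visited")
}
```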
-func (cluster *mongoCluster) AcquireSocketWithPoolTimeout(mode Mode, slaveOk bool, syncTimeout time.Duration, serverTags []bson.D, info *DialInfo) (s *mongoSocket, err error) { - var started time.Time - var syncCount uint - for { - cluster.RLock() - for { - mastersLen := cluster.masters.Len() - slavesLen := cluster.servers.Len() - mastersLen - debugf("Cluster has %d known masters and %d known slaves.", mastersLen, slavesLen) - if mastersLen > 0 && !(slaveOk && mode == Secondary) || slavesLen > 0 && slaveOk { - break - } - if mastersLen > 0 && mode == Secondary && cluster.masters.HasMongos() { - break - } - if started.IsZero() { - // Initialize after fast path above. - started = time.Now() - syncCount = cluster.syncCount - } else if syncTimeout != 0 && started.Before(time.Now().Add(-syncTimeout)) || cluster.dialInfo.FailFast && cluster.syncCount != syncCount { - cluster.RUnlock() - return nil, errors.New("no reachable servers") - } - log("Waiting for servers to synchronize...") - cluster.syncServers() - - // Remember: this will release and reacquire the lock. - cluster.serverSynced.Wait() - } - - var server *mongoServer - if slaveOk { - server = cluster.servers.BestFit(mode, serverTags) - } else { - server = cluster.masters.BestFit(mode, nil) - } - cluster.RUnlock() - - if server == nil { - // Must have failed the requested tags. Sleep to avoid spinning. - time.Sleep(1e8) - continue - } - - s, abended, err := server.AcquireSocketWithBlocking(info) - if err == errPoolTimeout { - // No need to remove servers from the topology if acquiring a socket fails for this reason. - return nil, err - } - if err != nil { - cluster.removeServer(server) - cluster.syncServers() - continue - } - if abended && !slaveOk { - var result isMasterResult - err := cluster.isMaster(s, &result) - if err != nil || !result.IsMaster { - logf("Cannot confirm server %s as master (%v)", server.Addr, err) - s.Release() - cluster.syncServers() - time.Sleep(100 * time.Millisecond) - continue - } else { - // We've managed to successfully reconnect to the master, we are no longer abnormaly ended - server.Lock() - server.abended = false - server.Unlock() - } - } - return s, nil - } -} - -func (cluster *mongoCluster) CacheIndex(cacheKey string, exists bool) { - cluster.Lock() - if cluster.cachedIndex == nil { - cluster.cachedIndex = make(map[string]bool) - } - if exists { - cluster.cachedIndex[cacheKey] = true - } else { - delete(cluster.cachedIndex, cacheKey) - } - cluster.Unlock() -} - -func (cluster *mongoCluster) HasCachedIndex(cacheKey string) (result bool) { - cluster.RLock() - if cluster.cachedIndex != nil { - result = cluster.cachedIndex[cacheKey] - } - cluster.RUnlock() - return -} - -func (cluster *mongoCluster) ResetIndexCache() { - cluster.Lock() - cluster.cachedIndex = make(map[string]bool) - cluster.Unlock() -} diff --git a/vendor/github.com/globalsign/mgo/coarse_time.go b/vendor/github.com/globalsign/mgo/coarse_time.go deleted file mode 100644 index e54dd17c..00000000 --- a/vendor/github.com/globalsign/mgo/coarse_time.go +++ /dev/null @@ -1,62 +0,0 @@ -package mgo - -import ( - "sync" - "sync/atomic" - "time" -) - -// coarseTimeProvider provides a periodically updated (approximate) time value to -// amortise the cost of frequent calls to time.Now. -// -// A read throughput increase of ~6% was measured when using coarseTimeProvider with the -// high-precision event timer (HPET) on FreeBSD 11.1 and Go 1.10.1 after merging -// #116. 
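The acquire loop in the hunk above parks callers on cluster.serverSynced (a sync.Cond) until a topology sync broadcasts fresh state. Reduced to its core handshake; the counter and the timings here are illustrative only:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	var (
		mu      sync.Mutex
		cond    = sync.NewCond(&mu)
		masters = 0
	)

	go func() {
		time.Sleep(50 * time.Millisecond) // pretend a sync pass ran
		mu.Lock()
		masters = 1
		mu.Unlock()
		cond.Broadcast() // poke every waiter, like serverSynced.Broadcast
	}()

	mu.Lock()
	for masters == 0 {
		cond.Wait() // releases mu while parked, reacquires on wake-up
	}
	mu.Unlock()
	fmt.Println("acquired a master")
}
```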
-// -// Calling Now returns a time.Time that is updated at the configured interval, -// however due to scheduling the value may be marginally older than expected. -// -// coarseTimeProvider is safe for concurrent use. -type coarseTimeProvider struct { - once sync.Once - stop chan struct{} - last atomic.Value -} - -// Now returns the most recently acquired time.Time value. -func (t *coarseTimeProvider) Now() time.Time { - return t.last.Load().(time.Time) -} - -// Close stops the periodic update of t. -// -// Any subsequent calls to Now will return the same value forever. -func (t *coarseTimeProvider) Close() { - t.once.Do(func() { - close(t.stop) - }) -} - -// newcoarseTimeProvider returns a coarseTimeProvider configured to update at granularity. -func newcoarseTimeProvider(granularity time.Duration) *coarseTimeProvider { - t := &coarseTimeProvider{ - stop: make(chan struct{}), - } - - t.last.Store(time.Now()) - - go func() { - ticker := time.NewTicker(granularity) - for { - select { - case <-t.stop: - ticker.Stop() - return - case <-ticker.C: - t.last.Store(time.Now()) - } - } - }() - - return t -} diff --git a/vendor/github.com/globalsign/mgo/doc.go b/vendor/github.com/globalsign/mgo/doc.go deleted file mode 100644 index f3f373bf..00000000 --- a/vendor/github.com/globalsign/mgo/doc.go +++ /dev/null @@ -1,35 +0,0 @@ -// Package mgo (pronounced as "mango") offers a rich MongoDB driver for Go. -// -// Detailed documentation of the API is available at GoDoc: -// -// https://godoc.org/github.com/globalsign/mgo -// -// Usage of the driver revolves around the concept of sessions. To -// get started, obtain a session using the Dial function: -// -// session, err := mgo.Dial(url) -// -// This will establish one or more connections with the cluster of -// servers defined by the url parameter. From then on, the cluster -// may be queried with multiple consistency rules (see SetMode) and -// documents retrieved with statements such as: -// -// c := session.DB(database).C(collection) -// err := c.Find(query).One(&result) -// -// New sessions are typically created by calling session.Copy on the -// initial session obtained at dial time. These new sessions will share -// the same cluster information and connection pool, and may be easily -// handed into other methods and functions for organizing logic. -// Every session created must have its Close method called at the end -// of its life time, so its resources may be put back in the pool or -// collected, depending on the case. -// -// There is a sub-package that provides support for BSON, which can be -// used by itself as well: -// -// https://godoc.org/github.com/globalsign/mgo/bson -// -// For more details, see the documentation for the types and methods. -// -package mgo diff --git a/vendor/github.com/globalsign/mgo/gridfs.go b/vendor/github.com/globalsign/mgo/gridfs.go deleted file mode 100644 index 0954b166..00000000 --- a/vendor/github.com/globalsign/mgo/gridfs.go +++ /dev/null @@ -1,782 +0,0 @@ -// mgo - MongoDB driver for Go -// -// Copyright (c) 2010-2012 - Gustavo Niemeyer -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. 
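coarse_time.go, deleted above, amortizes time.Now by caching the clock in an atomic.Value that a ticker goroutine refreshes. Its kernel, with the Once/stop plumbing trimmed away:

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

func main() {
	var last atomic.Value // holds the most recent time.Time
	last.Store(time.Now())
	stop := make(chan struct{})

	go func() {
		ticker := time.NewTicker(25 * time.Millisecond)
		defer ticker.Stop()
		for {
			select {
			case <-stop:
				return
			case <-ticker.C:
				last.Store(time.Now()) // refresh the coarse clock
			}
		}
	}()

	coarse := last.Load().(time.Time) // lock-free, approximate "now"
	fmt.Println(time.Since(coarse) < time.Second)
	close(stop)
}
```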
Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package mgo - -import ( - "crypto/md5" - "encoding/hex" - "errors" - "hash" - "io" - "os" - "sync" - "time" - - "github.com/globalsign/mgo/bson" -) - -// GridFS stores files in two collections: -// -// - chunks stores the binary chunks. For details, see the chunks Collection. -// - files stores the file’s metadata. For details, see the files Collection. -// -// GridFS places the collections in a common bucket by prefixing each with the bucket name. -// By default, GridFS uses two collections with a bucket named fs: -// -// - fs.files -// - fs.chunks -// -// You can choose a different bucket name, as well as create multiple buckets in a single database. -// The full collection name, which includes the bucket name, is subject to the namespace length limit. -// -// Relevant documentation: -// -// https://docs.mongodb.com/manual/core/gridfs/ -// https://docs.mongodb.com/manual/core/gridfs/#gridfs-chunks-collection -// https://docs.mongodb.com/manual/core/gridfs/#gridfs-files-collection -// -type GridFS struct { - Files *Collection - Chunks *Collection -} - -type gfsFileMode int - -const ( - gfsClosed gfsFileMode = 0 - gfsReading gfsFileMode = 1 - gfsWriting gfsFileMode = 2 -) - -// GridFile document in files collection -type GridFile struct { - m sync.Mutex - c sync.Cond - gfs *GridFS - mode gfsFileMode - err error - - chunk int - offset int64 - - wpending int - wbuf []byte - wsum hash.Hash - - rbuf []byte - rcache *gfsCachedChunk - - doc gfsFile -} - -type gfsFile struct { - Id interface{} `bson:"_id"` - ChunkSize int `bson:"chunkSize"` - UploadDate time.Time `bson:"uploadDate"` - Length int64 `bson:",minsize"` - MD5 string - Filename string `bson:",omitempty"` - ContentType string `bson:"contentType,omitempty"` - Metadata *bson.Raw `bson:",omitempty"` -} - -type gfsChunk struct { - Id interface{} `bson:"_id"` - FilesId interface{} `bson:"files_id"` - N int - Data []byte -} - -type gfsCachedChunk struct { - wait sync.Mutex - n int - data []byte - err error -} - -func newGridFS(db *Database, prefix string) *GridFS { - return &GridFS{db.C(prefix + ".files"), db.C(prefix + ".chunks")} -} - -func (gfs *GridFS) newFile() *GridFile { - file := &GridFile{gfs: gfs} - file.c.L = &file.m - //runtime.SetFinalizer(file, finalizeFile) - return file -} - -func finalizeFile(file *GridFile) { - file.Close() -} - -// Create creates a new file with the provided name in the GridFS. 
If the file -// name already exists, a new version will be inserted with an up-to-date -// uploadDate that will cause it to be atomically visible to the Open and -// OpenId methods. If the file name is not important, an empty name may be -// provided and the file Id used instead. -// -// It's important to Close files whether they are being written to -// or read from, and to check the err result to ensure the operation -// completed successfully. -// -// A simple example inserting a new file: -// -// func check(err error) { -// if err != nil { -// panic(err.String()) -// } -// } -// file, err := db.GridFS("fs").Create("myfile.txt") -// check(err) -// n, err := file.Write([]byte("Hello world!")) -// check(err) -// err = file.Close() -// check(err) -// fmt.Printf("%d bytes written\n", n) -// -// The io.Writer interface is implemented by *GridFile and may be used to -// help on the file creation. For example: -// -// file, err := db.GridFS("fs").Create("myfile.txt") -// check(err) -// messages, err := os.Open("/var/log/messages") -// check(err) -// defer messages.Close() -// err = io.Copy(file, messages) -// check(err) -// err = file.Close() -// check(err) -// -func (gfs *GridFS) Create(name string) (file *GridFile, err error) { - file = gfs.newFile() - file.mode = gfsWriting - file.wsum = md5.New() - file.doc = gfsFile{Id: bson.NewObjectId(), ChunkSize: 255 * 1024, Filename: name} - return -} - -// OpenId returns the file with the provided id, for reading. -// If the file isn't found, err will be set to mgo.ErrNotFound. -// -// It's important to Close files whether they are being written to -// or read from, and to check the err result to ensure the operation -// completed successfully. -// -// The following example will print the first 8192 bytes from the file: -// -// func check(err error) { -// if err != nil { -// panic(err.String()) -// } -// } -// file, err := db.GridFS("fs").OpenId(objid) -// check(err) -// b := make([]byte, 8192) -// n, err := file.Read(b) -// check(err) -// fmt.Println(string(b)) -// check(err) -// err = file.Close() -// check(err) -// fmt.Printf("%d bytes read\n", n) -// -// The io.Reader interface is implemented by *GridFile and may be used to -// deal with it. As an example, the following snippet will dump the whole -// file into the standard output: -// -// file, err := db.GridFS("fs").OpenId(objid) -// check(err) -// err = io.Copy(os.Stdout, file) -// check(err) -// err = file.Close() -// check(err) -// -func (gfs *GridFS) OpenId(id interface{}) (file *GridFile, err error) { - var doc gfsFile - err = gfs.Files.Find(bson.M{"_id": id}).One(&doc) - if err != nil { - return - } - file = gfs.newFile() - file.mode = gfsReading - file.doc = doc - return -} - -// Open returns the most recently uploaded file with the provided -// name, for reading. If the file isn't found, err will be set -// to mgo.ErrNotFound. -// -// It's important to Close files whether they are being written to -// or read from, and to check the err result to ensure the operation -// completed successfully. -// -// The following example will print the first 8192 bytes from the file: -// -// file, err := db.GridFS("fs").Open("myfile.txt") -// check(err) -// b := make([]byte, 8192) -// n, err := file.Read(b) -// check(err) -// fmt.Println(string(b)) -// check(err) -// err = file.Close() -// check(err) -// fmt.Printf("%d bytes read\n", n) -// -// The io.Reader interface is implemented by *GridFile and may be used to -// deal with it. 
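The Create documentation above still carries pre-Go-1 idioms: error has Error(), not String(), and io.Copy returns (int64, error), so `err = io.Copy(...)` does not compile. A corrected version of the same walkthrough; it assumes a reachable MongoDB on localhost and reuses the doc comment's /var/log/messages path:

```go
package main

import (
	"fmt"
	"io"
	"os"

	"github.com/globalsign/mgo"
)

func main() {
	session, err := mgo.Dial("localhost") // assumes a local MongoDB
	if err != nil {
		panic(err)
	}
	defer session.Close()

	file, err := session.DB("test").GridFS("fs").Create("myfile.txt")
	if err != nil {
		panic(err)
	}
	messages, err := os.Open("/var/log/messages")
	if err != nil {
		panic(err)
	}
	defer messages.Close()

	n, err := io.Copy(file, messages) // (int64, error), not just error
	if err != nil {
		panic(err)
	}
	if err := file.Close(); err != nil { // Close finalizes the file doc
		panic(err)
	}
	fmt.Printf("%d bytes written\n", n)
}
```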
As an example, the following snippet will dump the whole -// file into the standard output: -// -// file, err := db.GridFS("fs").Open("myfile.txt") -// check(err) -// err = io.Copy(os.Stdout, file) -// check(err) -// err = file.Close() -// check(err) -// -func (gfs *GridFS) Open(name string) (file *GridFile, err error) { - var doc gfsFile - err = gfs.Files.Find(bson.M{"filename": name}).Sort("-uploadDate").One(&doc) - if err != nil { - return - } - file = gfs.newFile() - file.mode = gfsReading - file.doc = doc - return -} - -// OpenNext opens the next file from iter for reading, sets *file to it, -// and returns true on the success case. If no more documents are available -// on iter or an error occurred, *file is set to nil and the result is false. -// Errors will be available via iter.Err(). -// -// The iter parameter must be an iterator on the GridFS files collection. -// Using the GridFS.Find method is an easy way to obtain such an iterator, -// but any iterator on the collection will work. -// -// If the provided *file is non-nil, OpenNext will close it before attempting -// to iterate to the next element. This means that in a loop one only -// has to worry about closing files when breaking out of the loop early -// (break, return, or panic). -// -// For example: -// -// gfs := db.GridFS("fs") -// query := gfs.Find(nil).Sort("filename") -// iter := query.Iter() -// var f *mgo.GridFile -// for gfs.OpenNext(iter, &f) { -// fmt.Printf("Filename: %s\n", f.Name()) -// } -// if iter.Close() != nil { -// panic(iter.Close()) -// } -// -func (gfs *GridFS) OpenNext(iter *Iter, file **GridFile) bool { - if *file != nil { - // Ignoring the error here shouldn't be a big deal - // as we're reading the file and the loop iteration - // for this file is finished. - _ = (*file).Close() - } - var doc gfsFile - if !iter.Next(&doc) { - *file = nil - return false - } - f := gfs.newFile() - f.mode = gfsReading - f.doc = doc - *file = f - return true -} - -// Find runs query on GridFS's files collection and returns -// the resulting Query. -// -// This logic: -// -// gfs := db.GridFS("fs") -// iter := gfs.Find(nil).Iter() -// -// Is equivalent to: -// -// files := db.C("fs" + ".files") -// iter := files.Find(nil).Iter() -// -func (gfs *GridFS) Find(query interface{}) *Query { - return gfs.Files.Find(query) -} - -// RemoveId deletes the file with the provided id from the GridFS. -func (gfs *GridFS) RemoveId(id interface{}) error { - err := gfs.Files.Remove(bson.M{"_id": id}) - if err != nil { - return err - } - _, err = gfs.Chunks.RemoveAll(bson.D{{Name: "files_id", Value: id}}) - return err -} - -type gfsDocId struct { - Id interface{} `bson:"_id"` -} - -// Remove deletes all files with the provided name from the GridFS. -func (gfs *GridFS) Remove(name string) (err error) { - iter := gfs.Files.Find(bson.M{"filename": name}).Select(bson.M{"_id": 1}).Iter() - var doc gfsDocId - for iter.Next(&doc) { - if e := gfs.RemoveId(doc.Id); e != nil { - err = e - } - } - if err == nil { - err = iter.Close() - } - return err -} - -func (file *GridFile) assertMode(mode gfsFileMode) { - switch file.mode { - case mode: - return - case gfsWriting: - panic("GridFile is open for writing") - case gfsReading: - panic("GridFile is open for reading") - case gfsClosed: - panic("GridFile is closed") - default: - panic("internal error: missing GridFile mode") - } -} - -// SetChunkSize sets size of saved chunks. 
Once the file is written to, it -// will be split in blocks of that size and each block saved into an -// independent chunk document. The default chunk size is 255kb. -// -// It is a runtime error to call this function once the file has started -// being written to. -func (file *GridFile) SetChunkSize(bytes int) { - file.assertMode(gfsWriting) - debugf("GridFile %p: setting chunk size to %d", file, bytes) - file.m.Lock() - file.doc.ChunkSize = bytes - file.m.Unlock() -} - -// Id returns the current file Id. -func (file *GridFile) Id() interface{} { - return file.doc.Id -} - -// SetId changes the current file Id. -// -// It is a runtime error to call this function once the file has started -// being written to, or when the file is not open for writing. -func (file *GridFile) SetId(id interface{}) { - file.assertMode(gfsWriting) - file.m.Lock() - file.doc.Id = id - file.m.Unlock() -} - -// Name returns the optional file name. An empty string will be returned -// in case it is unset. -func (file *GridFile) Name() string { - return file.doc.Filename -} - -// SetName changes the optional file name. An empty string may be used to -// unset it. -// -// It is a runtime error to call this function when the file is not open -// for writing. -func (file *GridFile) SetName(name string) { - file.assertMode(gfsWriting) - file.m.Lock() - file.doc.Filename = name - file.m.Unlock() -} - -// ContentType returns the optional file content type. An empty string will be -// returned in case it is unset. -func (file *GridFile) ContentType() string { - return file.doc.ContentType -} - -// SetContentType changes the optional file content type. An empty string may be -// used to unset it. -// -// It is a runtime error to call this function when the file is not open -// for writing. -func (file *GridFile) SetContentType(ctype string) { - file.assertMode(gfsWriting) - file.m.Lock() - file.doc.ContentType = ctype - file.m.Unlock() -} - -// GetMeta unmarshals the optional "metadata" field associated with the -// file into the result parameter. The meaning of keys under that field -// is user-defined. For example: -// -// result := struct{ INode int }{} -// err = file.GetMeta(&result) -// if err != nil { -// panic(err.String()) -// } -// fmt.Printf("inode: %d\n", result.INode) -// -func (file *GridFile) GetMeta(result interface{}) (err error) { - file.m.Lock() - if file.doc.Metadata != nil { - err = bson.Unmarshal(file.doc.Metadata.Data, result) - } - file.m.Unlock() - return -} - -// SetMeta changes the optional "metadata" field associated with the -// file. The meaning of keys under that field is user-defined. -// For example: -// -// file.SetMeta(bson.M{"inode": inode}) -// -// It is a runtime error to call this function when the file is not open -// for writing. -func (file *GridFile) SetMeta(metadata interface{}) { - file.assertMode(gfsWriting) - data, err := bson.Marshal(metadata) - file.m.Lock() - if err != nil && file.err == nil { - file.err = err - } else { - file.doc.Metadata = &bson.Raw{Data: data} - } - file.m.Unlock() -} - -// Size returns the file size in bytes. -func (file *GridFile) Size() (bytes int64) { - file.m.Lock() - bytes = file.doc.Length - file.m.Unlock() - return -} - -// MD5 returns the file MD5 as a hex-encoded string. -func (file *GridFile) MD5() (md5 string) { - return file.doc.MD5 -} - -// UploadDate returns the file upload time. -func (file *GridFile) UploadDate() time.Time { - return file.doc.UploadDate -} - -// SetUploadDate changes the file upload time. 
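GetMeta and SetMeta above are thin wrappers around bson marshalling of a user-defined metadata document. The round-trip they depend on, using the vendored bson package directly:

```go
package main

import (
	"fmt"

	"github.com/globalsign/mgo/bson"
)

func main() {
	// SetMeta marshals the user document up front, much like this:
	data, err := bson.Marshal(bson.M{"inode": 42})
	if err != nil {
		panic(err)
	}
	// GetMeta later unmarshals the raw bytes into the caller's struct.
	result := struct{ INode int }{}
	if err := bson.Unmarshal(data, &result); err != nil {
		panic(err)
	}
	fmt.Printf("inode: %d\n", result.INode)
}
```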
-// -// It is a runtime error to call this function when the file is not open -// for writing. -func (file *GridFile) SetUploadDate(t time.Time) { - file.assertMode(gfsWriting) - file.m.Lock() - file.doc.UploadDate = t - file.m.Unlock() -} - -// Close flushes any pending changes in case the file is being written -// to, waits for any background operations to finish, and closes the file. -// -// It's important to Close files whether they are being written to -// or read from, and to check the err result to ensure the operation -// completed successfully. -func (file *GridFile) Close() (err error) { - file.m.Lock() - defer file.m.Unlock() - if file.mode == gfsWriting { - if len(file.wbuf) > 0 && file.err == nil { - file.insertChunk(file.wbuf) - file.wbuf = file.wbuf[0:0] - } - file.completeWrite() - } else if file.mode == gfsReading && file.rcache != nil { - file.rcache.wait.Lock() - file.rcache = nil - } - file.mode = gfsClosed - debugf("GridFile %p: closed", file) - return file.err -} - -func (file *GridFile) completeWrite() { - for file.wpending > 0 { - debugf("GridFile %p: waiting for %d pending chunks to complete file write", file, file.wpending) - file.c.Wait() - } - if file.err == nil { - hexsum := hex.EncodeToString(file.wsum.Sum(nil)) - if file.doc.UploadDate.IsZero() { - file.doc.UploadDate = bson.Now() - } - file.doc.MD5 = hexsum - file.err = file.gfs.Files.Insert(file.doc) - } - if file.err != nil { - file.gfs.Chunks.RemoveAll(bson.D{{Name: "files_id", Value: file.doc.Id}}) - } - if file.err == nil { - index := Index{ - Key: []string{"files_id", "n"}, - Unique: true, - } - file.err = file.gfs.Chunks.EnsureIndex(index) - } -} - -// Abort cancels an in-progress write, preventing the file from being -// automically created and ensuring previously written chunks are -// removed when the file is closed. -// -// It is a runtime error to call Abort when the file was not opened -// for writing. -func (file *GridFile) Abort() { - if file.mode != gfsWriting { - panic("file.Abort must be called on file opened for writing") - } - file.err = errors.New("write aborted") -} - -// Write writes the provided data to the file and returns the -// number of bytes written and an error in case something -// wrong happened. -// -// The file will internally cache the data so that all but the last -// chunk sent to the database have the size defined by SetChunkSize. -// This also means that errors may be deferred until a future call -// to Write or Close. -// -// The parameters and behavior of this function turn the file -// into an io.Writer. -func (file *GridFile) Write(data []byte) (n int, err error) { - file.assertMode(gfsWriting) - file.m.Lock() - debugf("GridFile %p: writing %d bytes", file, len(data)) - defer file.m.Unlock() - - if file.err != nil { - return 0, file.err - } - - n = len(data) - file.doc.Length += int64(n) - chunkSize := file.doc.ChunkSize - - if len(file.wbuf)+len(data) < chunkSize { - file.wbuf = append(file.wbuf, data...) - return - } - - // First, flush file.wbuf complementing with data. - if len(file.wbuf) > 0 { - missing := chunkSize - len(file.wbuf) - if missing > len(data) { - missing = len(data) - } - file.wbuf = append(file.wbuf, data[:missing]...) - data = data[missing:] - file.insertChunk(file.wbuf) - file.wbuf = file.wbuf[0:0] - } - - // Then, flush all chunks from data without copying. 
- for len(data) > chunkSize { - size := chunkSize - if size > len(data) { - size = len(data) - } - file.insertChunk(data[:size]) - data = data[size:] - } - - // And append the rest for a future call. - file.wbuf = append(file.wbuf, data...) - - return n, file.err -} - -func (file *GridFile) insertChunk(data []byte) { - n := file.chunk - file.chunk++ - debugf("GridFile %p: adding to checksum: %q", file, string(data)) - file.wsum.Write(data) - - for file.doc.ChunkSize*file.wpending >= 1024*1024 { - // Hold on.. we got a MB pending. - file.c.Wait() - if file.err != nil { - return - } - } - - file.wpending++ - - debugf("GridFile %p: inserting chunk %d with %d bytes", file, n, len(data)) - - // We may not own the memory of data, so rather than - // simply copying it, we'll marshal the document ahead of time. - data, err := bson.Marshal(gfsChunk{bson.NewObjectId(), file.doc.Id, n, data}) - if err != nil { - file.err = err - return - } - - go func() { - err := file.gfs.Chunks.Insert(bson.Raw{Data: data}) - file.m.Lock() - file.wpending-- - if err != nil && file.err == nil { - file.err = err - } - file.c.Broadcast() - file.m.Unlock() - }() -} - -// Seek sets the offset for the next Read or Write on file to -// offset, interpreted according to whence: 0 means relative to -// the origin of the file, 1 means relative to the current offset, -// and 2 means relative to the end. It returns the new offset and -// an error, if any. -func (file *GridFile) Seek(offset int64, whence int) (pos int64, err error) { - file.m.Lock() - debugf("GridFile %p: seeking for %s (whence=%d)", file, offset, whence) - defer file.m.Unlock() - switch whence { - case os.SEEK_SET: - case os.SEEK_CUR: - offset += file.offset - case os.SEEK_END: - offset += file.doc.Length - default: - panic("unsupported whence value") - } - if offset > file.doc.Length { - return file.offset, errors.New("seek past end of file") - } - if offset == file.doc.Length { - // If we're seeking to the end of the file, - // no need to read anything. This enables - // a client to find the size of the file using only the - // io.ReadSeeker interface with low overhead. - file.offset = offset - return file.offset, nil - } - chunk := int(offset / int64(file.doc.ChunkSize)) - if chunk+1 == file.chunk && offset >= file.offset { - file.rbuf = file.rbuf[int(offset-file.offset):] - file.offset = offset - return file.offset, nil - } - file.offset = offset - file.chunk = chunk - file.rbuf = nil - file.rbuf, err = file.getChunk() - if err == nil { - file.rbuf = file.rbuf[int(file.offset-int64(chunk)*int64(file.doc.ChunkSize)):] - } - return file.offset, err -} - -// Read reads into b the next available data from the file and -// returns the number of bytes written and an error in case -// something wrong happened. At the end of the file, n will -// be zero and err will be set to io.EOF. -// -// The parameters and behavior of this function turn the file -// into an io.Reader. 
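GridFile.Write above is a classic carry-buffer chunker: top up the pending buffer to chunkSize, flush full chunks straight from the caller's slice, and keep the tail for later. The same logic isolated from GridFS; note this sketch flushes an exact-size chunk immediately, where the original (len(data) > chunkSize) holds it until the next Write or Close:

```go
package main

import "fmt"

// chunker isolates the Write strategy: buf carries the partial chunk
// between calls; out stands in for the async insertChunk calls.
type chunker struct {
	size int
	buf  []byte
	out  [][]byte
}

func (c *chunker) Write(data []byte) (int, error) {
	n := len(data)
	if len(c.buf)+len(data) < c.size {
		c.buf = append(c.buf, data...) // not enough for a chunk yet
		return n, nil
	}
	if len(c.buf) > 0 { // top up and flush the pending buffer first
		missing := c.size - len(c.buf)
		c.buf = append(c.buf, data[:missing]...)
		data = data[missing:]
		c.out = append(c.out, append([]byte(nil), c.buf...))
		c.buf = c.buf[:0]
	}
	for len(data) >= c.size { // flush full chunks straight from data
		c.out = append(c.out, append([]byte(nil), data[:c.size]...))
		data = data[c.size:]
	}
	c.buf = append(c.buf, data...) // keep the tail for the next call
	return n, nil
}

func main() {
	c := &chunker{size: 4}
	c.Write([]byte("hello world"))
	c.Write([]byte("!!"))
	fmt.Printf("chunks=%d pending=%d\n", len(c.out), len(c.buf))
}
```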
-func (file *GridFile) Read(b []byte) (n int, err error) { - file.assertMode(gfsReading) - file.m.Lock() - debugf("GridFile %p: reading at offset %d into buffer of length %d", file, file.offset, len(b)) - defer file.m.Unlock() - if file.offset == file.doc.Length { - return 0, io.EOF - } - for err == nil { - i := copy(b, file.rbuf) - n += i - file.offset += int64(i) - file.rbuf = file.rbuf[i:] - if i == len(b) || file.offset == file.doc.Length { - break - } - b = b[i:] - file.rbuf, err = file.getChunk() - } - return n, err -} - -func (file *GridFile) getChunk() (data []byte, err error) { - cache := file.rcache - file.rcache = nil - if cache != nil && cache.n == file.chunk { - debugf("GridFile %p: Getting chunk %d from cache", file, file.chunk) - cache.wait.Lock() - data, err = cache.data, cache.err - } else { - debugf("GridFile %p: Fetching chunk %d", file, file.chunk) - var doc gfsChunk - err = file.gfs.Chunks.Find(bson.D{{Name: "files_id", Value: file.doc.Id}, {Name: "n", Value: file.chunk}}).One(&doc) - data = doc.Data - } - file.chunk++ - if int64(file.chunk)*int64(file.doc.ChunkSize) < file.doc.Length { - // Read the next one in background. - cache = &gfsCachedChunk{n: file.chunk} - cache.wait.Lock() - debugf("GridFile %p: Scheduling chunk %d for background caching", file, file.chunk) - // Clone the session to avoid having it closed in between. - chunks := file.gfs.Chunks - session := chunks.Database.Session.Clone() - go func(id interface{}, n int) { - defer session.Close() - chunks = chunks.With(session) - var doc gfsChunk - cache.err = chunks.Find(bson.D{{Name: "files_id", Value: id}, {Name: "n", Value: n}}).One(&doc) - cache.data = doc.Data - cache.wait.Unlock() - }(file.doc.Id, file.chunk) - file.rcache = cache - } - debugf("Returning err: %#v", err) - return -} diff --git a/vendor/github.com/globalsign/mgo/internal/json/LICENSE b/vendor/github.com/globalsign/mgo/internal/json/LICENSE deleted file mode 100644 index 74487567..00000000 --- a/vendor/github.com/globalsign/mgo/internal/json/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
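getChunk above prefetches the next chunk in the background and synchronizes through a locked sync.Mutex used as a one-shot latch (gfsCachedChunk.wait): the reader's Lock blocks until the fetching goroutine Unlocks. The pattern in isolation:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// cached mirrors gfsCachedChunk: wait starts locked and is unlocked by
// the background fetch, so Lock doubles as "wait for completion".
type cached struct {
	wait sync.Mutex
	data []byte
	err  error
}

func prefetch() *cached {
	c := &cached{}
	c.wait.Lock()
	go func() {
		time.Sleep(20 * time.Millisecond) // stands in for the chunk query
		c.data = []byte("next chunk")
		c.wait.Unlock()
	}()
	return c
}

func main() {
	c := prefetch()
	c.wait.Lock() // blocks until the fetch above unlocks
	fmt.Println(string(c.data), c.err)
}
```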
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/globalsign/mgo/internal/json/decode.go b/vendor/github.com/globalsign/mgo/internal/json/decode.go deleted file mode 100644 index d5ca1f9a..00000000 --- a/vendor/github.com/globalsign/mgo/internal/json/decode.go +++ /dev/null @@ -1,1685 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Represents JSON data structure using native Go types: booleans, floats, -// strings, arrays, and maps. - -package json - -import ( - "bytes" - "encoding" - "encoding/base64" - "errors" - "fmt" - "reflect" - "runtime" - "strconv" - "unicode" - "unicode/utf16" - "unicode/utf8" -) - -// Unmarshal parses the JSON-encoded data and stores the result -// in the value pointed to by v. -// -// Unmarshal uses the inverse of the encodings that -// Marshal uses, allocating maps, slices, and pointers as necessary, -// with the following additional rules: -// -// To unmarshal JSON into a pointer, Unmarshal first handles the case of -// the JSON being the JSON literal null. In that case, Unmarshal sets -// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into -// the value pointed at by the pointer. If the pointer is nil, Unmarshal -// allocates a new value for it to point to. -// -// To unmarshal JSON into a struct, Unmarshal matches incoming object -// keys to the keys used by Marshal (either the struct field name or its tag), -// preferring an exact match but also accepting a case-insensitive match. -// Unmarshal will only set exported fields of the struct. -// -// To unmarshal JSON into an interface value, -// Unmarshal stores one of these in the interface value: -// -// bool, for JSON booleans -// float64, for JSON numbers -// string, for JSON strings -// []interface{}, for JSON arrays -// map[string]interface{}, for JSON objects -// nil for JSON null -// -// To unmarshal a JSON array into a slice, Unmarshal resets the slice length -// to zero and then appends each element to the slice. -// As a special case, to unmarshal an empty JSON array into a slice, -// Unmarshal replaces the slice with a new empty slice. -// -// To unmarshal a JSON array into a Go array, Unmarshal decodes -// JSON array elements into corresponding Go array elements. -// If the Go array is smaller than the JSON array, -// the additional JSON array elements are discarded. -// If the JSON array is smaller than the Go array, -// the additional Go array elements are set to zero values. -// -// To unmarshal a JSON object into a map, Unmarshal first establishes a map to -// use, If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal -// reuses the existing map, keeping existing entries. Unmarshal then stores key- -// value pairs from the JSON object into the map. The map's key type must -// either be a string or implement encoding.TextUnmarshaler. 
-// -// If a JSON value is not appropriate for a given target type, -// or if a JSON number overflows the target type, Unmarshal -// skips that field and completes the unmarshaling as best it can. -// If no more serious errors are encountered, Unmarshal returns -// an UnmarshalTypeError describing the earliest such error. -// -// The JSON null value unmarshals into an interface, map, pointer, or slice -// by setting that Go value to nil. Because null is often used in JSON to mean -// ``not present,'' unmarshaling a JSON null into any other Go type has no effect -// on the value and produces no error. -// -// When unmarshaling quoted strings, invalid UTF-8 or -// invalid UTF-16 surrogate pairs are not treated as an error. -// Instead, they are replaced by the Unicode replacement -// character U+FFFD. -// -func Unmarshal(data []byte, v interface{}) error { - // Check for well-formedness. - // Avoids filling out half a data structure - // before discovering a JSON syntax error. - var d decodeState - err := checkValid(data, &d.scan) - if err != nil { - return err - } - - d.init(data) - return d.unmarshal(v) -} - -// Unmarshaler is the interface implemented by types -// that can unmarshal a JSON description of themselves. -// The input can be assumed to be a valid encoding of -// a JSON value. UnmarshalJSON must copy the JSON data -// if it wishes to retain the data after returning. -type Unmarshaler interface { - UnmarshalJSON([]byte) error -} - -// An UnmarshalTypeError describes a JSON value that was -// not appropriate for a value of a specific Go type. -type UnmarshalTypeError struct { - Value string // description of JSON value - "bool", "array", "number -5" - Type reflect.Type // type of Go value it could not be assigned to - Offset int64 // error occurred after reading Offset bytes -} - -func (e *UnmarshalTypeError) Error() string { - return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() -} - -// An UnmarshalFieldError describes a JSON object key that -// led to an unexported (and therefore unwritable) struct field. -// (No longer used; kept for compatibility.) -type UnmarshalFieldError struct { - Key string - Type reflect.Type - Field reflect.StructField -} - -func (e *UnmarshalFieldError) Error() string { - return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() -} - -// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. -// (The argument to Unmarshal must be a non-nil pointer.) -type InvalidUnmarshalError struct { - Type reflect.Type -} - -func (e *InvalidUnmarshalError) Error() string { - if e.Type == nil { - return "json: Unmarshal(nil)" - } - - if e.Type.Kind() != reflect.Ptr { - return "json: Unmarshal(non-pointer " + e.Type.String() + ")" - } - return "json: Unmarshal(nil " + e.Type.String() + ")" -} - -func (d *decodeState) unmarshal(v interface{}) (err error) { - defer func() { - if r := recover(); r != nil { - if _, ok := r.(runtime.Error); ok { - panic(r) - } - err = r.(error) - } - }() - - rv := reflect.ValueOf(v) - if rv.Kind() != reflect.Ptr || rv.IsNil() { - return &InvalidUnmarshalError{reflect.TypeOf(v)} - } - - d.scan.reset() - // We decode rv not rv.Elem because the Unmarshaler interface - // test must be applied at the top level of the value. - d.value(rv) - return d.savedError -} - -// A Number represents a JSON number literal. -type Number string - -// String returns the literal text of the number. 
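Two rules from the Unmarshal documentation above are easy to miss: a JSON null is a no-op for most targets, and a type mismatch does not abort decoding, it is reported afterwards. This vendored fork tracks encoding/json on both points, so the standard library demonstrates them:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	v := struct {
		A int
		B string
	}{A: 7}
	err := json.Unmarshal([]byte(`{"A": null, "B": 3}`), &v)
	// A keeps 7 (null is a no-op for an int target); B stays "" and
	// err carries the number-into-string UnmarshalTypeError.
	fmt.Println(v.A, v.B == "", err != nil) // 7 true true
}
```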
-func (n Number) String() string { return string(n) } - -// Float64 returns the number as a float64. -func (n Number) Float64() (float64, error) { - return strconv.ParseFloat(string(n), 64) -} - -// Int64 returns the number as an int64. -func (n Number) Int64() (int64, error) { - return strconv.ParseInt(string(n), 10, 64) -} - -// isValidNumber reports whether s is a valid JSON number literal. -func isValidNumber(s string) bool { - // This function implements the JSON numbers grammar. - // See https://tools.ietf.org/html/rfc7159#section-6 - // and http://json.org/number.gif - - if s == "" { - return false - } - - // Optional - - if s[0] == '-' { - s = s[1:] - if s == "" { - return false - } - } - - // Digits - switch { - default: - return false - - case s[0] == '0': - s = s[1:] - - case '1' <= s[0] && s[0] <= '9': - s = s[1:] - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - } - } - - // . followed by 1 or more digits. - if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { - s = s[2:] - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - } - } - - // e or E followed by an optional - or + and - // 1 or more digits. - if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { - s = s[1:] - if s[0] == '+' || s[0] == '-' { - s = s[1:] - if s == "" { - return false - } - } - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - } - } - - // Make sure we are at the end. - return s == "" -} - -// decodeState represents the state while decoding a JSON value. -type decodeState struct { - data []byte - off int // read offset in data - scan scanner - nextscan scanner // for calls to nextValue - savedError error - useNumber bool - ext Extension -} - -// errPhase is used for errors that should not happen unless -// there is a bug in the JSON decoder or something is editing -// the data slice while the decoder executes. -var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?") - -func (d *decodeState) init(data []byte) *decodeState { - d.data = data - d.off = 0 - d.savedError = nil - return d -} - -// error aborts the decoding by panicking with err. -func (d *decodeState) error(err error) { - panic(err) -} - -// saveError saves the first err it is called with, -// for reporting at the end of the unmarshal. -func (d *decodeState) saveError(err error) { - if d.savedError == nil { - d.savedError = err - } -} - -// next cuts off and returns the next full JSON value in d.data[d.off:]. -// The next value is known to be an object or array, not a literal. -func (d *decodeState) next() []byte { - c := d.data[d.off] - item, rest, err := nextValue(d.data[d.off:], &d.nextscan) - if err != nil { - d.error(err) - } - d.off = len(d.data) - len(rest) - - // Our scanner has seen the opening brace/bracket - // and thinks we're still in the middle of the object. - // invent a closing brace/bracket to get it out. - if c == '{' { - d.scan.step(&d.scan, '}') - } else if c == '[' { - d.scan.step(&d.scan, ']') - } else { - // Was inside a function name. Get out of it. - d.scan.step(&d.scan, '(') - d.scan.step(&d.scan, ')') - } - - return item -} - -// scanWhile processes bytes in d.data[d.off:] until it -// receives a scan code not equal to op. -// It updates d.off and returns the new scan code. 
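isValidNumber above hand-codes the RFC 7159 number grammar. The same acceptance set can be probed through the standard decoder, whose json.Number this file mirrors; a quick sketch, not the fork's own tests:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func accepted(s string) bool {
	var n json.Number
	return json.Unmarshal([]byte(s), &n) == nil
}

func main() {
	for _, s := range []string{"0", "-12.5e3", "01", "1.", "1e", `"x"`} {
		fmt.Printf("%-8s %v\n", s, accepted(s))
	}
	// true, true, false, false, false, false: leading zeros, a bare
	// trailing dot, and an empty exponent all fall outside the grammar.
}
```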
-func (d *decodeState) scanWhile(op int) int { - var newOp int - for { - if d.off >= len(d.data) { - newOp = d.scan.eof() - d.off = len(d.data) + 1 // mark processed EOF with len+1 - } else { - c := d.data[d.off] - d.off++ - newOp = d.scan.step(&d.scan, c) - } - if newOp != op { - break - } - } - return newOp -} - -// value decodes a JSON value from d.data[d.off:] into the value. -// it updates d.off to point past the decoded value. -func (d *decodeState) value(v reflect.Value) { - if !v.IsValid() { - _, rest, err := nextValue(d.data[d.off:], &d.nextscan) - if err != nil { - d.error(err) - } - d.off = len(d.data) - len(rest) - - // d.scan thinks we're still at the beginning of the item. - // Feed in an empty string - the shortest, simplest value - - // so that it knows we got to the end of the value. - if d.scan.redo { - // rewind. - d.scan.redo = false - d.scan.step = stateBeginValue - } - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '"') - - n := len(d.scan.parseState) - if n > 0 && d.scan.parseState[n-1] == parseObjectKey { - // d.scan thinks we just read an object key; finish the object - d.scan.step(&d.scan, ':') - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '}') - } - - return - } - - switch op := d.scanWhile(scanSkipSpace); op { - default: - d.error(errPhase) - - case scanBeginArray: - d.array(v) - - case scanBeginObject: - d.object(v) - - case scanBeginLiteral: - d.literal(v) - - case scanBeginName: - d.name(v) - } -} - -type unquotedValue struct{} - -// valueQuoted is like value but decodes a -// quoted string literal or literal null into an interface value. -// If it finds anything other than a quoted string literal or null, -// valueQuoted returns unquotedValue{}. -func (d *decodeState) valueQuoted() interface{} { - switch op := d.scanWhile(scanSkipSpace); op { - default: - d.error(errPhase) - - case scanBeginArray: - d.array(reflect.Value{}) - - case scanBeginObject: - d.object(reflect.Value{}) - - case scanBeginName: - switch v := d.nameInterface().(type) { - case nil, string: - return v - } - - case scanBeginLiteral: - switch v := d.literalInterface().(type) { - case nil, string: - return v - } - } - return unquotedValue{} -} - -// indirect walks down v allocating pointers as needed, -// until it gets to a non-pointer. -// if it encounters an Unmarshaler, indirect stops and returns that. -// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. -func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { - // If v is a named type and is addressable, - // start with its address, so that if the type has pointer methods, - // we find them. - if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { - v = v.Addr() - } - for { - // Load value from interface, but only if the result will be - // usefully addressable. 
- if v.Kind() == reflect.Interface && !v.IsNil() { - e := v.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { - v = e - continue - } - } - - if v.Kind() != reflect.Ptr { - break - } - - if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { - break - } - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - if v.Type().NumMethod() > 0 { - if u, ok := v.Interface().(Unmarshaler); ok { - return u, nil, v - } - if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { - return nil, u, v - } - } - v = v.Elem() - } - return nil, nil, v -} - -// array consumes an array from d.data[d.off-1:], decoding into the value v. -// the first byte of the array ('[') has been read already. -func (d *decodeState) array(v reflect.Value) { - // Check for unmarshaler. - u, ut, pv := d.indirect(v, false) - if u != nil { - d.off-- - err := u.UnmarshalJSON(d.next()) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) - d.off-- - d.next() - return - } - - v = pv - - // Check type of target. - switch v.Kind() { - case reflect.Interface: - if v.NumMethod() == 0 { - // Decoding into nil interface? Switch to non-reflect code. - v.Set(reflect.ValueOf(d.arrayInterface())) - return - } - // Otherwise it's invalid. - fallthrough - default: - d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) - d.off-- - d.next() - return - case reflect.Array: - case reflect.Slice: - break - } - - i := 0 - for { - // Look ahead for ] - can only happen on first iteration. - op := d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - - // Back up so d.value can have the byte we just read. - d.off-- - d.scan.undo(op) - - // Get element of array, growing if necessary. - if v.Kind() == reflect.Slice { - // Grow slice if necessary - if i >= v.Cap() { - newcap := v.Cap() + v.Cap()/2 - if newcap < 4 { - newcap = 4 - } - newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) - reflect.Copy(newv, v) - v.Set(newv) - } - if i >= v.Len() { - v.SetLen(i + 1) - } - } - - if i < v.Len() { - // Decode into element. - d.value(v.Index(i)) - } else { - // Ran out of fixed array: skip. - d.value(reflect.Value{}) - } - i++ - - // Next token must be , or ]. - op = d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - if op != scanArrayValue { - d.error(errPhase) - } - } - - if i < v.Len() { - if v.Kind() == reflect.Array { - // Array. Zero the rest. - z := reflect.Zero(v.Type().Elem()) - for ; i < v.Len(); i++ { - v.Index(i).Set(z) - } - } else { - v.SetLen(i) - } - } - if i == 0 && v.Kind() == reflect.Slice { - v.Set(reflect.MakeSlice(v.Type(), 0, 0)) - } -} - -var nullLiteral = []byte("null") -var textUnmarshalerType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem() - -// object consumes an object from d.data[d.off-1:], decoding into the value v. -// the first byte ('{') of the object has been read already. -func (d *decodeState) object(v reflect.Value) { - // Check for unmarshaler. - u, ut, pv := d.indirect(v, false) - if d.storeKeyed(pv) { - return - } - if u != nil { - d.off-- - err := u.UnmarshalJSON(d.next()) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - v = pv - - // Decoding into nil interface? Switch to non-reflect code. 
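The array decoder above grows slices geometrically, zero-fills the unused tail of fixed-size arrays, and silently drops elements past an array's length. The standard library preserves the same observable behavior:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var s []int
	var short [2]int
	var long [5]int
	_ = json.Unmarshal([]byte(`[1,2,3]`), &s)
	_ = json.Unmarshal([]byte(`[1,2,3]`), &short) // extra elements dropped
	_ = json.Unmarshal([]byte(`[1,2,3]`), &long)  // tail zeroed
	fmt.Println(s, short, long) // [1 2 3] [1 2] [1 2 3 0 0]
}
```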
- if v.Kind() == reflect.Interface && v.NumMethod() == 0 { - v.Set(reflect.ValueOf(d.objectInterface())) - return - } - - // Check type of target: - // struct or - // map[string]T or map[encoding.TextUnmarshaler]T - switch v.Kind() { - case reflect.Map: - // Map key must either have string kind or be an encoding.TextUnmarshaler. - t := v.Type() - if t.Key().Kind() != reflect.String && - !reflect.PtrTo(t.Key()).Implements(textUnmarshalerType) { - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - if v.IsNil() { - v.Set(reflect.MakeMap(t)) - } - case reflect.Struct: - - default: - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - - var mapElem reflect.Value - - empty := true - for { - // Read opening " of string key or closing }. - op := d.scanWhile(scanSkipSpace) - if op == scanEndObject { - if !empty && !d.ext.trailingCommas { - d.syntaxError("beginning of object key string") - } - break - } - empty = false - if op == scanBeginName { - if !d.ext.unquotedKeys { - d.syntaxError("beginning of object key string") - } - } else if op != scanBeginLiteral { - d.error(errPhase) - } - unquotedKey := op == scanBeginName - - // Read key. - start := d.off - 1 - op = d.scanWhile(scanContinue) - item := d.data[start : d.off-1] - var key []byte - if unquotedKey { - key = item - // TODO Fix code below to quote item when necessary. - } else { - var ok bool - key, ok = unquoteBytes(item) - if !ok { - d.error(errPhase) - } - } - - // Figure out field corresponding to key. - var subv reflect.Value - destring := false // whether the value is wrapped in a string to be decoded first - - if v.Kind() == reflect.Map { - elemType := v.Type().Elem() - if !mapElem.IsValid() { - mapElem = reflect.New(elemType).Elem() - } else { - mapElem.Set(reflect.Zero(elemType)) - } - subv = mapElem - } else { - var f *field - fields := cachedTypeFields(v.Type()) - for i := range fields { - ff := &fields[i] - if bytes.Equal(ff.nameBytes, key) { - f = ff - break - } - if f == nil && ff.equalFold(ff.nameBytes, key) { - f = ff - } - } - if f != nil { - subv = v - destring = f.quoted - for _, i := range f.index { - if subv.Kind() == reflect.Ptr { - if subv.IsNil() { - subv.Set(reflect.New(subv.Type().Elem())) - } - subv = subv.Elem() - } - subv = subv.Field(i) - } - } - } - - // Read : before value. - if op == scanSkipSpace { - op = d.scanWhile(scanSkipSpace) - } - if op != scanObjectKey { - d.error(errPhase) - } - - // Read value. - if destring { - switch qv := d.valueQuoted().(type) { - case nil: - d.literalStore(nullLiteral, subv, false) - case string: - d.literalStore([]byte(qv), subv, true) - default: - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) - } - } else { - d.value(subv) - } - - // Write value back to map; - // if using struct, subv points into struct already. - if v.Kind() == reflect.Map { - kt := v.Type().Key() - var kv reflect.Value - switch { - case kt.Kind() == reflect.String: - kv = reflect.ValueOf(key).Convert(v.Type().Key()) - case reflect.PtrTo(kt).Implements(textUnmarshalerType): - kv = reflect.New(v.Type().Key()) - d.literalStore(item, kv, true) - kv = kv.Elem() - default: - panic("json: Unexpected key type") // should never occur - } - v.SetMapIndex(kv, subv) - } - - // Next token must be , or }. 
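The object decoder above accepts map keys whose pointer type implements encoding.TextUnmarshaler. Note that this fork checks the string kind first, so a non-string key type shows the TextUnmarshaler path unambiguously; a standard encoding/json sketch with an invented key type:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strconv"
	"strings"
)

// port is a hypothetical key type; *port implementing UnmarshalText is
// what routes map keys through the TextUnmarshaler branch.
type port int

func (p *port) UnmarshalText(b []byte) error {
	n, err := strconv.Atoi(strings.TrimPrefix(string(b), "port-"))
	*p = port(n)
	return err
}

func main() {
	var m map[port]string
	err := json.Unmarshal([]byte(`{"port-80":"http","port-443":"https"}`), &m)
	fmt.Println(m, err) // map[80:http 443:https] <nil>
}
```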
- op = d.scanWhile(scanSkipSpace) - if op == scanEndObject { - break - } - if op != scanObjectValue { - d.error(errPhase) - } - } -} - -// isNull returns whether there's a null literal at the provided offset. -func (d *decodeState) isNull(off int) bool { - if off+4 >= len(d.data) || d.data[off] != 'n' || d.data[off+1] != 'u' || d.data[off+2] != 'l' || d.data[off+3] != 'l' { - return false - } - d.nextscan.reset() - for i, c := range d.data[off:] { - if i > 4 { - return false - } - switch d.nextscan.step(&d.nextscan, c) { - case scanContinue, scanBeginName: - continue - } - break - } - return true -} - -// name consumes a const or function from d.data[d.off-1:], decoding into the value v. -// the first byte of the function name has been read already. -func (d *decodeState) name(v reflect.Value) { - if d.isNull(d.off - 1) { - d.literal(v) - return - } - - // Check for unmarshaler. - u, ut, pv := d.indirect(v, false) - if d.storeKeyed(pv) { - return - } - if u != nil { - d.off-- - err := u.UnmarshalJSON(d.next()) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over function in input - return - } - v = pv - - // Decoding into nil interface? Switch to non-reflect code. - if v.Kind() == reflect.Interface && v.NumMethod() == 0 { - out := d.nameInterface() - if out == nil { - v.Set(reflect.Zero(v.Type())) - } else { - v.Set(reflect.ValueOf(out)) - } - return - } - - nameStart := d.off - 1 - - op := d.scanWhile(scanContinue) - - name := d.data[nameStart : d.off-1] - if op != scanParam { - // Back up so the byte just read is consumed next. - d.off-- - d.scan.undo(op) - if l, ok := d.convertLiteral(name); ok { - d.storeValue(v, l) - return - } - d.error(&SyntaxError{fmt.Sprintf("json: unknown constant %q", name), int64(d.off)}) - } - - funcName := string(name) - funcData := d.ext.funcs[funcName] - if funcData.key == "" { - d.error(fmt.Errorf("json: unknown function %q", funcName)) - } - - // Check type of target: - // struct or - // map[string]T or map[encoding.TextUnmarshaler]T - switch v.Kind() { - case reflect.Map: - // Map key must either have string kind or be an encoding.TextUnmarshaler. - t := v.Type() - if t.Key().Kind() != reflect.String && - !reflect.PtrTo(t.Key()).Implements(textUnmarshalerType) { - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - if v.IsNil() { - v.Set(reflect.MakeMap(t)) - } - case reflect.Struct: - - default: - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - - // TODO Fix case of func field as map. - //topv := v - - // Figure out field corresponding to function. - key := []byte(funcData.key) - if v.Kind() == reflect.Map { - elemType := v.Type().Elem() - v = reflect.New(elemType).Elem() - } else { - var f *field - fields := cachedTypeFields(v.Type()) - for i := range fields { - ff := &fields[i] - if bytes.Equal(ff.nameBytes, key) { - f = ff - break - } - if f == nil && ff.equalFold(ff.nameBytes, key) { - f = ff - } - } - if f != nil { - for _, i := range f.index { - if v.Kind() == reflect.Ptr { - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - v = v.Elem() - } - v = v.Field(i) - } - if v.Kind() == reflect.Ptr { - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - v = v.Elem() - } - } - } - - // Check for unmarshaler on func field itself. 
-	u, _, _ = d.indirect(v, false)
-	if u != nil {
-		d.off = nameStart
-		err := u.UnmarshalJSON(d.next())
-		if err != nil {
-			d.error(err)
-		}
-		return
-	}
-
-	var mapElem reflect.Value
-
-	// Parse function arguments.
-	for i := 0; ; i++ {
-		// closing ) - can only happen on first iteration.
-		op := d.scanWhile(scanSkipSpace)
-		if op == scanEndParams {
-			break
-		}
-
-		// Back up so d.value can have the byte we just read.
-		d.off--
-		d.scan.undo(op)
-
-		if i >= len(funcData.args) {
-			d.error(fmt.Errorf("json: too many arguments for function %s", funcName))
-		}
-		key := []byte(funcData.args[i])
-
-		// Figure out field corresponding to key.
-		var subv reflect.Value
-		destring := false // whether the value is wrapped in a string to be decoded first
-
-		if v.Kind() == reflect.Map {
-			elemType := v.Type().Elem()
-			if !mapElem.IsValid() {
-				mapElem = reflect.New(elemType).Elem()
-			} else {
-				mapElem.Set(reflect.Zero(elemType))
-			}
-			subv = mapElem
-		} else {
-			var f *field
-			fields := cachedTypeFields(v.Type())
-			for i := range fields {
-				ff := &fields[i]
-				if bytes.Equal(ff.nameBytes, key) {
-					f = ff
-					break
-				}
-				if f == nil && ff.equalFold(ff.nameBytes, key) {
-					f = ff
-				}
-			}
-			if f != nil {
-				subv = v
-				destring = f.quoted
-				for _, i := range f.index {
-					if subv.Kind() == reflect.Ptr {
-						if subv.IsNil() {
-							subv.Set(reflect.New(subv.Type().Elem()))
-						}
-						subv = subv.Elem()
-					}
-					subv = subv.Field(i)
-				}
-			}
-		}
-
-		// Read value.
-		if destring {
-			switch qv := d.valueQuoted().(type) {
-			case nil:
-				d.literalStore(nullLiteral, subv, false)
-			case string:
-				d.literalStore([]byte(qv), subv, true)
-			default:
-				d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type()))
-			}
-		} else {
-			d.value(subv)
-		}
-
-		// Write value back to map;
-		// if using struct, subv points into struct already.
-		if v.Kind() == reflect.Map {
-			kt := v.Type().Key()
-			var kv reflect.Value
-			switch {
-			case kt.Kind() == reflect.String:
-				kv = reflect.ValueOf(key).Convert(v.Type().Key())
-			case reflect.PtrTo(kt).Implements(textUnmarshalerType):
-				kv = reflect.New(v.Type().Key())
-				d.literalStore(key, kv, true)
-				kv = kv.Elem()
-			default:
-				panic("json: Unexpected key type") // should never occur
-			}
-			v.SetMapIndex(kv, subv)
-		}
-
-		// Next token must be , or ).
-		op = d.scanWhile(scanSkipSpace)
-		if op == scanEndParams {
-			break
-		}
-		if op != scanParam {
-			d.error(errPhase)
-		}
-	}
-}
-
-// keyed attempts to decode an object or function using a keyed doc extension,
-// and returns the value and true on success, or nil and false otherwise.
-func (d *decodeState) keyed() (interface{}, bool) {
-	if len(d.ext.keyed) == 0 {
-		return nil, false
-	}
-
-	unquote := false
-
-	// Look-ahead first key to check for a keyed document extension.
-	d.nextscan.reset()
-	var start, end int
-	for i, c := range d.data[d.off-1:] {
-		switch op := d.nextscan.step(&d.nextscan, c); op {
-		case scanSkipSpace, scanContinue, scanBeginObject:
-			continue
-		case scanBeginLiteral, scanBeginName:
-			unquote = op == scanBeginLiteral
-			start = i
-			continue
-		}
-		end = i
-		break
-	}
-
-	name := bytes.Trim(d.data[d.off-1+start:d.off-1+end], " \n\t")
-
-	var key []byte
-	var ok bool
-	if unquote {
-		key, ok = unquoteBytes(name)
-		if !ok {
-			d.error(errPhase)
-		}
-	} else {
-		funcData, ok := d.ext.funcs[string(name)]
-		if !ok {
-			return nil, false
-		}
-		key = []byte(funcData.key)
-	}
-
-	decode, ok := d.ext.keyed[string(key)]
-	if !ok {
-		return nil, false
-	}
-
-	d.off--
-	out, err := decode(d.next())
-	if err != nil {
-		d.error(err)
-	}
-	return out, true
-}
-
-func (d *decodeState) storeKeyed(v reflect.Value) bool {
-	keyed, ok := d.keyed()
-	if !ok {
-		return false
-	}
-	d.storeValue(v, keyed)
-	return true
-}
-
-var (
-	trueBytes  = []byte("true")
-	falseBytes = []byte("false")
-	nullBytes  = []byte("null")
-)
-
-func (d *decodeState) storeValue(v reflect.Value, from interface{}) {
-	switch from {
-	case nil:
-		d.literalStore(nullBytes, v, false)
-		return
-	case true:
-		d.literalStore(trueBytes, v, false)
-		return
-	case false:
-		d.literalStore(falseBytes, v, false)
-		return
-	}
-	fromv := reflect.ValueOf(from)
-	for fromv.Kind() == reflect.Ptr && !fromv.IsNil() {
-		fromv = fromv.Elem()
-	}
-	fromt := fromv.Type()
-	for v.Kind() == reflect.Ptr && !v.IsNil() {
-		v = v.Elem()
-	}
-	vt := v.Type()
-	if fromt.AssignableTo(vt) {
-		v.Set(fromv)
-	} else if fromt.ConvertibleTo(vt) {
-		v.Set(fromv.Convert(vt))
-	} else {
-		d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
-	}
-}
-
-func (d *decodeState) convertLiteral(name []byte) (interface{}, bool) {
-	if len(name) == 0 {
-		return nil, false
-	}
-	switch name[0] {
-	case 't':
-		if bytes.Equal(name, trueBytes) {
-			return true, true
-		}
-	case 'f':
-		if bytes.Equal(name, falseBytes) {
-			return false, true
-		}
-	case 'n':
-		if bytes.Equal(name, nullBytes) {
-			return nil, true
-		}
-	}
-	if l, ok := d.ext.consts[string(name)]; ok {
-		return l, true
-	}
-	return nil, false
-}
-
-// literal consumes a literal from d.data[d.off-1:], decoding into the value v.
-// The first byte of the literal has been read already
-// (that's how the caller knows it's a literal).
-func (d *decodeState) literal(v reflect.Value) {
-	// All bytes inside literal return scanContinue op code.
-	start := d.off - 1
-	op := d.scanWhile(scanContinue)
-
-	// Scan read one byte too far; back up.
-	d.off--
-	d.scan.undo(op)
-
-	d.literalStore(d.data[start:d.off], v, false)
-}
-
-// convertNumber converts the number literal s to a float64 or a Number
-// depending on the setting of d.useNumber.
-func (d *decodeState) convertNumber(s string) (interface{}, error) {
-	if d.useNumber {
-		return Number(s), nil
-	}
-	f, err := strconv.ParseFloat(s, 64)
-	if err != nil {
-		return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)}
-	}
-	return f, nil
-}
-
-var numberType = reflect.TypeOf(Number(""))
-
-// literalStore decodes a literal stored in item into v.
-//
-// fromQuoted indicates whether this literal came from unwrapping a
-// string from the ",string" struct tag option. this is used only to
-// produce more helpful error messages.
-func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) {
-	// Check for unmarshaler.
-	if len(item) == 0 {
-		//Empty string given
-		d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
-		return
-	}
-	wantptr := item[0] == 'n' // null
-	u, ut, pv := d.indirect(v, wantptr)
-	if u != nil {
-		err := u.UnmarshalJSON(item)
-		if err != nil {
-			d.error(err)
-		}
-		return
-	}
-	if ut != nil {
-		if item[0] != '"' {
-			if fromQuoted {
-				d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
-			} else {
-				d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
-			}
-			return
-		}
-		s, ok := unquoteBytes(item)
-		if !ok {
-			if fromQuoted {
-				d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
-			} else {
-				d.error(errPhase)
-			}
-		}
-		err := ut.UnmarshalText(s)
-		if err != nil {
-			d.error(err)
-		}
-		return
-	}
-
-	v = pv
-
-	switch c := item[0]; c {
-	case 'n': // null
-		switch v.Kind() {
-		case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
-			v.Set(reflect.Zero(v.Type()))
-			// otherwise, ignore null for primitives/string
-		}
-	case 't', 'f': // true, false
-		value := c == 't'
-		switch v.Kind() {
-		default:
-			if fromQuoted {
-				d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
-			} else {
-				d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)})
-			}
-		case reflect.Bool:
-			v.SetBool(value)
-		case reflect.Interface:
-			if v.NumMethod() == 0 {
-				v.Set(reflect.ValueOf(value))
-			} else {
-				d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)})
-			}
-		}
-
-	case '"': // string
-		s, ok := unquoteBytes(item)
-		if !ok {
-			if fromQuoted {
-				d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
-			} else {
-				d.error(errPhase)
-			}
-		}
-		switch v.Kind() {
-		default:
-			d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
-		case reflect.Slice:
-			if v.Type().Elem().Kind() != reflect.Uint8 {
-				d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
-				break
-			}
-			b := make([]byte, base64.StdEncoding.DecodedLen(len(s)))
-			n, err := base64.StdEncoding.Decode(b, s)
-			if err != nil {
-				d.saveError(err)
-				break
-			}
-			v.SetBytes(b[:n])
-		case reflect.String:
-			v.SetString(string(s))
-		case reflect.Interface:
-			if v.NumMethod() == 0 {
-				v.Set(reflect.ValueOf(string(s)))
-			} else {
-				d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
-			}
-		}
-
-	default: // number
-		if c != '-' && (c < '0' || c > '9') {
-			if fromQuoted {
-				d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
-			} else {
-				d.error(errPhase)
-			}
-		}
-		s := string(item)
-		switch v.Kind() {
-		default:
-			if v.Kind() == reflect.String && v.Type() == numberType {
-				v.SetString(s)
-				if !isValidNumber(s) {
-					d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item))
-				}
-				break
-			}
-			if fromQuoted {
-				d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
-			} else {
-				d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)})
-			}
-		case reflect.Interface:
-			n, err := d.convertNumber(s)
-			if err != nil {
-				d.saveError(err)
-				break
-			}
-			if v.NumMethod() != 0 {
-				d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)})
-				break
-			}
-			v.Set(reflect.ValueOf(n))
-
-		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-			n, err := strconv.ParseInt(s, 10, 64)
-			if err != nil || v.OverflowInt(n) {
-				d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
-				break
-			}
-			v.SetInt(n)
-
-		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-			n, err := strconv.ParseUint(s, 10, 64)
-			if err != nil || v.OverflowUint(n) {
-				d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
-				break
-			}
-			v.SetUint(n)
-
-		case reflect.Float32, reflect.Float64:
-			n, err := strconv.ParseFloat(s, v.Type().Bits())
-			if err != nil || v.OverflowFloat(n) {
-				d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
-				break
-			}
-			v.SetFloat(n)
-		}
-	}
-}
-
-// The xxxInterface routines build up a value to be stored
-// in an empty interface. They are not strictly necessary,
-// but they avoid the weight of reflection in this common case.
-
-// valueInterface is like value but returns interface{}
-func (d *decodeState) valueInterface() interface{} {
-	switch d.scanWhile(scanSkipSpace) {
-	default:
-		d.error(errPhase)
-		panic("unreachable")
-	case scanBeginArray:
-		return d.arrayInterface()
-	case scanBeginObject:
-		return d.objectInterface()
-	case scanBeginLiteral:
-		return d.literalInterface()
-	case scanBeginName:
-		return d.nameInterface()
-	}
-}
-
-func (d *decodeState) syntaxError(expected string) {
-	msg := fmt.Sprintf("invalid character '%c' looking for %s", d.data[d.off-1], expected)
-	d.error(&SyntaxError{msg, int64(d.off)})
-}
-
-// arrayInterface is like array but returns []interface{}.
-func (d *decodeState) arrayInterface() []interface{} {
-	var v = make([]interface{}, 0)
-	for {
-		// Look ahead for ] - can only happen on first iteration.
-		op := d.scanWhile(scanSkipSpace)
-		if op == scanEndArray {
-			if len(v) > 0 && !d.ext.trailingCommas {
-				d.syntaxError("beginning of value")
-			}
-			break
-		}
-
-		// Back up so d.value can have the byte we just read.
-		d.off--
-		d.scan.undo(op)
-
-		v = append(v, d.valueInterface())
-
-		// Next token must be , or ].
-		op = d.scanWhile(scanSkipSpace)
-		if op == scanEndArray {
-			break
-		}
-		if op != scanArrayValue {
-			d.error(errPhase)
-		}
-	}
-	return v
-}
-
-// objectInterface is like object but returns map[string]interface{}.
-func (d *decodeState) objectInterface() interface{} {
-	v, ok := d.keyed()
-	if ok {
-		return v
-	}
-
-	m := make(map[string]interface{})
-	for {
-		// Read opening " of string key or closing }.
-		op := d.scanWhile(scanSkipSpace)
-		if op == scanEndObject {
-			if len(m) > 0 && !d.ext.trailingCommas {
-				d.syntaxError("beginning of object key string")
-			}
-			break
-		}
-		if op == scanBeginName {
-			if !d.ext.unquotedKeys {
-				d.syntaxError("beginning of object key string")
-			}
-		} else if op != scanBeginLiteral {
-			d.error(errPhase)
-		}
-		unquotedKey := op == scanBeginName
-
-		// Read string key.
-		start := d.off - 1
-		op = d.scanWhile(scanContinue)
-		item := d.data[start : d.off-1]
-		var key string
-		if unquotedKey {
-			key = string(item)
-		} else {
-			var ok bool
-			key, ok = unquote(item)
-			if !ok {
-				d.error(errPhase)
-			}
-		}
-
-		// Read : before value.
-		if op == scanSkipSpace {
-			op = d.scanWhile(scanSkipSpace)
-		}
-		if op != scanObjectKey {
-			d.error(errPhase)
-		}
-
-		// Read value.
-		m[key] = d.valueInterface()
-
-		// Next token must be , or }.
-		op = d.scanWhile(scanSkipSpace)
-		if op == scanEndObject {
-			break
-		}
-		if op != scanObjectValue {
-			d.error(errPhase)
-		}
-	}
-	return m
-}
-
-// literalInterface is like literal but returns an interface value.
-func (d *decodeState) literalInterface() interface{} {
-	// All bytes inside literal return scanContinue op code.
-	start := d.off - 1
-	op := d.scanWhile(scanContinue)
-
-	// Scan read one byte too far; back up.
-	d.off--
-	d.scan.undo(op)
-	item := d.data[start:d.off]
-
-	switch c := item[0]; c {
-	case 'n': // null
-		return nil
-
-	case 't', 'f': // true, false
-		return c == 't'
-
-	case '"': // string
-		s, ok := unquote(item)
-		if !ok {
-			d.error(errPhase)
-		}
-		return s
-
-	default: // number
-		if c != '-' && (c < '0' || c > '9') {
-			d.error(errPhase)
-		}
-		n, err := d.convertNumber(string(item))
-		if err != nil {
-			d.saveError(err)
-		}
-		return n
-	}
-}
-
-// nameInterface is like function but returns map[string]interface{}.
-func (d *decodeState) nameInterface() interface{} {
-	v, ok := d.keyed()
-	if ok {
-		return v
-	}
-
-	nameStart := d.off - 1
-
-	op := d.scanWhile(scanContinue)
-
-	name := d.data[nameStart : d.off-1]
-	if op != scanParam {
-		// Back up so the byte just read is consumed next.
-		d.off--
-		d.scan.undo(op)
-		if l, ok := d.convertLiteral(name); ok {
-			return l
-		}
-		d.error(&SyntaxError{fmt.Sprintf("json: unknown constant %q", name), int64(d.off)})
-	}
-
-	funcName := string(name)
-	funcData := d.ext.funcs[funcName]
-	if funcData.key == "" {
-		d.error(fmt.Errorf("json: unknown function %q", funcName))
-	}
-
-	m := make(map[string]interface{})
-	for i := 0; ; i++ {
-		// Look ahead for ) - can only happen on first iteration.
-		op := d.scanWhile(scanSkipSpace)
-		if op == scanEndParams {
-			break
-		}
-
-		// Back up so d.value can have the byte we just read.
-		d.off--
-		d.scan.undo(op)
-
-		if i >= len(funcData.args) {
-			d.error(fmt.Errorf("json: too many arguments for function %s", funcName))
-		}
-		m[funcData.args[i]] = d.valueInterface()
-
-		// Next token must be , or ).
-		op = d.scanWhile(scanSkipSpace)
-		if op == scanEndParams {
-			break
-		}
-		if op != scanParam {
-			d.error(errPhase)
-		}
-	}
-	return map[string]interface{}{funcData.key: m}
-}
-
-// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
-// or it returns -1.
-func getu4(s []byte) rune {
-	if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
-		return -1
-	}
-	r, err := strconv.ParseUint(string(s[2:6]), 16, 64)
-	if err != nil {
-		return -1
-	}
-	return rune(r)
-}
-
-// unquote converts a quoted JSON string literal s into an actual string t.
-// The rules are different than for Go, so cannot use strconv.Unquote.
-func unquote(s []byte) (t string, ok bool) {
-	s, ok = unquoteBytes(s)
-	t = string(s)
-	return
-}
-
-func unquoteBytes(s []byte) (t []byte, ok bool) {
-	if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
-		return
-	}
-	s = s[1 : len(s)-1]
-
-	// Check for unusual characters. If there are none,
-	// then no unquoting is needed, so return a slice of the
-	// original bytes.
-	r := 0
-	for r < len(s) {
-		c := s[r]
-		if c == '\\' || c == '"' || c < ' ' {
-			break
-		}
-		if c < utf8.RuneSelf {
-			r++
-			continue
-		}
-		rr, size := utf8.DecodeRune(s[r:])
-		if rr == utf8.RuneError && size == 1 {
-			break
-		}
-		r += size
-	}
-	if r == len(s) {
-		return s, true
-	}
-
-	b := make([]byte, len(s)+2*utf8.UTFMax)
-	w := copy(b, s[0:r])
-	for r < len(s) {
-		// Out of room? Can only happen if s is full of
-		// malformed UTF-8 and we're replacing each
-		// byte with RuneError.
-		if w >= len(b)-2*utf8.UTFMax {
-			nb := make([]byte, (len(b)+utf8.UTFMax)*2)
-			copy(nb, b[0:w])
-			b = nb
-		}
-		switch c := s[r]; {
-		case c == '\\':
-			r++
-			if r >= len(s) {
-				return
-			}
-			switch s[r] {
-			default:
-				return
-			case '"', '\\', '/', '\'':
-				b[w] = s[r]
-				r++
-				w++
-			case 'b':
-				b[w] = '\b'
-				r++
-				w++
-			case 'f':
-				b[w] = '\f'
-				r++
-				w++
-			case 'n':
-				b[w] = '\n'
-				r++
-				w++
-			case 'r':
-				b[w] = '\r'
-				r++
-				w++
-			case 't':
-				b[w] = '\t'
-				r++
-				w++
-			case 'u':
-				r--
-				rr := getu4(s[r:])
-				if rr < 0 {
-					return
-				}
-				r += 6
-				if utf16.IsSurrogate(rr) {
-					rr1 := getu4(s[r:])
-					if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
-						// A valid pair; consume.
-						r += 6
-						w += utf8.EncodeRune(b[w:], dec)
-						break
-					}
-					// Invalid surrogate; fall back to replacement rune.
-					rr = unicode.ReplacementChar
-				}
-				w += utf8.EncodeRune(b[w:], rr)
-			}
-
-		// Quote, control characters are invalid.
-		case c == '"', c < ' ':
-			return
-
-		// ASCII
-		case c < utf8.RuneSelf:
-			b[w] = c
-			r++
-			w++
-
-		// Coerce to well-formed UTF-8.
-		default:
-			rr, size := utf8.DecodeRune(s[r:])
-			r += size
-			w += utf8.EncodeRune(b[w:], rr)
-		}
-	}
-	return b[0:w], true
-}
diff --git a/vendor/github.com/globalsign/mgo/internal/json/encode.go b/vendor/github.com/globalsign/mgo/internal/json/encode.go
deleted file mode 100644
index e4b8f864..00000000
--- a/vendor/github.com/globalsign/mgo/internal/json/encode.go
+++ /dev/null
@@ -1,1260 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package json implements encoding and decoding of JSON as defined in
-// RFC 4627. The mapping between JSON and Go values is described
-// in the documentation for the Marshal and Unmarshal functions.
-//
-// See "JSON and Go" for an introduction to this package:
-// https://golang.org/doc/articles/json_and_go.html
-package json
-
-import (
-	"bytes"
-	"encoding"
-	"encoding/base64"
-	"fmt"
-	"math"
-	"reflect"
-	"runtime"
-	"sort"
-	"strconv"
-	"strings"
-	"sync"
-	"unicode"
-	"unicode/utf8"
-)
-
-// Marshal returns the JSON encoding of v.
-//
-// Marshal traverses the value v recursively.
-// If an encountered value implements the Marshaler interface
-// and is not a nil pointer, Marshal calls its MarshalJSON method
-// to produce JSON. If no MarshalJSON method is present but the
-// value implements encoding.TextMarshaler instead, Marshal calls
-// its MarshalText method.
-// The nil pointer exception is not strictly necessary
-// but mimics a similar, necessary exception in the behavior of
-// UnmarshalJSON.
-//
-// Otherwise, Marshal uses the following type-dependent default encodings:
-//
-// Boolean values encode as JSON booleans.
-//
-// Floating point, integer, and Number values encode as JSON numbers.
-//
-// String values encode as JSON strings coerced to valid UTF-8,
-// replacing invalid bytes with the Unicode replacement rune.
-// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e"
-// to keep some browsers from misinterpreting JSON output as HTML.
-// Ampersand "&" is also escaped to "\u0026" for the same reason.
-// This escaping can be disabled using an Encoder with DisableHTMLEscaping.
-//
-// Array and slice values encode as JSON arrays, except that
-// []byte encodes as a base64-encoded string, and a nil slice
-// encodes as the null JSON value.
-//
-// Struct values encode as JSON objects. Each exported struct field
-// becomes a member of the object unless
-//   - the field's tag is "-", or
-//   - the field is empty and its tag specifies the "omitempty" option.
-// The empty values are false, 0, any
-// nil pointer or interface value, and any array, slice, map, or string of
-// length zero. The object's default key string is the struct field name
-// but can be specified in the struct field's tag value. The "json" key in
-// the struct field's tag value is the key name, followed by an optional comma
-// and options. Examples:
-//
-//   // Field is ignored by this package.
-//   Field int `json:"-"`
-//
-//   // Field appears in JSON as key "myName".
-//   Field int `json:"myName"`
-//
-//   // Field appears in JSON as key "myName" and
-//   // the field is omitted from the object if its value is empty,
-//   // as defined above.
-//   Field int `json:"myName,omitempty"`
-//
-//   // Field appears in JSON as key "Field" (the default), but
-//   // the field is skipped if empty.
-//   // Note the leading comma.
-//   Field int `json:",omitempty"`
-//
-// The "string" option signals that a field is stored as JSON inside a
-// JSON-encoded string. It applies only to fields of string, floating point,
-// integer, or boolean types. This extra level of encoding is sometimes used
-// when communicating with JavaScript programs:
-//
-//    Int64String int64 `json:",string"`
-//
-// The key name will be used if it's a non-empty string consisting of
-// only Unicode letters, digits, dollar signs, percent signs, hyphens,
-// underscores and slashes.
-//
-// Anonymous struct fields are usually marshaled as if their inner exported fields
-// were fields in the outer struct, subject to the usual Go visibility rules amended
-// as described in the next paragraph.
-// An anonymous struct field with a name given in its JSON tag is treated as
-// having that name, rather than being anonymous.
-// An anonymous struct field of interface type is treated the same as having
-// that type as its name, rather than being anonymous.
-//
-// The Go visibility rules for struct fields are amended for JSON when
-// deciding which field to marshal or unmarshal. If there are
-// multiple fields at the same level, and that level is the least
-// nested (and would therefore be the nesting level selected by the
-// usual Go rules), the following extra rules apply:
-//
-// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered,
-// even if there are multiple untagged fields that would otherwise conflict.
-// 2) If there is exactly one field (tagged or not according to the first rule), that is selected.
-// 3) Otherwise there are multiple fields, and all are ignored; no error occurs.
-//
-// Handling of anonymous struct fields is new in Go 1.1.
-// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of
-// an anonymous struct field in both current and earlier versions, give the field
-// a JSON tag of "-".
-//
-// Map values encode as JSON objects. The map's key type must either be a string
-// or implement encoding.TextMarshaler. The map keys are used as JSON object
-// keys, subject to the UTF-8 coercion described for string values above.
-//
-// Pointer values encode as the value pointed to.
-// A nil pointer encodes as the null JSON value.
-//
-// Interface values encode as the value contained in the interface.
-// A nil interface value encodes as the null JSON value.
-//
-// Channel, complex, and function values cannot be encoded in JSON.
-// Attempting to encode such a value causes Marshal to return
-// an UnsupportedTypeError.
-//
-// JSON cannot represent cyclic data structures and Marshal does not
-// handle them. Passing cyclic structures to Marshal will result in
-// an infinite recursion.
-//
-func Marshal(v interface{}) ([]byte, error) {
-	e := &encodeState{}
-	err := e.marshal(v, encOpts{escapeHTML: true})
-	if err != nil {
-		return nil, err
-	}
-	return e.Bytes(), nil
-}
-
-// MarshalIndent is like Marshal but applies Indent to format the output.
-func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
-	b, err := Marshal(v)
-	if err != nil {
-		return nil, err
-	}
-	var buf bytes.Buffer
-	err = Indent(&buf, b, prefix, indent)
-	if err != nil {
-		return nil, err
-	}
-	return buf.Bytes(), nil
-}
-
-// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
-// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
-// so that the JSON will be safe to embed inside HTML