diff --git a/.flake8 b/.flake8
deleted file mode 100644
index 87afe5469f..0000000000
--- a/.flake8
+++ /dev/null
@@ -1,3 +0,0 @@
-[flake8]
-max-line-length = 100
-extend-ignore = E203, W503
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
deleted file mode 100644
index c1e2bb89eb..0000000000
--- a/.github/ISSUE_TEMPLATE.md
+++ /dev/null
@@ -1,3 +0,0 @@
-Please run `microk8s inspect` and attach the generated tarball to this issue.
-
-We appreciate your feedback. Thank you for using microk8s.
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 0000000000..bcd72749d6
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,32 @@
+---
+name: Bug Report
+about: Something is not working
+---
+
+
+
+#### Summary
+
+
+#### What Should Happen Instead?
+
+
+#### Reproduction Steps
+
+
+1. ...
+2. ...
+
+#### Introspection Report
+
+
+#### Can you suggest a fix?
+
+
+#### Are you interested in contributing with a fix?
+
+
+
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 0000000000..63591d2deb
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,21 @@
+---
+name: Feature Request
+about: Suggest a new feature
+---
+
+
+
+#### Summary
+
+
+#### Why is this important?
+
+
+#### Are you interested in contributing to this feature?
+
+
+
+
diff --git a/.github/ISSUE_TEMPLATE/question.yml b/.github/ISSUE_TEMPLATE/question.yml
new file mode 100644
index 0000000000..b28f51b80b
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/question.yml
@@ -0,0 +1,4 @@
+contact_links:
+ - name: Ask a question
+ url: https://kubernetes.slack.com/archives/CAUNWQ85V
+ about: "For discussions and/or other questions related to MicroK8s, please use the #microk8s channel on the Kubernetes Slack"
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index f5c088fb78..2f7b72a449 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -1,7 +1,33 @@
-### Thank you for making MicroK8s better
+
-Please reference the issue this PR is fixing, or provide a description of the problem addressed.
+#### Summary
+
+
+#### Changes
+
+
+#### Testing
+
+
+#### Possible Regressions
+
+
+#### Checklist
+
+
+* [ ] Read the [contributions](https://github.com/canonical/microk8s/blob/master/CONTRIBUTING.md) page.
* [ ] Submitted the [CLA form](https://ubuntu.com/legal/contributors/agreement), if you are a first time contributor.
+* [ ] The introduced changes are covered by unit and/or integration tests.
+
+#### Notes
+
diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml
new file mode 100644
index 0000000000..15813cdbe5
--- /dev/null
+++ b/.github/workflows/backport.yml
@@ -0,0 +1,34 @@
+name: Backport merged pull request
+on:
+ pull_request_target:
+ types: [closed]
+ issue_comment:
+ types: [created]
+permissions:
+ contents: write # so it can comment
+ pull-requests: write # so it can create pull requests
+jobs:
+ backport:
+ name: Backport pull request
+ runs-on: ubuntu-latest
+
+ # Only run when pull request is merged
+ # or when a comment containing `/backport` is created by someone other than the
+ # https://github.com/backport-action bot user (user id: 97796249). Note that if you use your
+ # own PAT as `github_token`, that you should replace this id with yours.
+ if: >
+ (
+ github.event_name == 'pull_request_target' &&
+ github.event.pull_request.merged
+ ) || (
+ github.event_name == 'issue_comment' &&
+ github.event.issue.pull_request &&
+ github.event.comment.user.id != 97796249 &&
+ contains(github.event.comment.body, '/backport')
+ )
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+ - name: Create backport pull requests
+ uses: korthout/backport-action@v2
diff --git a/.github/workflows/build-installer.yml b/.github/workflows/build-installer.yml
index 8d211e51ce..c3211a7f74 100644
--- a/.github/workflows/build-installer.yml
+++ b/.github/workflows/build-installer.yml
@@ -16,43 +16,43 @@ jobs:
working-directory: ${{ github.workspace }}/installer/windows
steps:
- name: Checkout
- uses: actions/checkout@v2
+ uses: actions/checkout@v4
- name: Set up Python 3.8
- uses: actions/setup-python@v2.2.1
+ uses: actions/setup-python@v5.3.0
with:
python-version: 3.8
- name: Install Python requirements
run: python -m pip install -r ../requirements.txt
- name: Build exe
working-directory: ${{ github.workspace }}/installer
- run: pyinstaller.exe --onefile ./microk8s.spec
+ run: pyinstaller.exe ./microk8s.spec
- name: Move exe to installer build directory
working-directory: ${{ github.workspace }}/installer
run: move microk8s.exe ./windows/microk8s.exe
- name: Download EnVar plugin for NSIS
- uses: carlosperate/download-file-action@v1.0.3
+ uses: carlosperate/download-file-action@v2.0.2
with:
- file-url: https://nsis.sourceforge.io/mediawiki/images/7/7f/EnVar_plugin.zip
+ file-url: https://github.com/GsNSIS/EnVar/releases/download/v0.3.1/EnVar-Plugin.zip
file-name: envar_plugin.zip
location: ${{ github.workspace }}
- name: Extract EnVar plugin
run: 7z x -o"C:/Program Files (x86)/NSIS" "${{ github.workspace }}/envar_plugin.zip"
- name: Download Multipass installer
- uses: carlosperate/download-file-action@v1.0.3
+ uses: carlosperate/download-file-action@v2.0.2
with:
- file-url: https://github.com/canonical/multipass/releases/download/v1.5.0/multipass-1.5.0+win-win64.exe
+ file-url: https://github.com/canonical/multipass/releases/download/v1.12.2/multipass-1.12.2+win-win64.exe
file-name: multipass.exe
location: ${{ github.workspace }}/installer/windows
- name: Download kubectl
- uses: carlosperate/download-file-action@v1.0.3
+ uses: carlosperate/download-file-action@v2.0.2
with:
- file-url: https://storage.googleapis.com/kubernetes-release/release/v1.20.0/bin/windows/amd64/kubectl.exe
+ file-url: https://storage.googleapis.com/kubernetes-release/release/v1.28.3/bin/windows/amd64/kubectl.exe
file-name: kubectl.exe
location: ${{ github.workspace }}/installer/windows
- name: Create installer
run: makensis.exe ${{ github.workspace }}/installer/windows/microk8s.nsi
- name: Upload installer
- uses: actions/upload-artifact@v2
+ uses: actions/upload-artifact@v3
with:
name: Windows installer
path: ${{ github.workspace }}/installer/windows/microk8s-installer.exe
diff --git a/.github/workflows/build-snap.yml b/.github/workflows/build-snap.yml
index 8ada996534..3894bd39fa 100644
--- a/.github/workflows/build-snap.yml
+++ b/.github/workflows/build-snap.yml
@@ -1,9 +1,6 @@
-name: Build MicroK8s snap on PR and push to master
+name: Build and test MicroK8s snap
on:
- push:
- branches:
- - master
pull_request:
branches:
- master
@@ -15,45 +12,224 @@ jobs:
steps:
- name: Checking out repo
- uses: actions/checkout@v2
+ uses: actions/checkout@v4
- name: Install lxd
run: |
sudo lxd init --auto
sudo usermod --append --groups lxd $USER
sg lxd -c 'lxc version'
- name: Install snapcraft
- run: sudo snap install snapcraft --classic
+ run: |
+ sudo snap install snapcraft --classic
+ - name: Install snapd from candidate
+ run: |
+ # TODO(neoaggelos): revert this after latest/beta is working again
+ sudo snap refresh snapd --channel=latest/stable
- name: Build snap
run: |
sg lxd -c 'snapcraft --use-lxd'
sudo mv microk8s*.snap microk8s.snap
- name: Uploading snap
- uses: actions/upload-artifact@v1
+ uses: actions/upload-artifact@v3
with:
name: microk8s.snap
path: microk8s.snap
- - name: Running upgrade path test
+
+ test-upgrade:
+ name: Upgrade path test
+ runs-on: ubuntu-20.04
+ needs: build
+
+ steps:
+ - name: Checking out repo
+ uses: actions/checkout@v4
+ - name: Install test dependencies
run: |
set -x
sudo apt-get install python3-setuptools
sudo pip3 install --upgrade pip
- sudo pip3 install -U pytest sh
- sudo -E UPGRADE_MICROK8S_FROM=latest/edge UPGRADE_MICROK8S_TO=`pwd`/`ls microk8s*.snap` pytest -s ./tests/test-upgrade-path.py
- sudo snap remove microk8s --purge
- - name: Running addons tests
+ sudo pip3 install -U pytest sh psutil
+ sudo apt-get -y install open-iscsi
+ sudo systemctl enable iscsid
+ - name: Fetch snap
+ uses: actions/download-artifact@v3.0.2
+ with:
+ name: microk8s.snap
+ path: build
+ - name: Running upgrade path test
+ run: |
+ sudo -E UPGRADE_MICROK8S_FROM=latest/edge UPGRADE_MICROK8S_TO=$PWD/build/microk8s.snap pytest -s ./tests/test-upgrade-path.py
+
+ test-addons-core:
+ name: Test core addons
+ runs-on: ubuntu-20.04
+ needs: build
+
+ steps:
+ - name: Checking out repo
+ uses: actions/checkout@v4
+ - name: Install test dependencies
run: |
set -x
+ sudo apt-get install python3-setuptools
+ sudo pip3 install --upgrade pip
+ sudo pip3 install -U pytest sh psutil
sudo apt-get -y install open-iscsi
sudo systemctl enable iscsid
- sudo snap install *.snap --classic --dangerous
+ - name: Fetch snap
+ uses: actions/download-artifact@v3.0.2
+ with:
+ name: microk8s.snap
+ path: build
+ - name: Running addons tests
+ run: |
+ set -x
+ sudo snap install build/microk8s.snap --classic --dangerous
./tests/smoke-test.sh
export UNDER_TIME_PRESSURE="True"
export SKIP_PROMETHEUS="False"
- (cd tests; pytest -s verify-branches.py)
- (cd tests; sudo -E pytest -s -ra test-addons.py)
- sudo snap remove microk8s --purge
+ sudo -E bash -c "cd /var/snap/microk8s/common/addons/core/tests; pytest -s -ra test-addons.py"
+
+ test-addons-community:
+ name: Test community addons
+ runs-on: ubuntu-20.04
+ needs: build
+
+ steps:
+ - name: Checking out repo
+ uses: actions/checkout@v4
+ - name: Install test dependencies
+ run: |
+ set -x
+ sudo apt-get install python3-setuptools
+ sudo pip3 install --upgrade pip
+ sudo pip3 install -U pytest sh
+ sudo apt-get -y install open-iscsi
+ sudo systemctl enable iscsid
+ - name: Fetch snap
+ uses: actions/download-artifact@v3.0.2
+ with:
+ name: microk8s.snap
+ path: build
+ # - name: Setup tmate session
+ # uses: mxschmitt/action-tmate@v3
+ - name: Running addons tests
+ run: |
+ set -x
+ sudo snap install build/microk8s.snap --classic --dangerous
+ sudo microk8s enable community
+ export UNDER_TIME_PRESSURE="True"
+ sudo -E bash -c "cd /var/snap/microk8s/common/addons/community/; pytest -s -ra ./tests/"
+
+ test-addons-core-upgrade:
+ name: Test core addons upgrade
+ runs-on: ubuntu-20.04
+ needs: build
+
+ steps:
+ - name: Checking out repo
+ uses: actions/checkout@v4
+ # - name: Setup tmate session
+ # uses: mxschmitt/action-tmate@v3
+ - name: Install test dependencies
+ run: |
+ set -x
+ sudo apt-get install python3-setuptools
+ sudo pip3 install --upgrade pip
+ sudo pip3 install -U pytest sh psutil
+ sudo apt-get -y install open-iscsi
+ sudo systemctl enable iscsid
+ - name: Fetch snap
+ uses: actions/download-artifact@v3.0.2
+ with:
+ name: microk8s.snap
+ path: build
- name: Running upgrade tests
run: |
set -x
export UNDER_TIME_PRESSURE="True"
- sudo -E UPGRADE_MICROK8S_FROM=latest/edge UPGRADE_MICROK8S_TO=`pwd`/`ls microk8s*.snap` pytest -s ./tests/test-upgrade.py
+ sudo -E bash -c "UPGRADE_MICROK8S_FROM=latest/edge UPGRADE_MICROK8S_TO=$PWD/build/microk8s.snap pytest -s ./tests/test-upgrade.py"
+
+ test-cluster-agent:
+ name: Cluster agent health check
+ runs-on: ubuntu-20.04
+ needs: build
+
+ steps:
+ - name: Checking out repo
+ uses: actions/checkout@v4
+ - name: Install test dependencies
+ run: |
+ set -x
+ sudo apt-get install python3-setuptools
+ sudo pip3 install --upgrade pip
+ sudo pip3 install -U pytest sh requests
+ - name: Fetch snap
+ uses: actions/download-artifact@v3.0.2
+ with:
+ name: microk8s.snap
+ path: build
+ - name: Running cluster agent health check
+ run: |
+ set -x
+ sudo snap install build/microk8s.snap --classic --dangerous
+ sudo -E bash -c "pytest -s ./tests/test-cluster-agent.py"
+
+ test-airgap:
+ name: Test airgap installation
+ runs-on: ubuntu-20.04
+ needs: build
+
+ steps:
+ - name: Checking out repo
+ uses: actions/checkout@v4
+ - name: Fetch snap
+ uses: actions/download-artifact@v3.0.2
+ with:
+ name: microk8s.snap
+ path: build
+ - name: Initialize LXD
+ run: |
+ sudo lxd init --auto
+ sudo lxc network set lxdbr0 ipv6.address=none
+ sudo usermod --append --groups lxd $USER
+ sg lxd -c 'lxc version'
+ - name: Run airgap tests
+ run: |
+ sudo -E bash -x -c "./tests/libs/airgap.sh --distro ubuntu:20.04 --channel $PWD/build/microk8s.snap"
+
+ security-scan:
+ name: Security scan
+ runs-on: ubuntu-20.04
+ needs: build
+ steps:
+ - name: Checking out repo
+ uses: actions/checkout@v4
+ - name: Fetch snap
+ uses: actions/download-artifact@v3.0.2
+ with:
+ name: microk8s.snap
+ path: build
+ - name: Create sarifs directory
+ run: |
+ mkdir -p sarifs
+ - name: Install Trivy vulnerability scanner
+ uses: aquasecurity/setup-trivy@v0.2.2
+ - name: Run Trivy vulnerability scanner on codebase
+ run: |
+ trivy fs . --format sarif --severity CRITICAL > sarifs/trivy-microk8s-repo-scan--results.sarif
+ - name: Run Trivy vulnerability scanner on images
+ run: |
+ for i in $(cat ./build-scripts/images.txt) ; do
+ name=$(echo $i | awk -F ':|/' '{print $(NF-1)}')
+ trivy image $i --format sarif > sarifs/$name.sarif
+ done
+ - name: Run Trivy vulnerability scanner on the snap
+ run: |
+ cp build/microk8s.snap .
+ unsquashfs microk8s.snap
+ trivy rootfs ./squashfs-root/ --format sarif > sarifs/snap.sarif
+ - name: Upload Trivy scan results to GitHub Security tab
+ uses: github/codeql-action/upload-sarif@v3
+ with:
+ sarif_file: "sarifs"
diff --git a/.github/workflows/check-formatting.yml b/.github/workflows/check-formatting.yml
index c3be284ce6..7048cd06ac 100644
--- a/.github/workflows/check-formatting.yml
+++ b/.github/workflows/check-formatting.yml
@@ -1,21 +1,21 @@
name: Lint Code
on:
- - push
- pull_request
jobs:
- build:
+ check-formatting:
name: Check Formatting
runs-on: ubuntu-latest
steps:
- name: Check out code
- uses: actions/checkout@v2
+ uses: actions/checkout@v4
- name: Install dependencies
run: |
- sudo apt-get install tox
+ sudo apt-get update
+ sudo apt-get install tox --fix-missing
sudo snap install node --classic
sudo npm install --save-dev --save-exact -g prettier
- name: Check Python formatting
@@ -25,4 +25,7 @@ jobs:
run: |
set -eux
prettier --check $(find . -name "*.yaml" -o -name "*.yml" | \
- grep -v "./microk8s-resources/actions/ingress.yaml" | grep -v "./microk8s-resources/actions/metallb.yaml")
+ grep -v "./microk8s-resources/actions/ingress.yaml" | \
+ grep -v "./microk8s-resources/actions/metallb.yaml" | \
+ grep -v invalid.yaml | \
+ grep -v calico)
diff --git a/.github/workflows/check-unit-tests.yml b/.github/workflows/check-unit-tests.yml
new file mode 100644
index 0000000000..39efd79b8d
--- /dev/null
+++ b/.github/workflows/check-unit-tests.yml
@@ -0,0 +1,27 @@
+name: Unit Tests
+
+on:
+ - pull_request
+
+jobs:
+ check-unit-tests:
+ name: Check Unit Tests
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Check out code
+ uses: actions/checkout@v4
+
+ - name: Install dependencies
+ run: |
+ sudo apt-get update
+ sudo apt-get install tox --fix-missing
+ sudo pip3 install -U pytest==7.1.3 sh==1.14.3
+ - name: Check Units
+ run: |
+ tox -e scripts
+ tox -e wrappers
+ tox -e cluster
+ - name: Verify branches
+ run: |
+ pytest -s ./tests/verify-branches.py
diff --git a/.github/workflows/cla-check.yml b/.github/workflows/cla-check.yml
index 08e8ba624a..b153aea155 100644
--- a/.github/workflows/cla-check.yml
+++ b/.github/workflows/cla-check.yml
@@ -1,8 +1,6 @@
name: cla-check
on:
- push:
- branches: [master, default]
pull_request:
branches: [master, default]
diff --git a/.github/workflows/test-kubeflow.yaml b/.github/workflows/test-kubeflow.yaml
deleted file mode 100644
index e1899aa8b1..0000000000
--- a/.github/workflows/test-kubeflow.yaml
+++ /dev/null
@@ -1,221 +0,0 @@
-name: Test Kubeflow
-
-on:
- push:
- paths-ignore:
- - "docs/**"
- pull_request:
- paths-ignore:
- - "docs/**"
-
-jobs:
- build:
- name: Build
- runs-on: ubuntu-latest
-
- steps:
- - name: Checking out repo
- uses: actions/checkout@v2
-
- - name: Install lxd
- run: |
- sudo lxd init --auto
- sudo usermod --append --groups lxd $USER
- sg lxd -c 'lxc version'
- - name: Install snapcraft
- run: sudo snap install snapcraft --classic
-
- - name: Build snap
- run: sg lxd -c 'snapcraft --use-lxd'
-
- - name: Uploading snap
- uses: actions/upload-artifact@v2
- with:
- name: microk8s.snap
- path: ./microk8s*.snap
-
- test-actions:
- name: Actions
- runs-on: ubuntu-latest
- needs: [build]
- strategy:
- fail-fast: false
- steps:
- - name: Check out code
- uses: actions/checkout@v2
-
- - name: Download built snap
- uses: actions/download-artifact@v2
- with:
- name: microk8s.snap
-
- - name: Install snap
- run: |
- set -eux
- sudo snap install ./microk8s*.snap --classic --dangerous
- sudo usermod --append --groups microk8s $USER
- sudo microk8s status --wait-ready
- sudo microk8s kubectl -n kube-system rollout status ds/calico-node
- sudo snap install juju-helpers --classic
-
- - name: Enable kubeflow
- timeout-minutes: 45
- run: sg microk8s -c 'microk8s enable kubeflow --debug --bundle=edge --ignore-min-mem --password=hunter2'
-
- - name: Test kubeflow
- run: |
- set -eux
- export JUJU_DATA=/var/snap/microk8s/current/juju/share/juju
- sudo apt update
- sudo apt install -y libssl-dev python3-pip firefox-geckodriver
- git clone https://github.com/juju-solutions/bundle-kubeflow.git
- cd bundle-kubeflow
- git reset --hard 5e0b6fcb
- sudo pip3 install -r requirements.txt -r test-requirements.txt
- sudo microk8s status --wait-ready
- sudo microk8s kubectl -n kube-system rollout status ds/calico-node
- trap 'sudo pkill -f svc/pipelines-api' SIGINT SIGTERM EXIT
- sudo microk8s kubectl -n kubeflow port-forward svc/pipelines-api 8888:8888 &
- (i=30; while ! curl localhost:8888 ; do ((--i)) || exit; sleep 1; done)
- sudo -E pytest -vvs -m edge -k 'not kubectl'
- sudo -E pytest -vvs -m edge -k 'kubectl'
-
- - name: Get MicroK8s pods
- run: sudo microk8s kubectl get pods -A
- if: failure()
-
- - name: Describe MicroK8s pods
- run: sudo microk8s kubectl describe pods -nkubeflow
- if: failure()
-
- - name: Get pipeline logs
- run: |
- set -eux
- pods=$(sudo microk8s kubectl get -nkubeflow pods -l workflows.argoproj.io/completed="true" -o custom-columns=:metadata.name --no-headers)
- for pod in $pods; do
- containers=$(sudo microk8s kubectl get -nkubeflow pods -o jsonpath="{.spec.containers[*].name}" $pod)
- for container in $containers; do
- sudo microk8s kubectl logs -nkubeflow --timestamps $pod -c $container
- printf '\n'
- done
- printf '\n\n'
- done
- if: failure()
-
- - name: Generate inspect tarball
- run: >
- sudo microk8s inspect |
- grep -Po "Report tarball is at \K.+" |
- sudo xargs -I {} mv {} inspection-report-${{ strategy.job-index }}.tar.gz
- if: failure()
-
- - name: Upload inspect tarball
- uses: actions/upload-artifact@v2
- with:
- name: inspection-report-actions
- path: ./inspection-report-${{ strategy.job-index }}.tar.gz
- if: failure()
-
- test-aws:
- name: AWS
- runs-on: ubuntu-latest
- needs: [build]
- if: github.event.pull_request.head.repo.full_name == github.repository
- strategy:
- fail-fast: false
- matrix:
- bundle: [full, lite]
- steps:
- - name: Check out code
- uses: actions/checkout@v2
-
- - name: Download built snap
- uses: actions/download-artifact@v2
- with:
- name: microk8s.snap
-
- - name: Install dependencies
- run: |
- set -eux
- sudo snap install juju --classic
- sudo snap install juju-wait --classic
-
- - name: Bootstrap onto AWS
- env:
- AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
- AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
- run: |
- set -eux
- juju autoload-credentials --client aws
- juju bootstrap aws/us-east-1 uk8saws --config test-mode=true --model-default test-mode=true
- juju deploy ubuntu --constraints 'cores=4 mem=16G root-disk=60G'
- juju wait -vw
-
- - name: Copy snap to AWS instance
- run: juju scp ./microk8s*.snap ubuntu/0:~/microk8s.snap
-
- - name: Install snap
- run: |
- juju ssh ubuntu/0 <
-![](https://img.shields.io/badge/Kubernetes-1.21-326de6.svg)
+[![](https://github.com/canonical/microk8s/actions/workflows/build-snap.yml/badge.svg)](https://github.com/canonical/microk8s/actions/workflows/build-snap.yml)
+[![](https://snapcraft.io/microk8s/badge.svg)](https://snapcraft.io/microk8s)
+![](https://img.shields.io/badge/Kubernetes-1.30-326de6.svg)
@@ -42,7 +44,6 @@ flavours of Linux](https://snapcraft.io/microk8s). Perfect for:
- Ingress, DNS, Dashboard, Clustering
- Automatic updates to the latest Kubernetes version
- GPGPU bindings for AI/ML
- - Kubeflow!
Drop us a line at [MicroK8s in the Wild](docs/community.md) if you are
doing something fun with MicroK8s!
@@ -81,7 +82,8 @@ sudo usermod -a -G microk8s
MicroK8s installs a barebones upstream Kubernetes. Additional services like dns and the Kubernetes dashboard can be enabled using the `microk8s enable` command.
```
-sudo microk8s enable dns dashboard
+sudo microk8s enable dns
+sudo microk8s enable dashboard
```
Use `microk8s status` to see a list of enabled and available addons. You can find the addon manifests and/or scripts under `${SNAP}/actions/`, with `${SNAP}` pointing by default to `/snap/microk8s/current`.
@@ -97,3 +99,7 @@ contribute to MicroK8s.
+
+
+
+
diff --git a/build-scripts/.gitignore b/build-scripts/.gitignore
new file mode 100644
index 0000000000..37554b0aa4
--- /dev/null
+++ b/build-scripts/.gitignore
@@ -0,0 +1,2 @@
+.build
+.install
diff --git a/build-scripts/addons/repositories.sh b/build-scripts/addons/repositories.sh
new file mode 100755
index 0000000000..3108a16638
--- /dev/null
+++ b/build-scripts/addons/repositories.sh
@@ -0,0 +1,33 @@
+#!/bin/bash -x
+
+# List of addon repositories to bundle in the snap
+# (name),(repository),(reference)
+ADDONS_REPOS="
+core,https://github.com/canonical/microk8s-core-addons,main
+community,https://github.com/canonical/microk8s-community-addons,main
+"
+
+# List of addon repositories to automatically enable
+ADDONS_REPOS_ENABLED="core"
+
+INSTALL="${1}"
+if [ -d "${INSTALL}/addons" ]; then
+ rm -rf "${INSTALL}/addons"
+fi
+if [ -d addons ]; then
+ rm -rf addons
+fi
+
+IFS=';'
+echo "${ADDONS_REPOS}" | while read line; do
+ if [ -z "${line}" ];
+ then continue
+ fi
+ name="$(echo ${line} | cut -f1 -d',')"
+ repository="$(echo ${line} | cut -f2 -d',')"
+ reference="$(echo ${line} | cut -f3 -d',')"
+ git clone "${repository}" -b "${reference}" "addons/${name}"
+done
+echo "${ADDONS_REPOS_ENABLED}" > addons/.auto-add
+
+cp -r "addons" "${INSTALL}/addons"
diff --git a/build-scripts/build-certs.sh b/build-scripts/build-certs.sh
deleted file mode 100755
index 0f7073b2cc..0000000000
--- a/build-scripts/build-certs.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env bash
-
-set -eu
-
-mkdir certs
-openssl genrsa -sha256 -out ./certs/serviceaccount.key 2048
-openssl genrsa -sha256 -out ./certs/ca.key 2048
-openssl req -x509 -new -sha256 -nodes -key ./certs/ca.key -subj "/C=GB/ST=Canonical/L=Canonical/O=Canonical/OU=Canonical/CN=127.0.0.1" -out ./certs/ca.crt
-openssl req -x509 -new -sha256 -nodes -key ./certs/ca.key -out ./certs/ca.crt
-openssl genrsa -sha256 -out ./certs/server.key 2048
-openssl req -new -sha256 -key ./certs/server.key -out ./certs/server.csr -config $KUBE_SNAP_ROOT/microk8s-resources/certs/csr.conf -subj "/C=GB/ST=Canonical/L=Canonical/O=Canonical/OU=Canonical/CN=127.0.0.1"
-openssl x509 -req -sha256 -in ./certs/server.csr -CA ./certs/ca.crt -CAkey ./certs/ca.key -CAcreateserial -out ./certs/server.crt -days 365 -extensions v3_ext -extfile $KUBE_SNAP_ROOT/microk8s-resources/certs/csr.conf
-rm -rf .srl
diff --git a/build-scripts/build-component.sh b/build-scripts/build-component.sh
new file mode 100755
index 0000000000..d92640de6e
--- /dev/null
+++ b/build-scripts/build-component.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+set -ex
+
+DIR=`realpath $(dirname "${0}")`
+
+BUILD_DIRECTORY="${SNAPCRAFT_PART_BUILD:-${DIR}/.build}"
+INSTALL_DIRECTORY="${SNAPCRAFT_PART_INSTALL:-${DIR}/.install}"
+
+mkdir -p "${BUILD_DIRECTORY}" "${INSTALL_DIRECTORY}"
+
+COMPONENT_NAME="${1}"
+COMPONENT_DIRECTORY="${DIR}/components/${COMPONENT_NAME}"
+
+GIT_REPOSITORY="$(cat "${COMPONENT_DIRECTORY}/repository")"
+GIT_TAG="$("${COMPONENT_DIRECTORY}/version.sh")"
+
+COMPONENT_BUILD_DIRECTORY="${BUILD_DIRECTORY}/${COMPONENT_NAME}"
+
+# cleanup git repository if we cannot git checkout to the build tag
+if [ -d "${COMPONENT_BUILD_DIRECTORY}" ]; then
+ cd "${COMPONENT_BUILD_DIRECTORY}"
+ if ! git checkout "${GIT_TAG}"; then
+ cd "${BUILD_DIRECTORY}"
+ rm -rf "${COMPONENT_BUILD_DIRECTORY}"
+ fi
+fi
+
+if [ ! -d "${COMPONENT_BUILD_DIRECTORY}" ]; then
+ git clone "${GIT_REPOSITORY}" --depth 1 -b "${GIT_TAG}" "${COMPONENT_BUILD_DIRECTORY}"
+fi
+
+cd "${COMPONENT_BUILD_DIRECTORY}"
+git config user.name "MicroK8s builder bot"
+git config user.email "microk8s-builder-bot@canonical.com"
+
+if [ -e "${COMPONENT_DIRECTORY}/pre-patch.sh" ]; then
+ bash -xe "${COMPONENT_DIRECTORY}/pre-patch.sh"
+fi
+
+for patch in $(python3 "${DIR}/print-patches-for.py" "${COMPONENT_NAME}" "${GIT_TAG}"); do
+ git am "${patch}"
+done
+
+bash -xe "${COMPONENT_DIRECTORY}/build.sh" "${INSTALL_DIRECTORY}" "${GIT_TAG}"
diff --git a/build-scripts/build-k8s-binaries.sh b/build-scripts/build-k8s-binaries.sh
deleted file mode 100755
index e474bddd81..0000000000
--- a/build-scripts/build-k8s-binaries.sh
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/bin/bash
-set -eux
-
-echo "Building k8s binaries from $KUBERNETES_REPOSITORY tag $KUBERNETES_TAG"
-apps="kubelite kubectl"
-path_apps="cmd/kubelite cmd/kubectl"
-export KUBE_SNAP_BINS="build/kube_bins/$KUBE_VERSION"
-mkdir -p $KUBE_SNAP_BINS/$KUBE_ARCH
-echo $KUBE_VERSION > $KUBE_SNAP_BINS/version
-
-export GOPATH=$SNAPCRAFT_PART_BUILD/go
-
-rm -rf $GOPATH
-mkdir -p $GOPATH
-
-git clone --depth 1 https://github.com/kubernetes/kubernetes $GOPATH/src/github.com/kubernetes/kubernetes/ -b $KUBERNETES_TAG
-
-(cd $GOPATH/src/$KUBERNETES_REPOSITORY
- git config user.email "microk8s-builder-bot@ubuntu.com"
- git config user.name "MicroK8s builder bot"
-
- PATCHES="patches"
- if echo "$KUBE_VERSION" | grep -e beta -e rc -e alpha
- then
- PATCHES="pre-patches"
- fi
-
- for patch in "${SNAPCRAFT_PART_SRC}"/"$PATCHES"/*.patch
- do
- echo "Applying patch $patch"
- git am < "$patch"
- done
-
- rm -rf $GOPATH/src/$KUBERNETES_REPOSITORY/_output/
- make clean
- for app in ${path_apps}
- do
- if [ "$app" = "cmd/kubelite" ]
- then
- make WHAT="${app}" GOFLAGS=-tags=libsqlite3,dqlite CGO_CFLAGS="-I${SNAPCRAFT_STAGE}/usr/include/" CGO_LDFLAGS="-L${SNAPCRAFT_STAGE}/lib" KUBE_CGO_OVERRIDES=kubelite
- else
- make WHAT="${app}"
- fi
- done
-)
-for app in $apps; do
- cp $GOPATH/src/$KUBERNETES_REPOSITORY/_output/bin/$app $KUBE_SNAP_BINS/$KUBE_ARCH/
-done
-
-rm -rf $GOPATH/src/$KUBERNETES_REPOSITORY/_output/
diff --git a/build-scripts/components/README.md b/build-scripts/components/README.md
new file mode 100644
index 0000000000..6c2159bbe2
--- /dev/null
+++ b/build-scripts/components/README.md
@@ -0,0 +1,81 @@
+# Parts directory
+
+This directory contains the build scripts for Go components built into MicroK8s.
+
+The directory structure looks like this:
+
+```
+build-scripts/
+ build-component.sh <-- runs as `build-component.sh $component_name`
+ - checks out the git repository
+ - runs the `pre-patch.sh` script (if any)
+ - applies the patches (if any)
+ - runs the `build.sh` script to build the component
+  components/
+ $component_name/
+ repository <-- git repository to clone
+ version.sh <-- prints the repository tag or commit to checkout
+ build.sh <-- runs as `build.sh $output $version`
+ first argument is the output directory where
+ binaries should be placed, second is the component version
+ pre-patch.sh <-- runs as `pre-patch.sh`. takes any action needed before applying
+ the component patches
+ patches/ <-- list of patches to apply after checkout (see section below)
+ ...
+ strict-patches/ <-- list of extra patches to apply when building strictly confined snap
+ ...
+```
+
+## Applying patches
+
+Most MicroK8s components are retrieved from an upstream source (specified in the `repository`), with a specific tag (specified in `version.sh`), have some patches applied to them (from the `patches/` and `strict-patches/` directories) and are then built (using `build.sh`).
+
+
+This section explains the directory format for the `patches` and `strict-patches` directories. The same rules apply for both. Note that the `strict-patches` (if any) are applied **after** any `patches` have been applied.
+
+Our patches do not frequently change between versions, but they do have to be rebased from time to time, which breaks compatibility with older versions. For that reason, we maintain a set of patches for each version that introduces a breaking change. Consider the following directory structure for the Kubernetes component.
+
+```
+patches/default/0.patch
+patches/v1.27.0/a.patch
+patches/v1.27.0/b.patch
+patches/v1.27.4/c.patch
+patches/v1.28.0/d.patch
+patches/v1.28.0-beta.0/e.patch
+```
+
+The Kubernetes version to build may be decided dynamically while building the snap, or be pinned to a specified version. The following table shows which patches we would apply depending on the Kubernetes version that we build:
+
+| Kubernetes version | Applied patches | Explanation |
+| ------------------ | ----------------------- | ------------------------------------------------------------------------------------------ |
+| `v1.27.0` | `a.patch` and `b.patch` | |
+| `v1.27.1` | `a.patch` and `b.patch` | In case there is no exact match, find the most recent older version |
+| `v1.27.4` | `c.patch` | Older patches are not applied |
+| `v1.27.12` | `c.patch` | In semver, `v1.27.12 > v1.27.4` so we again must get the most recent patches |
+| `v1.28.0-rc.0` | `d.patch` | Extra items from semver are ignored, so we can define the `v1.28.0` patch and be done |
+| `v1.28.0-beta.0` | `e.patch` | Extra items from semver are ignored, but due to exact match this patch is used instead |
+| `v1.28.0` | `d.patch` | Extra items from semver are ignored, so we can define the `v1.28.0` patch and be done |
+| `v1.28.4` | `d.patch` | Picks the patches from the stable versions only, not from beta |
+| `v1.29.1` | `d.patch` | Uses patches from most recent version, even if on a different minor |
+| `hack/branch` | `0.patch` | If not semver and no match, any patches from the `default/` directory are applied (if any) |
+
+Same logic applies for all other components as well.
+
+### Testing which patches would be applied
+
+You can verify which set of patches would be applied in any case using the `print-patches-for.py` script directly:
+
+```bash
+$ ./build-scripts/print-patches-for.py kubernetes v1.27.4
+/home/ubuntu/microk8s/build-scripts/components/kubernetes/patches/v1.27.4/0000-Kubelite-integration.patch
+$ ./build-scripts/print-patches-for.py kubernetes v1.27.3
+/home/ubuntu/microk8s/build-scripts/components/kubernetes/patches/v1.27.0/0000-Kubelite-integration.patch
+/home/ubuntu/microk8s/build-scripts/components/kubernetes/patches/v1.27.0/0001-Unix-socket-skip-validation-in-component-status.patch
+$ ./build-scripts/print-patches-for.py kubernetes v1.28.1
+/home/ubuntu/microk8s/build-scripts/components/kubernetes/patches/v1.28.0/0001-Set-log-reapply-handling-to-ignore-unchanged.patch
+/home/ubuntu/microk8s/build-scripts/components/kubernetes/patches/v1.28.0/0000-Kubelite-integration.patch
+```
+
+### How to add support for newer versions
+
+When a new release comes out which is no longer compatible with the existing latest patches, simply create a new directory under `patches/` with the new version number. This ensures that previous versions will still work, and newer ones will pick up the fixed patches.
diff --git a/build-scripts/components/cluster-agent/build.sh b/build-scripts/components/cluster-agent/build.sh
new file mode 100755
index 0000000000..86282e7f67
--- /dev/null
+++ b/build-scripts/components/cluster-agent/build.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+export INSTALL="${1}/bin"
+mkdir -p "${INSTALL}"
+
+make cluster-agent
+cp cluster-agent "${INSTALL}"
diff --git a/build-scripts/components/cluster-agent/repository b/build-scripts/components/cluster-agent/repository
new file mode 100644
index 0000000000..661ffe684f
--- /dev/null
+++ b/build-scripts/components/cluster-agent/repository
@@ -0,0 +1 @@
+https://github.com/canonical/microk8s-cluster-agent
diff --git a/build-scripts/components/cluster-agent/version.sh b/build-scripts/components/cluster-agent/version.sh
new file mode 100755
index 0000000000..f4e34e2190
--- /dev/null
+++ b/build-scripts/components/cluster-agent/version.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+echo "main"
diff --git a/build-scripts/components/cni/build.sh b/build-scripts/components/cni/build.sh
new file mode 100755
index 0000000000..353db2bf02
--- /dev/null
+++ b/build-scripts/components/cni/build.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+VERSION="${2}"
+
+INSTALL="${1}/opt/cni/bin"
+mkdir -p "${INSTALL}"
+
+# these would be very tedious to apply with a patch
+go get github.com/docker/docker/pkg/reexec
+go mod vendor
+sed -i 's/^package main/package plugin_main/' plugins/*/*/*.go
+sed -i 's/^func main()/func Main()/' plugins/*/*/*.go
+
+export CGO_ENABLED=0
+
+go build -o cni -ldflags "-s -w -extldflags -static -X github.com/containernetworking/plugins/pkg/utils/buildversion.BuildVersion=${VERSION}" ./cni.go
+
+cp cni "${INSTALL}/"
+for plugin in dhcp host-local static bridge host-device ipvlan loopback macvlan ptp vlan bandwidth firewall portmap sbr tuning vrf; do
+ ln -f -s ./cni "${INSTALL}/${plugin}"
+done
diff --git a/build-scripts/components/cni/patches/default/0001-single-entrypoint-for-cni-tools.patch b/build-scripts/components/cni/patches/default/0001-single-entrypoint-for-cni-tools.patch
new file mode 100644
index 0000000000..3e3a465615
--- /dev/null
+++ b/build-scripts/components/cni/patches/default/0001-single-entrypoint-for-cni-tools.patch
@@ -0,0 +1,72 @@
+From 3d0636d0ad86c9050da190b50bc01387d71dc80a Mon Sep 17 00:00:00 2001
+From: MicroK8s builder bot
+Date: Sun, 12 Feb 2023 13:34:45 +0000
+Subject: [PATCH] single entrypoint for cni tools
+
+---
+ cni.go | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 54 insertions(+)
+ create mode 100644 cni.go
+
+diff --git a/cni.go b/cni.go
+new file mode 100644
+index 0000000..93828f9
+--- /dev/null
++++ b/cni.go
+@@ -0,0 +1,54 @@
++package main
++
++import (
++ "os"
++ "path/filepath"
++
++ "github.com/docker/docker/pkg/reexec"
++
++ ipam_dhcp "github.com/containernetworking/plugins/plugins/ipam/dhcp"
++ ipam_host_local "github.com/containernetworking/plugins/plugins/ipam/host-local"
++ ipam_static "github.com/containernetworking/plugins/plugins/ipam/static"
++
++ main_bridge "github.com/containernetworking/plugins/plugins/main/bridge"
++ main_host_device "github.com/containernetworking/plugins/plugins/main/host-device"
++ main_ipvlan "github.com/containernetworking/plugins/plugins/main/ipvlan"
++ main_loopback "github.com/containernetworking/plugins/plugins/main/loopback"
++ main_macvlan "github.com/containernetworking/plugins/plugins/main/macvlan"
++ main_ptp "github.com/containernetworking/plugins/plugins/main/ptp"
++ main_vlan "github.com/containernetworking/plugins/plugins/main/vlan"
++
++ meta_bandwidth "github.com/containernetworking/plugins/plugins/meta/bandwidth"
++ meta_firewall "github.com/containernetworking/plugins/plugins/meta/firewall"
++ meta_portmap "github.com/containernetworking/plugins/plugins/meta/portmap"
++ meta_sbr "github.com/containernetworking/plugins/plugins/meta/sbr"
++ meta_tuning "github.com/containernetworking/plugins/plugins/meta/tuning"
++ meta_vrf "github.com/containernetworking/plugins/plugins/meta/vrf"
++)
++
++func main() {
++ os.Args[0] = filepath.Base(os.Args[0])
++ if reexec.Init() {
++ return
++ }
++ panic("invalid entrypoint name")
++}
++
++func init() {
++ reexec.Register("dhcp", ipam_dhcp.Main)
++ reexec.Register("host-local", ipam_host_local.Main)
++ reexec.Register("static", ipam_static.Main)
++ reexec.Register("bridge", main_bridge.Main)
++ reexec.Register("host-device", main_host_device.Main)
++ reexec.Register("ipvlan", main_ipvlan.Main)
++ reexec.Register("loopback", main_loopback.Main)
++ reexec.Register("macvlan", main_macvlan.Main)
++ reexec.Register("ptp", main_ptp.Main)
++ reexec.Register("vlan", main_vlan.Main)
++ reexec.Register("bandwidth", meta_bandwidth.Main)
++ reexec.Register("firewall", meta_firewall.Main)
++ reexec.Register("portmap", meta_portmap.Main)
++ reexec.Register("sbr", meta_sbr.Main)
++ reexec.Register("tuning", meta_tuning.Main)
++ reexec.Register("vrf", meta_vrf.Main)
++}
+--
+2.25.1
diff --git a/build-scripts/components/cni/repository b/build-scripts/components/cni/repository
new file mode 100644
index 0000000000..4c32da97df
--- /dev/null
+++ b/build-scripts/components/cni/repository
@@ -0,0 +1 @@
+https://github.com/containernetworking/plugins
diff --git a/build-scripts/components/cni/version.sh b/build-scripts/components/cni/version.sh
new file mode 100755
index 0000000000..b50ffb4fc3
--- /dev/null
+++ b/build-scripts/components/cni/version.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+# Match https://github.com/kubernetes/kubernetes/blob/master/build/dependencies.yaml#L20
+echo "v1.6.0"
diff --git a/build-scripts/components/containerd/build.sh b/build-scripts/components/containerd/build.sh
new file mode 100755
index 0000000000..c04cfdaa39
--- /dev/null
+++ b/build-scripts/components/containerd/build.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+INSTALL="${1}/bin"
+mkdir -p "${INSTALL}"
+
+VERSION="${2}"
+REVISION=$(git rev-parse HEAD)
+
+sed -i "s,^VERSION.*$,VERSION=${VERSION}," Makefile
+sed -i "s,^REVISION.*$,REVISION=${REVISION}," Makefile
+
+export STATIC=1
+for bin in ctr containerd containerd-shim containerd-shim-runc-v1 containerd-shim-runc-v2; do
+ make "bin/${bin}"
+ cp "bin/${bin}" "${INSTALL}/${bin}"
+done
diff --git a/build-scripts/components/containerd/patches/default/0001-microk8s-sideload-images-plugin.patch b/build-scripts/components/containerd/patches/default/0001-microk8s-sideload-images-plugin.patch
new file mode 100644
index 0000000000..a0ee671cfc
--- /dev/null
+++ b/build-scripts/components/containerd/patches/default/0001-microk8s-sideload-images-plugin.patch
@@ -0,0 +1,164 @@
+From d703811ab64963a6d52e6ac98b6a33b26b13e020 Mon Sep 17 00:00:00 2001
+From: Angelos Kolaitis
+Date: Mon, 10 Jul 2023 12:15:34 +0300
+Subject: [PATCH] microk8s sideload images plugin
+
+---
+ cmd/containerd/builtins_microk8s.go | 6 ++
+ microk8s_plugins/sideload.go | 132 ++++++++++++++++++++++++++++
+ 2 files changed, 138 insertions(+)
+ create mode 100644 cmd/containerd/builtins_microk8s.go
+ create mode 100644 microk8s_plugins/sideload.go
+
+diff --git a/cmd/containerd/builtins_microk8s.go b/cmd/containerd/builtins_microk8s.go
+new file mode 100644
+index 0000000..d9afc6f
+--- /dev/null
++++ b/cmd/containerd/builtins_microk8s.go
+@@ -0,0 +1,6 @@
++package main
++
++// register containerd microk8s plugins here
++import (
++ _ "github.com/containerd/containerd/microk8s_plugins"
++)
+diff --git a/microk8s_plugins/sideload.go b/microk8s_plugins/sideload.go
+new file mode 100644
+index 0000000..3ac632e
+--- /dev/null
++++ b/microk8s_plugins/sideload.go
+@@ -0,0 +1,132 @@
++package microk8s
++
++import (
++ "fmt"
++ "os"
++ "path/filepath"
++ "time"
++
++ "github.com/containerd/containerd"
++ "github.com/containerd/containerd/log"
++ "github.com/containerd/containerd/platforms"
++ "github.com/containerd/containerd/plugin"
++)
++
++const pluginName = "sideload-images"
++
++var logger = log.L.WithField("plugin", pluginName)
++
++type Config struct {
++ // Interval configures how frequently the plugin will look for new images found
++ // in the sources. If set to zero, images are only loaded during initial start.
++ Interval *time.Duration `toml:"interval"`
++
++ // Sources is a list of paths to look for .tar images.
++ // For example, `/var/snap/microk8s/common/etc/sideload`
++ Sources []string `toml:"sources"`
++
++ // Namespace the images will be loaded into, e.g. "k8s.io"
++ Namespace string `toml:"namespace"`
++}
++
++func (c *Config) SetDefaults() {
++ if c.Namespace == "" {
++ c.Namespace = "k8s.io"
++ }
++ if len(c.Sources) == 0 {
++ snapCommon := os.Getenv("SNAP_COMMON")
++ if snapCommon == "" {
++ snapCommon = "/var/snap/microk8s/common"
++ }
++ c.Sources = []string{filepath.Join(snapCommon, "etc", "sideload")}
++ }
++ if c.Interval == nil {
++ t := 5 * time.Second
++ c.Interval = &t
++ }
++}
++
++func init() {
++ c := &Config{}
++ plugin.Register(&plugin.Registration{
++ Type: plugin.ServicePlugin,
++ ID: pluginName,
++ Config: c,
++ InitFn: func(ic *plugin.InitContext) (interface{}, error) {
++ config := ic.Config.(*Config)
++ config.SetDefaults()
++
++ logger.Debugf("Loaded config %#v", config)
++
++ if len(config.Sources) == 0 {
++ return nil, fmt.Errorf("no sources configured: %w", plugin.ErrSkipPlugin)
++ }
++
++ go func() {
++ // get a containerd client
++ var (
++ cl *containerd.Client
++ err error
++ )
++ for cl == nil {
++ select {
++ case <-ic.Context.Done():
++ return
++ default:
++ }
++
++ cl, err = containerd.New(ic.Address, containerd.WithDefaultNamespace(config.Namespace), containerd.WithTimeout(2*time.Second))
++ if err != nil {
++ logger.Info("Failed to create containerd client")
++ }
++ }
++
++ for {
++ nextDir:
++ for _, dir := range c.Sources {
++ logger := logger.WithField("dir", dir)
++ logger.Debug("Looking for images")
++ files, err := filepath.Glob(filepath.Join(dir, "*.tar"))
++ if err != nil {
++ logger.WithError(err).Warn("Failed to look for images")
++ continue nextDir
++ }
++
++ nextFile:
++ for _, file := range files {
++ logger := logger.WithField("file", file)
++ r, err := os.Open(file)
++ if err != nil {
++ logger.WithError(err).Warn("Failed to open file")
++ continue nextFile
++ }
++ images, err := cl.Import(ic.Context, r, containerd.WithImportPlatform(platforms.Default()))
++ if err != nil {
++ logger.WithError(err).Error("Failed to import images")
++ } else {
++ logger.Infof("Imported %d images", len(images))
++ os.Rename(file, file+".loaded")
++ }
++ if closeErr := r.Close(); closeErr != nil {
++ logger.WithError(closeErr).Error("Failed to close reader")
++ }
++ }
++ }
++
++ // retry after interval, finish if interval is zero
++ if *c.Interval == 0 {
++ logger.Info("Plugin terminating")
++ return
++ }
++ select {
++ case <-ic.Context.Done():
++ return
++ case <-time.After(*c.Interval):
++ }
++ }
++ }()
++
++ return nil, nil
++ },
++ })
++}
+--
+2.34.1
diff --git a/build-scripts/components/containerd/repository b/build-scripts/components/containerd/repository
new file mode 100644
index 0000000000..2df75561b8
--- /dev/null
+++ b/build-scripts/components/containerd/repository
@@ -0,0 +1 @@
+https://github.com/containerd/containerd
diff --git a/build-scripts/components/containerd/version.sh b/build-scripts/components/containerd/version.sh
new file mode 100755
index 0000000000..c8707a92fe
--- /dev/null
+++ b/build-scripts/components/containerd/version.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+echo "v1.6.36"
diff --git a/build-scripts/components/etcd/build.sh b/build-scripts/components/etcd/build.sh
new file mode 100755
index 0000000000..3eaaa9df3b
--- /dev/null
+++ b/build-scripts/components/etcd/build.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+export INSTALL="${1}"
+mkdir -p "${INSTALL}"
+
+GO_LDFLAGS="-s -w" GO_BUILD_FLAGS="-v" ./build.sh
+
+for bin in etcd etcdctl; do
+ cp "bin/${bin}" "${INSTALL}/${bin}"
+done
diff --git a/build-scripts/components/etcd/repository b/build-scripts/components/etcd/repository
new file mode 100644
index 0000000000..cece2938c2
--- /dev/null
+++ b/build-scripts/components/etcd/repository
@@ -0,0 +1 @@
+https://github.com/etcd-io/etcd
diff --git a/build-scripts/components/etcd/version.sh b/build-scripts/components/etcd/version.sh
new file mode 100755
index 0000000000..12db0c6e1d
--- /dev/null
+++ b/build-scripts/components/etcd/version.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+echo "v3.5.17"
diff --git a/build-scripts/components/flannel-cni-plugin/build.sh b/build-scripts/components/flannel-cni-plugin/build.sh
new file mode 100755
index 0000000000..4df1472a38
--- /dev/null
+++ b/build-scripts/components/flannel-cni-plugin/build.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+INSTALL="${1}/opt/cni/bin"
+mkdir -p "${INSTALL}"
+
+VERSION="${2}"
+
+export CGO_ENABLED=0
+go build -o dist/flannel -ldflags "-s -w -X github.com/flannel-io/cni-plugin/version.Version=${VERSION} -extldflags -static"
+
+cp dist/flannel "${INSTALL}/flannel"
diff --git a/build-scripts/components/flannel-cni-plugin/repository b/build-scripts/components/flannel-cni-plugin/repository
new file mode 100644
index 0000000000..a3709ca794
--- /dev/null
+++ b/build-scripts/components/flannel-cni-plugin/repository
@@ -0,0 +1 @@
+https://github.com/flannel-io/cni-plugin
diff --git a/build-scripts/components/flannel-cni-plugin/version.sh b/build-scripts/components/flannel-cni-plugin/version.sh
new file mode 100755
index 0000000000..370ee9feb5
--- /dev/null
+++ b/build-scripts/components/flannel-cni-plugin/version.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+echo "v1.1.2"
diff --git a/build-scripts/components/flanneld/build.sh b/build-scripts/components/flanneld/build.sh
new file mode 100755
index 0000000000..c2904a77b5
--- /dev/null
+++ b/build-scripts/components/flanneld/build.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+INSTALL="${1}/opt/cni/bin"
+mkdir -p "${INSTALL}"
+
+VERSION="${2}"
+
+export CGO_ENABLED=0
+go build -o dist/flanneld -ldflags "-s -w -X github.com/flannel-io/flannel/version.Version=${VERSION} -extldflags -static"
+
+cp dist/flanneld "${INSTALL}/flanneld"
diff --git a/build-scripts/components/flanneld/patches/default/0001-disable-udp-backend.patch b/build-scripts/components/flanneld/patches/default/0001-disable-udp-backend.patch
new file mode 100644
index 0000000000..92ae95eb77
--- /dev/null
+++ b/build-scripts/components/flanneld/patches/default/0001-disable-udp-backend.patch
@@ -0,0 +1,24 @@
+From 45ec777a0d113089453eca7fd2f7cb195555c6c9 Mon Sep 17 00:00:00 2001
+From: MicroK8s builder bot
+Date: Wed, 15 Feb 2023 15:52:51 +0000
+Subject: [PATCH] disable udp backend
+
+---
+ main.go | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/main.go b/main.go
+index 064f58d..b591458 100644
+--- a/main.go
++++ b/main.go
+@@ -50,7 +50,7 @@ import (
+ _ "github.com/flannel-io/flannel/pkg/backend/ipip"
+ _ "github.com/flannel-io/flannel/pkg/backend/ipsec"
+ _ "github.com/flannel-io/flannel/pkg/backend/tencentvpc"
+- _ "github.com/flannel-io/flannel/pkg/backend/udp"
++ // _ "github.com/flannel-io/flannel/pkg/backend/udp"
+ _ "github.com/flannel-io/flannel/pkg/backend/vxlan"
+ _ "github.com/flannel-io/flannel/pkg/backend/wireguard"
+ )
+--
+2.25.1
diff --git a/build-scripts/components/flanneld/repository b/build-scripts/components/flanneld/repository
new file mode 100644
index 0000000000..6c300bbb7e
--- /dev/null
+++ b/build-scripts/components/flanneld/repository
@@ -0,0 +1 @@
+https://github.com/flannel-io/flannel
diff --git a/build-scripts/components/flanneld/version.sh b/build-scripts/components/flanneld/version.sh
new file mode 100755
index 0000000000..c7ca71757a
--- /dev/null
+++ b/build-scripts/components/flanneld/version.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+echo "v0.21.2"
diff --git a/build-scripts/components/helm/build.sh b/build-scripts/components/helm/build.sh
new file mode 100755
index 0000000000..a799914d40
--- /dev/null
+++ b/build-scripts/components/helm/build.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+VERSION="${2}"
+
+INSTALL="${1}"
+mkdir -p "${INSTALL}/bin"
+
+make VERSION="${VERSION}"
+cp bin/helm "${INSTALL}/bin/helm"
+
+./bin/helm completion bash \
+ | sed "s/complete -o default -F __start_helm helm/complete -o default -F __start_helm microk8s.helm/g" \
+ | sed "s/complete -o default -o nospace -F __start_helm helm/complete -o default -o nospace -F __start_helm microk8s.helm/g" \
+ > bin/helm.bash
+
+./bin/helm completion bash \
+ | sed "s/complete -o default -F __start_helm helm/complete -o default -F __start_helm microk8s.helm3/g" \
+ | sed "s/complete -o default -o nospace -F __start_helm helm/complete -o default -o nospace -F __start_helm microk8s.helm3/g" \
+ > bin/helm3.bash
+
+cp bin/helm.bash "${INSTALL}/helm.bash"
+cp bin/helm3.bash "${INSTALL}/helm3.bash"
diff --git a/build-scripts/components/helm/patches/default/0001-disable-warnings-for-kubeconfig-permissions.patch b/build-scripts/components/helm/patches/default/0001-disable-warnings-for-kubeconfig-permissions.patch
new file mode 100644
index 0000000000..8726a62f7a
--- /dev/null
+++ b/build-scripts/components/helm/patches/default/0001-disable-warnings-for-kubeconfig-permissions.patch
@@ -0,0 +1,24 @@
+From d08d2604a9f0f925b93b87d74ee0a2c26c785467 Mon Sep 17 00:00:00 2001
+From: Angelos Kolaitis
+Date: Thu, 14 Jul 2022 18:18:09 +0300
+Subject: [PATCH] disable warnings for kubeconfig permissions
+
+---
+ cmd/helm/root.go | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/cmd/helm/root.go b/cmd/helm/root.go
+index ef92fea9..512f823f 100644
+--- a/cmd/helm/root.go
++++ b/cmd/helm/root.go
+@@ -206,7 +206,7 @@ func newRootCmd(actionConfig *action.Configuration, out io.Writer, args []string
+ loadPlugins(cmd, out)
+
+ // Check permissions on critical files
+- checkPerms()
++ // checkPerms()
+
+ // Check for expired repositories
+ checkForExpiredRepos(settings.RepositoryConfig)
+--
+2.25.1
diff --git a/build-scripts/components/helm/repository b/build-scripts/components/helm/repository
new file mode 100644
index 0000000000..e7c0fbd488
--- /dev/null
+++ b/build-scripts/components/helm/repository
@@ -0,0 +1 @@
+https://github.com/helm/helm
diff --git a/build-scripts/components/helm/version.sh b/build-scripts/components/helm/version.sh
new file mode 100755
index 0000000000..8609bd8869
--- /dev/null
+++ b/build-scripts/components/helm/version.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+echo "v3.16.3"
diff --git a/build-scripts/components/k8s-dqlite/build.sh b/build-scripts/components/k8s-dqlite/build.sh
new file mode 100755
index 0000000000..c9672777f9
--- /dev/null
+++ b/build-scripts/components/k8s-dqlite/build.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+INSTALL="${1}/bin"
+mkdir -p "${INSTALL}"
+
+make static -j
+
+cp bin/static/dqlite "${INSTALL}/dqlite"
+cp bin/static/k8s-dqlite "${INSTALL}/k8s-dqlite"
diff --git a/build-scripts/components/k8s-dqlite/repository b/build-scripts/components/k8s-dqlite/repository
new file mode 100644
index 0000000000..87e2f7f5ed
--- /dev/null
+++ b/build-scripts/components/k8s-dqlite/repository
@@ -0,0 +1 @@
+https://github.com/canonical/k8s-dqlite
diff --git a/build-scripts/components/k8s-dqlite/version.sh b/build-scripts/components/k8s-dqlite/version.sh
new file mode 100755
index 0000000000..9c8aa07ff9
--- /dev/null
+++ b/build-scripts/components/k8s-dqlite/version.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+echo "v1.3.0"
diff --git a/build-scripts/components/kubernetes/build.sh b/build-scripts/components/kubernetes/build.sh
new file mode 100755
index 0000000000..542ba9f64f
--- /dev/null
+++ b/build-scripts/components/kubernetes/build.sh
@@ -0,0 +1,17 @@
+#!/bin/bash -x
+
+INSTALL="${1}"
+
+export KUBE_GIT_VERSION_FILE="${PWD}/.version.sh"
+
+for app in kubectl kubelite; do
+ make WHAT="cmd/${app}" KUBE_STATIC_OVERRIDES=kubelite
+ cp _output/bin/"${app}" "${INSTALL}/${app}"
+done
+
+_output/bin/kubectl completion bash \
+ | sed "s/complete -o default -F __start_kubectl kubectl/complete -o default -F __start_kubectl microk8s.kubectl/g" \
+ | sed "s/complete -o default -o nospace -F __start_kubectl kubectl/complete -o default -o nospace -F __start_kubectl microk8s.kubectl/g" \
+ > _output/kubectl.bash
+
+cp _output/kubectl.bash "${INSTALL}/kubectl.bash"
diff --git a/build-scripts/patches/0001-Kubelite-integration.patch b/build-scripts/components/kubernetes/patches/v1.27.0/0000-Kubelite-integration.patch
similarity index 85%
rename from build-scripts/patches/0001-Kubelite-integration.patch
rename to build-scripts/components/kubernetes/patches/v1.27.0/0000-Kubelite-integration.patch
index 707444aabd..e501ba60cc 100644
--- a/build-scripts/patches/0001-Kubelite-integration.patch
+++ b/build-scripts/components/kubernetes/patches/v1.27.0/0000-Kubelite-integration.patch
@@ -1,41 +1,42 @@
-From de618434d4a4168f11bec34c062700b3ad17d2ca Mon Sep 17 00:00:00 2001
+From d0ae18d074db5ff361f363073f32b2f30c7a3686 Mon Sep 17 00:00:00 2001
From: Konstantinos Tsakalozos
Date: Wed, 3 Mar 2021 18:19:37 +0200
Subject: [PATCH] Kubelite integration
---
- cmd/kube-apiserver/app/server.go | 9 ++--
- cmd/kubelet/app/server.go | 11 ++--
+ cmd/kube-apiserver/app/server.go | 9 +++-
+ cmd/kube-scheduler/app/server.go | 6 ++-
+ cmd/kubelet/app/server.go | 13 +++--
cmd/kubelite/app/daemons/daemon.go | 84 +++++++++++++++++++++++++++++
cmd/kubelite/app/options/options.go | 79 +++++++++++++++++++++++++++
cmd/kubelite/app/server.go | 79 +++++++++++++++++++++++++++
cmd/kubelite/kubelite.go | 28 ++++++++++
pkg/volume/csi/csi_plugin.go | 10 ++--
- 7 files changed, 291 insertions(+), 9 deletions(-)
+ 8 files changed, 297 insertions(+), 11 deletions(-)
create mode 100644 cmd/kubelite/app/daemons/daemon.go
create mode 100644 cmd/kubelite/app/options/options.go
create mode 100644 cmd/kubelite/app/server.go
create mode 100644 cmd/kubelite/kubelite.go
diff --git a/cmd/kube-apiserver/app/server.go b/cmd/kube-apiserver/app/server.go
-index 9fb54ec9a64..ade92c7337b 100644
+index fc36d044dbe..cffb7c35a3c 100644
--- a/cmd/kube-apiserver/app/server.go
+++ b/cmd/kube-apiserver/app/server.go
-@@ -106,7 +106,7 @@ func checkNonZeroInsecurePort(fs *pflag.FlagSet) error {
+@@ -89,7 +89,7 @@ func init() {
}
-
+
// NewAPIServerCommand creates a *cobra.Command object with default parameters
-func NewAPIServerCommand() *cobra.Command {
+func NewAPIServerCommand(stopCh... <- chan struct{}) *cobra.Command {
s := options.NewServerRunOptions()
cmd := &cobra.Command{
Use: "kube-apiserver",
-@@ -142,8 +142,11 @@ cluster's shared state through which all other components interact.`,
- if errs := completedOptions.Validate(); len(errs) != 0 {
- return utilerrors.NewAggregate(errs)
+@@ -129,7 +129,12 @@ cluster's shared state through which all other components interact.`,
}
--
+ // add feature enablement metrics
+ utilfeature.DefaultMutableFeatureGate.AddMetrics()
- return Run(completedOptions, genericapiserver.SetupSignalHandler())
++
+ if len(stopCh) != 0 {
+ return Run(completedOptions, stopCh[0])
+ } else {
@@ -44,42 +45,63 @@ index 9fb54ec9a64..ade92c7337b 100644
},
Args: func(cmd *cobra.Command, args []string) error {
for _, arg := range args {
+diff --git a/cmd/kube-scheduler/app/server.go b/cmd/kube-scheduler/app/server.go
+index 8d01f3b7670..44ac7f69328 100644
+--- a/cmd/kube-scheduler/app/server.go
++++ b/cmd/kube-scheduler/app/server.go
+@@ -132,7 +132,11 @@ func runCommand(cmd *cobra.Command, opts *options.Options, registryOptions ...Op
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ go func() {
+- stopCh := server.SetupSignalHandler()
++ c := cmd.Context()
++ if c == nil {
++ c = server.SetupSignalContext()
++ }
++ stopCh := c.Done()
+ <-stopCh
+ cancel()
+ }()
diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go
-index 2dc10a512b3..40d01f347ce 100644
+index 9444f136866..8ca88f64d04 100644
--- a/cmd/kubelet/app/server.go
+++ b/cmd/kubelet/app/server.go
-@@ -110,7 +110,7 @@ const (
+@@ -120,7 +120,7 @@ const (
)
-
+
// NewKubeletCommand creates a *cobra.Command object with default parameters
-func NewKubeletCommand() *cobra.Command {
+func NewKubeletCommand(ctx ...context.Context) *cobra.Command {
cleanFlagSet := pflag.NewFlagSet(componentKubelet, pflag.ContinueOnError)
cleanFlagSet.SetNormalizeFunc(cliflag.WordSepNormalizeFunc)
kubeletFlags := options.NewKubeletFlags()
-@@ -277,7 +277,12 @@ HTTP server: The kubelet can also listen for HTTP and respond to a simple API
+@@ -250,6 +250,12 @@ HTTP server: The kubelet can also listen for HTTP and respond to a simple API
+ if err := checkPermissions(); err != nil {
klog.ErrorS(err, "kubelet running with insufficient permissions")
}
- // set up signal context here in order to be reused by kubelet and docker shim
-- ctx := genericapiserver.SetupSignalContext()
+ runctx := context.Background()
+ if len(ctx) == 0 {
+ runctx = genericapiserver.SetupSignalContext()
+ } else {
+ runctx = ctx[0]
+ }
-
+
// make the kubelet's config safe for logging
config := kubeletServer.KubeletConfiguration.DeepCopy()
-@@ -288,7 +293,7 @@ HTTP server: The kubelet can also listen for HTTP and respond to a simple API
- klog.V(5).InfoS("KubeletConfiguration", "configuration", kubeletServer.KubeletConfiguration)
-
+@@ -259,12 +265,9 @@ HTTP server: The kubelet can also listen for HTTP and respond to a simple API
+ // log the kubelet's config for inspection
+ klog.V(5).InfoS("KubeletConfiguration", "configuration", config)
+
+- // set up signal context for kubelet shutdown
+- ctx := genericapiserver.SetupSignalContext()
+-
+ utilfeature.DefaultMutableFeatureGate.AddMetrics()
// run the kubelet
-- if err := Run(ctx, kubeletServer, kubeletDeps, utilfeature.DefaultFeatureGate); err != nil {
-+ if err := Run(runctx, kubeletServer, kubeletDeps, utilfeature.DefaultFeatureGate); err != nil {
- klog.ErrorS(err, "Failed to run kubelet")
- os.Exit(1)
- }
+- return Run(ctx, kubeletServer, kubeletDeps, utilfeature.DefaultFeatureGate)
++ return Run(runctx, kubeletServer, kubeletDeps, utilfeature.DefaultFeatureGate)
+ },
+ }
+
diff --git a/cmd/kubelite/app/daemons/daemon.go b/cmd/kubelite/app/daemons/daemon.go
new file mode 100644
index 00000000000..dbef03cf07e
@@ -376,37 +398,34 @@ index 00000000000..667b24f68e6
+ println("Stopping kubelite")
+}
diff --git a/pkg/volume/csi/csi_plugin.go b/pkg/volume/csi/csi_plugin.go
-index 85c1c1f3db1..b90c82225f5 100644
+index ce7a543c94f..a8094f878d6 100644
--- a/pkg/volume/csi/csi_plugin.go
+++ b/pkg/volume/csi/csi_plugin.go
-@@ -237,20 +237,24 @@ func (p *csiPlugin) Init(host volume.VolumeHost) error {
+@@ -240,18 +240,22 @@ func (p *csiPlugin) Init(host volume.VolumeHost) error {
}
-
+
// Initializing the label management channels
- nim = nodeinfomanager.NewNodeInfoManager(host.GetNodeName(), host, migratedPlugins)
+ localNim := nodeinfomanager.NewNodeInfoManager(host.GetNodeName(), host, migratedPlugins)
-
- if utilfeature.DefaultFeatureGate.Enabled(features.CSIMigration) {
- // This function prevents Kubelet from posting Ready status until CSINode
- // is both installed and initialized
-- if err := initializeCSINode(host); err != nil {
-+ if err := initializeCSINode(host, localNim); err != nil {
- return errors.New(log("failed to initialize CSINode: %v", err))
- }
+
+ // This function prevents Kubelet from posting Ready status until CSINode
+ // is both installed and initialized
+- if err := initializeCSINode(host); err != nil {
++ if err := initializeCSINode(host, localNim); err != nil {
+ return errors.New(log("failed to initialize CSINode: %v", err))
}
-
+
+ if _, ok := host.(volume.KubeletVolumeHost); ok {
+ nim = localNim
+ }
+
return nil
}
-
+
-func initializeCSINode(host volume.VolumeHost) error {
+func initializeCSINode(host volume.VolumeHost, nim nodeinfomanager.Interface) error {
kvh, ok := host.(volume.KubeletVolumeHost)
if !ok {
klog.V(4).Info("Cast from VolumeHost to KubeletVolumeHost failed. Skipping CSINode initialization, not running on kubelet")
---
-2.25.1
-
+--
+2.34.1
diff --git a/build-scripts/components/kubernetes/patches/v1.27.0/0001-Unix-socket-skip-validation-in-component-status.patch b/build-scripts/components/kubernetes/patches/v1.27.0/0001-Unix-socket-skip-validation-in-component-status.patch
new file mode 100644
index 0000000000..f4fc353dda
--- /dev/null
+++ b/build-scripts/components/kubernetes/patches/v1.27.0/0001-Unix-socket-skip-validation-in-component-status.patch
@@ -0,0 +1,28 @@
+From dd1db952eab13912a55207c81a2ac267909677ac Mon Sep 17 00:00:00 2001
+From: Konstantinos Tsakalozos
+Date: Tue, 24 Aug 2021 11:17:19 +0300
+Subject: [PATCH] Unix socket skip validation in component status
+
+---
+ pkg/registry/core/rest/storage_core.go | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/pkg/registry/core/rest/storage_core.go b/pkg/registry/core/rest/storage_core.go
+index 1f915c32d4b..0bb7f1a9bf9 100644
+--- a/pkg/registry/core/rest/storage_core.go
++++ b/pkg/registry/core/rest/storage_core.go
+@@ -350,6 +350,12 @@ func (s componentStatusStorage) serversToValidate() map[string]*componentstatus.
+ klog.Errorf("Failed to parse etcd url for validation: %v", err)
+ continue
+ }
++
++ if etcdUrl.Scheme == "unix" {
++ klog.Infof("Socket etcd endpoint detected. Will not validate")
++ continue
++ }
++
+ var port int
+ var addr string
+ if strings.Contains(etcdUrl.Host, ":") {
+--
+2.25.1
diff --git a/build-scripts/pre-patches/0001-Kubelite-integration.patch b/build-scripts/components/kubernetes/patches/v1.27.4/0000-Kubelite-integration.patch
similarity index 85%
rename from build-scripts/pre-patches/0001-Kubelite-integration.patch
rename to build-scripts/components/kubernetes/patches/v1.27.4/0000-Kubelite-integration.patch
index 707444aabd..2663d7381b 100644
--- a/build-scripts/pre-patches/0001-Kubelite-integration.patch
+++ b/build-scripts/components/kubernetes/patches/v1.27.4/0000-Kubelite-integration.patch
@@ -1,41 +1,42 @@
-From de618434d4a4168f11bec34c062700b3ad17d2ca Mon Sep 17 00:00:00 2001
+From 3162aa9df25819b60a3c0a3b044394639d55280c Mon Sep 17 00:00:00 2001
From: Konstantinos Tsakalozos
Date: Wed, 3 Mar 2021 18:19:37 +0200
Subject: [PATCH] Kubelite integration
---
- cmd/kube-apiserver/app/server.go | 9 ++--
- cmd/kubelet/app/server.go | 11 ++--
+ cmd/kube-apiserver/app/server.go | 9 +++-
+ cmd/kube-scheduler/app/server.go | 6 ++-
+ cmd/kubelet/app/server.go | 13 +++--
cmd/kubelite/app/daemons/daemon.go | 84 +++++++++++++++++++++++++++++
cmd/kubelite/app/options/options.go | 79 +++++++++++++++++++++++++++
cmd/kubelite/app/server.go | 79 +++++++++++++++++++++++++++
cmd/kubelite/kubelite.go | 28 ++++++++++
pkg/volume/csi/csi_plugin.go | 10 ++--
- 7 files changed, 291 insertions(+), 9 deletions(-)
+ 8 files changed, 297 insertions(+), 11 deletions(-)
create mode 100644 cmd/kubelite/app/daemons/daemon.go
create mode 100644 cmd/kubelite/app/options/options.go
create mode 100644 cmd/kubelite/app/server.go
create mode 100644 cmd/kubelite/kubelite.go
diff --git a/cmd/kube-apiserver/app/server.go b/cmd/kube-apiserver/app/server.go
-index 9fb54ec9a64..ade92c7337b 100644
+index b021bac2574..8d5a1bad8ed 100644
--- a/cmd/kube-apiserver/app/server.go
+++ b/cmd/kube-apiserver/app/server.go
-@@ -106,7 +106,7 @@ func checkNonZeroInsecurePort(fs *pflag.FlagSet) error {
+@@ -76,7 +76,7 @@ func init() {
}
-
+
// NewAPIServerCommand creates a *cobra.Command object with default parameters
-func NewAPIServerCommand() *cobra.Command {
+func NewAPIServerCommand(stopCh... <- chan struct{}) *cobra.Command {
s := options.NewServerRunOptions()
cmd := &cobra.Command{
Use: "kube-apiserver",
-@@ -142,8 +142,11 @@ cluster's shared state through which all other components interact.`,
- if errs := completedOptions.Validate(); len(errs) != 0 {
- return utilerrors.NewAggregate(errs)
+@@ -116,7 +116,12 @@ cluster's shared state through which all other components interact.`,
}
--
+ // add feature enablement metrics
+ utilfeature.DefaultMutableFeatureGate.AddMetrics()
- return Run(completedOptions, genericapiserver.SetupSignalHandler())
++
+ if len(stopCh) != 0 {
+ return Run(completedOptions, stopCh[0])
+ } else {
@@ -44,42 +45,63 @@ index 9fb54ec9a64..ade92c7337b 100644
},
Args: func(cmd *cobra.Command, args []string) error {
for _, arg := range args {
+diff --git a/cmd/kube-scheduler/app/server.go b/cmd/kube-scheduler/app/server.go
+index c48b09a420d..b7f273b02ac 100644
+--- a/cmd/kube-scheduler/app/server.go
++++ b/cmd/kube-scheduler/app/server.go
+@@ -132,7 +132,11 @@ func runCommand(cmd *cobra.Command, opts *options.Options, registryOptions ...Op
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ go func() {
+- stopCh := server.SetupSignalHandler()
++ c := cmd.Context()
++ if c == nil {
++ c = server.SetupSignalContext()
++ }
++ stopCh := c.Done()
+ <-stopCh
+ cancel()
+ }()
diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go
-index 2dc10a512b3..40d01f347ce 100644
+index 742524e325f..561cbf68868 100644
--- a/cmd/kubelet/app/server.go
+++ b/cmd/kubelet/app/server.go
-@@ -110,7 +110,7 @@ const (
+@@ -122,7 +122,7 @@ const (
)
-
+
// NewKubeletCommand creates a *cobra.Command object with default parameters
-func NewKubeletCommand() *cobra.Command {
+func NewKubeletCommand(ctx ...context.Context) *cobra.Command {
cleanFlagSet := pflag.NewFlagSet(componentKubelet, pflag.ContinueOnError)
cleanFlagSet.SetNormalizeFunc(cliflag.WordSepNormalizeFunc)
kubeletFlags := options.NewKubeletFlags()
-@@ -277,7 +277,12 @@ HTTP server: The kubelet can also listen for HTTP and respond to a simple API
+@@ -249,6 +249,12 @@ is checked every 20 seconds (also configurable with a flag).`,
+ if err := checkPermissions(); err != nil {
klog.ErrorS(err, "kubelet running with insufficient permissions")
}
- // set up signal context here in order to be reused by kubelet and docker shim
-- ctx := genericapiserver.SetupSignalContext()
+ runctx := context.Background()
+ if len(ctx) == 0 {
+ runctx = genericapiserver.SetupSignalContext()
+ } else {
+ runctx = ctx[0]
+ }
-
+
// make the kubelet's config safe for logging
config := kubeletServer.KubeletConfiguration.DeepCopy()
-@@ -288,7 +293,7 @@ HTTP server: The kubelet can also listen for HTTP and respond to a simple API
- klog.V(5).InfoS("KubeletConfiguration", "configuration", kubeletServer.KubeletConfiguration)
-
+@@ -258,12 +264,9 @@ is checked every 20 seconds (also configurable with a flag).`,
+ // log the kubelet's config for inspection
+ klog.V(5).InfoS("KubeletConfiguration", "configuration", klog.Format(config))
+
+- // set up signal context for kubelet shutdown
+- ctx := genericapiserver.SetupSignalContext()
+-
+ utilfeature.DefaultMutableFeatureGate.AddMetrics()
// run the kubelet
-- if err := Run(ctx, kubeletServer, kubeletDeps, utilfeature.DefaultFeatureGate); err != nil {
-+ if err := Run(runctx, kubeletServer, kubeletDeps, utilfeature.DefaultFeatureGate); err != nil {
- klog.ErrorS(err, "Failed to run kubelet")
- os.Exit(1)
- }
+- return Run(ctx, kubeletServer, kubeletDeps, utilfeature.DefaultFeatureGate)
++ return Run(runctx, kubeletServer, kubeletDeps, utilfeature.DefaultFeatureGate)
+ },
+ }
+
diff --git a/cmd/kubelite/app/daemons/daemon.go b/cmd/kubelite/app/daemons/daemon.go
new file mode 100644
index 00000000000..dbef03cf07e
@@ -376,37 +398,34 @@ index 00000000000..667b24f68e6
+ println("Stopping kubelite")
+}
diff --git a/pkg/volume/csi/csi_plugin.go b/pkg/volume/csi/csi_plugin.go
-index 85c1c1f3db1..b90c82225f5 100644
+index 2556517276e..0b5ef45d083 100644
--- a/pkg/volume/csi/csi_plugin.go
+++ b/pkg/volume/csi/csi_plugin.go
-@@ -237,20 +237,24 @@ func (p *csiPlugin) Init(host volume.VolumeHost) error {
+@@ -243,18 +243,22 @@ func (p *csiPlugin) Init(host volume.VolumeHost) error {
}
-
+
// Initializing the label management channels
- nim = nodeinfomanager.NewNodeInfoManager(host.GetNodeName(), host, migratedPlugins)
+ localNim := nodeinfomanager.NewNodeInfoManager(host.GetNodeName(), host, migratedPlugins)
-
- if utilfeature.DefaultFeatureGate.Enabled(features.CSIMigration) {
- // This function prevents Kubelet from posting Ready status until CSINode
- // is both installed and initialized
-- if err := initializeCSINode(host); err != nil {
-+ if err := initializeCSINode(host, localNim); err != nil {
- return errors.New(log("failed to initialize CSINode: %v", err))
- }
+
+ // This function prevents Kubelet from posting Ready status until CSINode
+ // is both installed and initialized
+- if err := initializeCSINode(host); err != nil {
++ if err := initializeCSINode(host, localNim); err != nil {
+ return errors.New(log("failed to initialize CSINode: %v", err))
}
-
+
+ if _, ok := host.(volume.KubeletVolumeHost); ok {
+ nim = localNim
+ }
+
return nil
}
-
+
-func initializeCSINode(host volume.VolumeHost) error {
+func initializeCSINode(host volume.VolumeHost, nim nodeinfomanager.Interface) error {
kvh, ok := host.(volume.KubeletVolumeHost)
if !ok {
klog.V(4).Info("Cast from VolumeHost to KubeletVolumeHost failed. Skipping CSINode initialization, not running on kubelet")
---
-2.25.1
-
+--
+2.34.1
diff --git a/build-scripts/components/kubernetes/patches/v1.28.0/0000-Kubelite-integration.patch b/build-scripts/components/kubernetes/patches/v1.28.0/0000-Kubelite-integration.patch
new file mode 100644
index 0000000000..2663d7381b
--- /dev/null
+++ b/build-scripts/components/kubernetes/patches/v1.28.0/0000-Kubelite-integration.patch
@@ -0,0 +1,431 @@
+From 3162aa9df25819b60a3c0a3b044394639d55280c Mon Sep 17 00:00:00 2001
+From: Konstantinos Tsakalozos
+Date: Wed, 3 Mar 2021 18:19:37 +0200
+Subject: [PATCH] Kubelite integration
+
+---
+ cmd/kube-apiserver/app/server.go | 9 +++-
+ cmd/kube-scheduler/app/server.go | 6 ++-
+ cmd/kubelet/app/server.go | 13 +++--
+ cmd/kubelite/app/daemons/daemon.go | 84 +++++++++++++++++++++++++++++
+ cmd/kubelite/app/options/options.go | 79 +++++++++++++++++++++++++++
+ cmd/kubelite/app/server.go | 79 +++++++++++++++++++++++++++
+ cmd/kubelite/kubelite.go | 28 ++++++++++
+ pkg/volume/csi/csi_plugin.go | 10 ++--
+ 8 files changed, 297 insertions(+), 11 deletions(-)
+ create mode 100644 cmd/kubelite/app/daemons/daemon.go
+ create mode 100644 cmd/kubelite/app/options/options.go
+ create mode 100644 cmd/kubelite/app/server.go
+ create mode 100644 cmd/kubelite/kubelite.go
+
+diff --git a/cmd/kube-apiserver/app/server.go b/cmd/kube-apiserver/app/server.go
+index b021bac2574..8d5a1bad8ed 100644
+--- a/cmd/kube-apiserver/app/server.go
++++ b/cmd/kube-apiserver/app/server.go
+@@ -76,7 +76,7 @@ func init() {
+ }
+
+ // NewAPIServerCommand creates a *cobra.Command object with default parameters
+-func NewAPIServerCommand() *cobra.Command {
++func NewAPIServerCommand(stopCh... <- chan struct{}) *cobra.Command {
+ s := options.NewServerRunOptions()
+ cmd := &cobra.Command{
+ Use: "kube-apiserver",
+@@ -116,7 +116,12 @@ cluster's shared state through which all other components interact.`,
+ }
+ // add feature enablement metrics
+ utilfeature.DefaultMutableFeatureGate.AddMetrics()
+- return Run(completedOptions, genericapiserver.SetupSignalHandler())
++
++ if len(stopCh) != 0 {
++ return Run(completedOptions, stopCh[0])
++ } else {
++ return Run(completedOptions, genericapiserver.SetupSignalHandler())
++ }
+ },
+ Args: func(cmd *cobra.Command, args []string) error {
+ for _, arg := range args {
+diff --git a/cmd/kube-scheduler/app/server.go b/cmd/kube-scheduler/app/server.go
+index c48b09a420d..b7f273b02ac 100644
+--- a/cmd/kube-scheduler/app/server.go
++++ b/cmd/kube-scheduler/app/server.go
+@@ -132,7 +132,11 @@ func runCommand(cmd *cobra.Command, opts *options.Options, registryOptions ...Op
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ go func() {
+- stopCh := server.SetupSignalHandler()
++ c := cmd.Context()
++ if c == nil {
++ c = server.SetupSignalContext()
++ }
++ stopCh := c.Done()
+ <-stopCh
+ cancel()
+ }()
+diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go
+index 742524e325f..561cbf68868 100644
+--- a/cmd/kubelet/app/server.go
++++ b/cmd/kubelet/app/server.go
+@@ -122,7 +122,7 @@ const (
+ )
+
+ // NewKubeletCommand creates a *cobra.Command object with default parameters
+-func NewKubeletCommand() *cobra.Command {
++func NewKubeletCommand(ctx ...context.Context) *cobra.Command {
+ cleanFlagSet := pflag.NewFlagSet(componentKubelet, pflag.ContinueOnError)
+ cleanFlagSet.SetNormalizeFunc(cliflag.WordSepNormalizeFunc)
+ kubeletFlags := options.NewKubeletFlags()
+@@ -249,6 +249,12 @@ is checked every 20 seconds (also configurable with a flag).`,
+ if err := checkPermissions(); err != nil {
+ klog.ErrorS(err, "kubelet running with insufficient permissions")
+ }
++ runctx := context.Background()
++ if len(ctx) == 0 {
++ runctx = genericapiserver.SetupSignalContext()
++ } else {
++ runctx = ctx[0]
++ }
+
+ // make the kubelet's config safe for logging
+ config := kubeletServer.KubeletConfiguration.DeepCopy()
+@@ -258,12 +264,9 @@ is checked every 20 seconds (also configurable with a flag).`,
+ // log the kubelet's config for inspection
+ klog.V(5).InfoS("KubeletConfiguration", "configuration", klog.Format(config))
+
+- // set up signal context for kubelet shutdown
+- ctx := genericapiserver.SetupSignalContext()
+-
+ utilfeature.DefaultMutableFeatureGate.AddMetrics()
+ // run the kubelet
+- return Run(ctx, kubeletServer, kubeletDeps, utilfeature.DefaultFeatureGate)
++ return Run(runctx, kubeletServer, kubeletDeps, utilfeature.DefaultFeatureGate)
+ },
+ }
+
+diff --git a/cmd/kubelite/app/daemons/daemon.go b/cmd/kubelite/app/daemons/daemon.go
+new file mode 100644
+index 00000000000..dbef03cf07e
+--- /dev/null
++++ b/cmd/kubelite/app/daemons/daemon.go
+@@ -0,0 +1,84 @@
++package daemon
++
++import (
++ "context"
++ "k8s.io/client-go/kubernetes"
++ "k8s.io/client-go/tools/clientcmd"
++ "k8s.io/klog/v2"
++ genericcontrollermanager "k8s.io/controller-manager/app"
++ apiserver "k8s.io/kubernetes/cmd/kube-apiserver/app"
++ controller "k8s.io/kubernetes/cmd/kube-controller-manager/app"
++ proxy "k8s.io/kubernetes/cmd/kube-proxy/app"
++ scheduler "k8s.io/kubernetes/cmd/kube-scheduler/app"
++ kubelet "k8s.io/kubernetes/cmd/kubelet/app"
++
++ "time"
++)
++
++func StartControllerManager(args []string, ctx context.Context) {
++ command := controller.NewControllerManagerCommand()
++ command.SetArgs(args)
++
++ klog.Info("Starting Controller Manager")
++ if err := command.ExecuteContext(ctx); err != nil {
++ klog.Fatalf("Controller Manager exited %v", err)
++ }
++ klog.Info("Stopping Controller Manager")
++}
++
++func StartScheduler(args []string, ctx context.Context) {
++ command := scheduler.NewSchedulerCommand()
++ command.SetArgs(args)
++
++ klog.Info("Starting Scheduler")
++ if err := command.ExecuteContext(ctx); err != nil {
++ klog.Fatalf("Scheduler exited %v", err)
++ }
++ klog.Info("Stopping Scheduler")
++}
++
++func StartProxy(args []string) {
++ command := proxy.NewProxyCommand()
++ command.SetArgs(args)
++
++ klog.Info("Starting Proxy")
++ if err := command.Execute(); err != nil {
++ klog.Fatalf("Proxy exited %v", err)
++ }
++ klog.Info("Stopping Proxy")
++}
++
++func StartKubelet(args []string, ctx context.Context) {
++ command := kubelet.NewKubeletCommand(ctx)
++ command.SetArgs(args)
++
++ klog.Info("Starting Kubelet")
++ if err := command.Execute(); err != nil {
++ klog.Fatalf("Kubelet exited %v", err)
++ }
++ klog.Info("Stopping Kubelet")
++}
++
++func StartAPIServer(args []string, ctx <-chan struct{}) {
++ command := apiserver.NewAPIServerCommand(ctx)
++ command.SetArgs(args)
++ klog.Info("Starting API Server")
++ if err := command.Execute(); err != nil {
++ klog.Fatalf("API Server exited %v", err)
++ }
++ klog.Info("Stopping API Server")
++}
++
++func WaitForAPIServer(kubeconfigpath string, timeout time.Duration) {
++ klog.Info("Waiting for the API server")
++ config, err := clientcmd.BuildConfigFromFlags("", kubeconfigpath)
++ if err != nil {
++ klog.Fatalf("could not find the cluster's kubeconfig file %v", err)
++ }
++ // create the client
++ client, err := kubernetes.NewForConfig(config)
++ if err != nil {
++ klog.Fatalf("could not create client to the cluster %v", err)
++ }
++ genericcontrollermanager.WaitForAPIServer(client, timeout)
++}
+\ No newline at end of file
+diff --git a/cmd/kubelite/app/options/options.go b/cmd/kubelite/app/options/options.go
+new file mode 100644
+index 00000000000..80f1d8b09fc
+--- /dev/null
++++ b/cmd/kubelite/app/options/options.go
+@@ -0,0 +1,79 @@
++/*
++Copyright 2018 The Kubernetes Authors.
++
++Licensed under the Apache License, Version 2.0 (the "License");
++you may not use this file except in compliance with the License.
++You may obtain a copy of the License at
++
++ http://www.apache.org/licenses/LICENSE-2.0
++
++Unless required by applicable law or agreed to in writing, software
++distributed under the License is distributed on an "AS IS" BASIS,
++WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++See the License for the specific language governing permissions and
++limitations under the License.
++*/
++
++package options
++
++import (
++ "bufio"
++ "k8s.io/klog/v2"
++ "os"
++ "strings"
++)
++
++// Options has all the params needed to run a Kubelite
++type Options struct {
++ SchedulerArgsFile string
++ ControllerManagerArgsFile string
++ ProxyArgsFile string
++ KubeletArgsFile string
++ APIServerArgsFile string
++ KubeconfigFile string
++ StartControlPlane bool
++}
++
++func NewOptions() (*Options){
++ o := Options{
++ "/var/snap/microk8s/current/args/kube-scheduler",
++ "/var/snap/microk8s/current/args/kube-controller-manager",
++ "/var/snap/microk8s/current/args/kube-proxy",
++ "/var/snap/microk8s/current/args/kubelet",
++ "/var/snap/microk8s/current/args/kube-apiserver",
++ "/var/snap/microk8s/current/credentials/client.config",
++ true,
++ }
++ return &o
++}
++
++func ReadArgsFromFile(filename string) []string {
++ var args []string
++ file, err := os.Open(filename)
++ if err != nil {
++ klog.Fatalf("Failed to open arguments file %v", err)
++ }
++ defer file.Close()
++
++ scanner := bufio.NewScanner(file)
++ for scanner.Scan() {
++ line := scanner.Text()
++ line = strings.TrimSpace(line)
++ // ignore lines with # and empty lines
++ if len(line) <= 0 || strings.HasPrefix(line, "#") {
++ continue
++ }
++ // remove " and '
++ for _, r := range "\"'" {
++ line = strings.ReplaceAll(line, string(r), "")
++ }
++ for _, part := range strings.Split(line, " ") {
++
++ args = append(args, os.ExpandEnv(part))
++ }
++ }
++ if err := scanner.Err(); err != nil {
++ klog.Fatalf("Failed to read arguments file %v", err)
++ }
++ return args
++}
+diff --git a/cmd/kubelite/app/server.go b/cmd/kubelite/app/server.go
+new file mode 100644
+index 00000000000..e7452a09e3e
+--- /dev/null
++++ b/cmd/kubelite/app/server.go
+@@ -0,0 +1,79 @@
++/*
++Copyright © 2020 NAME HERE
++
++Licensed under the Apache License, Version 2.0 (the "License");
++you may not use this file except in compliance with the License.
++You may obtain a copy of the License at
++
++ http://www.apache.org/licenses/LICENSE-2.0
++
++Unless required by applicable law or agreed to in writing, software
++distributed under the License is distributed on an "AS IS" BASIS,
++WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++See the License for the specific language governing permissions and
++limitations under the License.
++*/
++package app
++
++import (
++ "fmt"
++ "github.com/spf13/cobra"
++ genericapiserver "k8s.io/apiserver/pkg/server"
++ daemon "k8s.io/kubernetes/cmd/kubelite/app/daemons"
++ "k8s.io/kubernetes/cmd/kubelite/app/options"
++ "os"
++ "time"
++)
++
++var opts = options.NewOptions()
++
++// liteCmd represents the base command when called without any subcommands
++var liteCmd = &cobra.Command{
++ Use: "kubelite",
++ Short: "Single server kubernetes",
++ Long: `A single server that spawns all other kubernetes servers as threads`,
++ // Uncomment the following line if your bare application
++ // has an action associated with it:
++ Run: func(cmd *cobra.Command, args []string) {
++ ctx := genericapiserver.SetupSignalContext()
++
++ if opts.StartControlPlane {
++ apiserverArgs := options.ReadArgsFromFile(opts.APIServerArgsFile)
++ go daemon.StartAPIServer(apiserverArgs, ctx.Done())
++ daemon.WaitForAPIServer(opts.KubeconfigFile, 360 * time.Second)
++
++ controllerArgs := options.ReadArgsFromFile(opts.ControllerManagerArgsFile)
++ go daemon.StartControllerManager(controllerArgs, ctx)
++
++ schedulerArgs := options.ReadArgsFromFile(opts.SchedulerArgsFile)
++ go daemon.StartScheduler(schedulerArgs, ctx)
++ }
++
++ proxyArgs := options.ReadArgsFromFile(opts.ProxyArgsFile)
++ go daemon.StartProxy(proxyArgs)
++
++ kubeletArgs := options.ReadArgsFromFile(opts.KubeletArgsFile)
++ daemon.StartKubelet(kubeletArgs, ctx)
++ },
++}
++
++// Execute adds all child commands to the root command and sets flags appropriately.
++// This is called by main.main(). It only needs to happen once to the liteCmd.
++func Execute() {
++ if err := liteCmd.Execute(); err != nil {
++ fmt.Println(err)
++ os.Exit(1)
++ }
++}
++
++func init() {
++ cobra.OnInitialize()
++
++ liteCmd.Flags().StringVar(&opts.SchedulerArgsFile, "scheduler-args-file", opts.SchedulerArgsFile, "file with the arguments for the scheduler")
++ liteCmd.Flags().StringVar(&opts.ControllerManagerArgsFile, "controller-manager-args-file", opts.ControllerManagerArgsFile, "file with the arguments for the controller manager")
++ liteCmd.Flags().StringVar(&opts.ProxyArgsFile, "proxy-args-file", opts.ProxyArgsFile , "file with the arguments for kube-proxy")
++ liteCmd.Flags().StringVar(&opts.KubeletArgsFile, "kubelet-args-file", opts.KubeletArgsFile, "file with the arguments for kubelet")
++ liteCmd.Flags().StringVar(&opts.APIServerArgsFile, "apiserver-args-file", opts.APIServerArgsFile, "file with the arguments for the API server")
++ liteCmd.Flags().StringVar(&opts.KubeconfigFile , "kubeconfig-file", opts.KubeconfigFile, "the kubeconfig file to use to healthcheck the API server")
++ liteCmd.Flags().BoolVar(&opts.StartControlPlane, "start-control-plane", opts.StartControlPlane, "start the control plane (API server, scheduler and controller manager)")
++}
+diff --git a/cmd/kubelite/kubelite.go b/cmd/kubelite/kubelite.go
+new file mode 100644
+index 00000000000..667b24f68e6
+--- /dev/null
++++ b/cmd/kubelite/kubelite.go
+@@ -0,0 +1,28 @@
++package main
++
++import (
++ "github.com/spf13/pflag"
++ cliflag "k8s.io/component-base/cli/flag"
++ "math/rand"
++ "time"
++
++ "k8s.io/component-base/logs"
++ _ "k8s.io/component-base/metrics/prometheus/clientgo" // load all the prometheus client-go plugin
++ _ "k8s.io/component-base/metrics/prometheus/version" // for version metric registration
++ "k8s.io/kubernetes/cmd/kubelite/app"
++)
++
++func main() {
++ println("Starting kubelite")
++ rand.Seed(time.Now().UnixNano())
++ // TODO: once we switch everything over to Cobra commands, we can go back to calling
++ // utilflag.InitFlags() (by removing its pflag.Parse() call). For now, we have to set the
++ // normalize func and add the go flag set by hand.
++ pflag.CommandLine.SetNormalizeFunc(cliflag.WordSepNormalizeFunc)
++ // utilflag.InitFlags()
++ logs.InitLogs()
++ defer logs.FlushLogs()
++
++ app.Execute()
++ println("Stopping kubelite")
++}
+diff --git a/pkg/volume/csi/csi_plugin.go b/pkg/volume/csi/csi_plugin.go
+index 2556517276e..0b5ef45d083 100644
+--- a/pkg/volume/csi/csi_plugin.go
++++ b/pkg/volume/csi/csi_plugin.go
+@@ -243,18 +243,22 @@ func (p *csiPlugin) Init(host volume.VolumeHost) error {
+ }
+
+ // Initializing the label management channels
+- nim = nodeinfomanager.NewNodeInfoManager(host.GetNodeName(), host, migratedPlugins)
++ localNim := nodeinfomanager.NewNodeInfoManager(host.GetNodeName(), host, migratedPlugins)
+
+ // This function prevents Kubelet from posting Ready status until CSINode
+ // is both installed and initialized
+- if err := initializeCSINode(host); err != nil {
++ if err := initializeCSINode(host, localNim); err != nil {
+ return errors.New(log("failed to initialize CSINode: %v", err))
+ }
+
++ if _, ok := host.(volume.KubeletVolumeHost); ok {
++ nim = localNim
++ }
++
+ return nil
+ }
+
+-func initializeCSINode(host volume.VolumeHost) error {
++func initializeCSINode(host volume.VolumeHost, nim nodeinfomanager.Interface) error {
+ kvh, ok := host.(volume.KubeletVolumeHost)
+ if !ok {
+ klog.V(4).Info("Cast from VolumeHost to KubeletVolumeHost failed. Skipping CSINode initialization, not running on kubelet")
+--
+2.34.1
diff --git a/build-scripts/components/kubernetes/patches/v1.28.0/0001-Set-log-reapply-handling-to-ignore-unchanged.patch b/build-scripts/components/kubernetes/patches/v1.28.0/0001-Set-log-reapply-handling-to-ignore-unchanged.patch
new file mode 100644
index 0000000000..31fbc29fd9
--- /dev/null
+++ b/build-scripts/components/kubernetes/patches/v1.28.0/0001-Set-log-reapply-handling-to-ignore-unchanged.patch
@@ -0,0 +1,24 @@
+From 55f4864d816c8e7ca0ebb39571dc88dbdf05eff2 Mon Sep 17 00:00:00 2001
+From: Angelos Kolaitis
+Date: Thu, 27 Jul 2023 18:08:00 +0300
+Subject: [PATCH] Set log reapply handling to ignore unchanged
+
+---
+ staging/src/k8s.io/component-base/logs/api/v1/options.go | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/staging/src/k8s.io/component-base/logs/api/v1/options.go b/staging/src/k8s.io/component-base/logs/api/v1/options.go
+index 2db9b1f5382..e0824dcdc4e 100644
+--- a/staging/src/k8s.io/component-base/logs/api/v1/options.go
++++ b/staging/src/k8s.io/component-base/logs/api/v1/options.go
+@@ -64,7 +64,7 @@ func NewLoggingConfiguration() *LoggingConfiguration {
+ // are no goroutines which might call logging functions. The default for ValidateAndApply
+ // and ValidateAndApplyWithOptions is to return an error when called more than once.
+ // Binaries and unit tests can override that behavior.
+-var ReapplyHandling = ReapplyHandlingError
++var ReapplyHandling = ReapplyHandlingIgnoreUnchanged
+
+ type ReapplyHandlingType int
+
+--
+2.34.1
diff --git a/build-scripts/components/kubernetes/patches/v1.31.0/0000-Kubelite-integration.patch b/build-scripts/components/kubernetes/patches/v1.31.0/0000-Kubelite-integration.patch
new file mode 100644
index 0000000000..ab0e155352
--- /dev/null
+++ b/build-scripts/components/kubernetes/patches/v1.31.0/0000-Kubelite-integration.patch
@@ -0,0 +1,430 @@
+From d261b947963b9e808725a21e3d55e52c20826e22 Mon Sep 17 00:00:00 2001
+From: Konstantinos Tsakalozos
+Date: Wed, 3 Mar 2021 18:19:37 +0200
+Subject: [PATCH] Kubelite integration
+
+---
+ cmd/kube-apiserver/app/server.go | 9 ++-
+ cmd/kube-scheduler/app/server.go | 6 +-
+ cmd/kubelet/app/server.go | 13 +++--
+ cmd/kubelite/app/daemons/daemon.go | 85 +++++++++++++++++++++++++++++
+ cmd/kubelite/app/options/options.go | 79 +++++++++++++++++++++++++++
+ cmd/kubelite/app/server.go | 80 +++++++++++++++++++++++++++
+ cmd/kubelite/kubelite.go | 25 +++++++++
+ pkg/volume/csi/csi_plugin.go | 10 +++-
+ 8 files changed, 296 insertions(+), 11 deletions(-)
+ create mode 100644 cmd/kubelite/app/daemons/daemon.go
+ create mode 100644 cmd/kubelite/app/options/options.go
+ create mode 100644 cmd/kubelite/app/server.go
+ create mode 100644 cmd/kubelite/kubelite.go
+
+diff --git a/cmd/kube-apiserver/app/server.go b/cmd/kube-apiserver/app/server.go
+index fad0f1e9579..5e291005671 100644
+--- a/cmd/kube-apiserver/app/server.go
++++ b/cmd/kube-apiserver/app/server.go
+@@ -63,7 +63,7 @@ func init() {
+ }
+
+ // NewAPIServerCommand creates a *cobra.Command object with default parameters
+-func NewAPIServerCommand() *cobra.Command {
++func NewAPIServerCommand(ctx ...context.Context) *cobra.Command {
+ _, featureGate := utilversion.DefaultComponentGlobalsRegistry.ComponentGlobalsOrRegister(
+ utilversion.DefaultKubeComponent, utilversion.DefaultBuildEffectiveVersion(), utilfeature.DefaultMutableFeatureGate)
+ s := options.NewServerRunOptions()
+@@ -119,7 +119,12 @@ cluster's shared state through which all other components interact.`,
+ return nil
+ },
+ }
+- cmd.SetContext(genericapiserver.SetupSignalContext())
++
++ if len(ctx) == 0 {
++ cmd.SetContext(genericapiserver.SetupSignalContext())
++ } else {
++ cmd.SetContext(ctx[0])
++ }
+
+ fs := cmd.Flags()
+ namedFlagSets := s.Flags()
+diff --git a/cmd/kube-scheduler/app/server.go b/cmd/kube-scheduler/app/server.go
+index 7b08b119b4b..5d2a5a5f51a 100644
+--- a/cmd/kube-scheduler/app/server.go
++++ b/cmd/kube-scheduler/app/server.go
+@@ -145,7 +145,11 @@ func runCommand(cmd *cobra.Command, opts *options.Options, registryOptions ...Op
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ go func() {
+- stopCh := server.SetupSignalHandler()
++ c := cmd.Context()
++ if c == nil {
++ c = server.SetupSignalContext()
++ }
++ stopCh := c.Done()
+ <-stopCh
+ cancel()
+ }()
+diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go
+index 2d90030d210..666996e2ecb 100644
+--- a/cmd/kubelet/app/server.go
++++ b/cmd/kubelet/app/server.go
+@@ -129,7 +129,7 @@ const (
+ )
+
+ // NewKubeletCommand creates a *cobra.Command object with default parameters
+-func NewKubeletCommand() *cobra.Command {
++func NewKubeletCommand(ctx ...context.Context) *cobra.Command {
+ cleanFlagSet := pflag.NewFlagSet(componentKubelet, pflag.ContinueOnError)
+ cleanFlagSet.SetNormalizeFunc(cliflag.WordSepNormalizeFunc)
+ kubeletFlags := options.NewKubeletFlags()
+@@ -266,6 +266,12 @@ is checked every 20 seconds (also configurable with a flag).`,
+ if err := checkPermissions(); err != nil {
+ klog.ErrorS(err, "kubelet running with insufficient permissions")
+ }
++ runctx := context.Background()
++ if len(ctx) == 0 {
++ runctx = genericapiserver.SetupSignalContext()
++ } else {
++ runctx = ctx[0]
++ }
+
+ // make the kubelet's config safe for logging
+ config := kubeletServer.KubeletConfiguration.DeepCopy()
+@@ -275,12 +281,9 @@ is checked every 20 seconds (also configurable with a flag).`,
+ // log the kubelet's config for inspection
+ klog.V(5).InfoS("KubeletConfiguration", "configuration", klog.Format(config))
+
+- // set up signal context for kubelet shutdown
+- ctx := genericapiserver.SetupSignalContext()
+-
+ utilfeature.DefaultMutableFeatureGate.AddMetrics()
+ // run the kubelet
+- return Run(ctx, kubeletServer, kubeletDeps, utilfeature.DefaultFeatureGate)
++ return Run(runctx, kubeletServer, kubeletDeps, utilfeature.DefaultFeatureGate)
+ },
+ }
+
+diff --git a/cmd/kubelite/app/daemons/daemon.go b/cmd/kubelite/app/daemons/daemon.go
+new file mode 100644
+index 00000000000..46c1af7fdb9
+--- /dev/null
++++ b/cmd/kubelite/app/daemons/daemon.go
+@@ -0,0 +1,85 @@
++package daemon
++
++import (
++ "context"
++
++ "k8s.io/client-go/kubernetes"
++ "k8s.io/client-go/tools/clientcmd"
++ genericcontrollermanager "k8s.io/controller-manager/app"
++ "k8s.io/klog/v2"
++ apiserver "k8s.io/kubernetes/cmd/kube-apiserver/app"
++ controller "k8s.io/kubernetes/cmd/kube-controller-manager/app"
++ proxy "k8s.io/kubernetes/cmd/kube-proxy/app"
++ scheduler "k8s.io/kubernetes/cmd/kube-scheduler/app"
++ kubelet "k8s.io/kubernetes/cmd/kubelet/app"
++
++ "time"
++)
++
++func StartControllerManager(args []string, ctx context.Context) {
++ command := controller.NewControllerManagerCommand()
++ command.SetArgs(args)
++
++ klog.Info("Starting Controller Manager")
++ if err := command.ExecuteContext(ctx); err != nil {
++ klog.Fatalf("Controller Manager exited %v", err)
++ }
++ klog.Info("Stopping Controller Manager")
++}
++
++func StartScheduler(args []string, ctx context.Context) {
++ command := scheduler.NewSchedulerCommand()
++ command.SetArgs(args)
++
++ klog.Info("Starting Scheduler")
++ if err := command.ExecuteContext(ctx); err != nil {
++ klog.Fatalf("Scheduler exited %v", err)
++ }
++ klog.Info("Stopping Scheduler")
++}
++
++func StartProxy(args []string) {
++ command := proxy.NewProxyCommand()
++ command.SetArgs(args)
++
++ klog.Info("Starting Proxy")
++ if err := command.Execute(); err != nil {
++ klog.Fatalf("Proxy exited %v", err)
++ }
++ klog.Info("Stopping Proxy")
++}
++
++func StartKubelet(args []string, ctx context.Context) {
++ command := kubelet.NewKubeletCommand(ctx)
++ command.SetArgs(args)
++
++ klog.Info("Starting Kubelet")
++ if err := command.Execute(); err != nil {
++ klog.Fatalf("Kubelet exited %v", err)
++ }
++ klog.Info("Stopping Kubelet")
++}
++
++func StartAPIServer(args []string, ctx context.Context) {
++ command := apiserver.NewAPIServerCommand(ctx)
++ command.SetArgs(args)
++ klog.Info("Starting API Server")
++ if err := command.Execute(); err != nil {
++ klog.Fatalf("API Server exited %v", err)
++ }
++ klog.Info("Stopping API Server")
++}
++
++func WaitForAPIServer(kubeconfigpath string, timeout time.Duration) {
++ klog.Info("Waiting for the API server")
++ config, err := clientcmd.BuildConfigFromFlags("", kubeconfigpath)
++ if err != nil {
++ klog.Fatalf("could not find the cluster's kubeconfig file %v", err)
++ }
++ // create the client
++ client, err := kubernetes.NewForConfig(config)
++ if err != nil {
++ klog.Fatalf("could not create client to the cluster %v", err)
++ }
++ genericcontrollermanager.WaitForAPIServer(client, timeout)
++}
+diff --git a/cmd/kubelite/app/options/options.go b/cmd/kubelite/app/options/options.go
+new file mode 100644
+index 00000000000..80f1d8b09fc
+--- /dev/null
++++ b/cmd/kubelite/app/options/options.go
+@@ -0,0 +1,79 @@
++/*
++Copyright 2018 The Kubernetes Authors.
++
++Licensed under the Apache License, Version 2.0 (the "License");
++you may not use this file except in compliance with the License.
++You may obtain a copy of the License at
++
++ http://www.apache.org/licenses/LICENSE-2.0
++
++Unless required by applicable law or agreed to in writing, software
++distributed under the License is distributed on an "AS IS" BASIS,
++WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++See the License for the specific language governing permissions and
++limitations under the License.
++*/
++
++package options
++
++import (
++ "bufio"
++ "k8s.io/klog/v2"
++ "os"
++ "strings"
++)
++
++// Options has all the params needed to run a Kubelite
++type Options struct {
++ SchedulerArgsFile string
++ ControllerManagerArgsFile string
++ ProxyArgsFile string
++ KubeletArgsFile string
++ APIServerArgsFile string
++ KubeconfigFile string
++ StartControlPlane bool
++}
++
++func NewOptions() (*Options){
++ o := Options{
++ "/var/snap/microk8s/current/args/kube-scheduler",
++ "/var/snap/microk8s/current/args/kube-controller-manager",
++ "/var/snap/microk8s/current/args/kube-proxy",
++ "/var/snap/microk8s/current/args/kubelet",
++ "/var/snap/microk8s/current/args/kube-apiserver",
++ "/var/snap/microk8s/current/credentials/client.config",
++ true,
++ }
++ return &o
++}
++
++func ReadArgsFromFile(filename string) []string {
++ var args []string
++ file, err := os.Open(filename)
++ if err != nil {
++ klog.Fatalf("Failed to open arguments file %v", err)
++ }
++ defer file.Close()
++
++ scanner := bufio.NewScanner(file)
++ for scanner.Scan() {
++ line := scanner.Text()
++ line = strings.TrimSpace(line)
++ // ignore lines with # and empty lines
++ if len(line) <= 0 || strings.HasPrefix(line, "#") {
++ continue
++ }
++ // remove " and '
++ for _, r := range "\"'" {
++ line = strings.ReplaceAll(line, string(r), "")
++ }
++ for _, part := range strings.Split(line, " ") {
++
++ args = append(args, os.ExpandEnv(part))
++ }
++ }
++ if err := scanner.Err(); err != nil {
++ klog.Fatalf("Failed to read arguments file %v", err)
++ }
++ return args
++}
+diff --git a/cmd/kubelite/app/server.go b/cmd/kubelite/app/server.go
+new file mode 100644
+index 00000000000..4ff36cd6432
+--- /dev/null
++++ b/cmd/kubelite/app/server.go
+@@ -0,0 +1,80 @@
++/*
++Copyright © 2020 NAME HERE
++
++Licensed under the Apache License, Version 2.0 (the "License");
++you may not use this file except in compliance with the License.
++You may obtain a copy of the License at
++
++ http://www.apache.org/licenses/LICENSE-2.0
++
++Unless required by applicable law or agreed to in writing, software
++distributed under the License is distributed on an "AS IS" BASIS,
++WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++See the License for the specific language governing permissions and
++limitations under the License.
++*/
++package app
++
++import (
++ "fmt"
++ "os"
++ "time"
++
++ "github.com/spf13/cobra"
++ genericapiserver "k8s.io/apiserver/pkg/server"
++ daemon "k8s.io/kubernetes/cmd/kubelite/app/daemons"
++ "k8s.io/kubernetes/cmd/kubelite/app/options"
++)
++
++var opts = options.NewOptions()
++
++// liteCmd represents the base command when called without any subcommands
++var liteCmd = &cobra.Command{
++ Use: "kubelite",
++ Short: "Single server kubernetes",
++ Long: `A single server that spawns all other kubernetes servers as threads`,
++ // Uncomment the following line if your bare application
++ // has an action associated with it:
++ Run: func(cmd *cobra.Command, args []string) {
++ ctx := genericapiserver.SetupSignalContext()
++
++ if opts.StartControlPlane {
++ apiserverArgs := options.ReadArgsFromFile(opts.APIServerArgsFile)
++ go daemon.StartAPIServer(apiserverArgs, ctx)
++ daemon.WaitForAPIServer(opts.KubeconfigFile, 360*time.Second)
++
++ controllerArgs := options.ReadArgsFromFile(opts.ControllerManagerArgsFile)
++ go daemon.StartControllerManager(controllerArgs, ctx)
++
++ schedulerArgs := options.ReadArgsFromFile(opts.SchedulerArgsFile)
++ go daemon.StartScheduler(schedulerArgs, ctx)
++ }
++
++ proxyArgs := options.ReadArgsFromFile(opts.ProxyArgsFile)
++ go daemon.StartProxy(proxyArgs)
++
++ kubeletArgs := options.ReadArgsFromFile(opts.KubeletArgsFile)
++ daemon.StartKubelet(kubeletArgs, ctx)
++ },
++}
++
++// Execute adds all child commands to the root command and sets flags appropriately.
++// This is called by main.main(). It only needs to happen once to the liteCmd.
++func Execute() {
++ if err := liteCmd.Execute(); err != nil {
++ fmt.Println(err)
++ os.Exit(1)
++ }
++}
++
++func init() {
++ cobra.OnInitialize()
++
++ liteCmd.Flags().StringVar(&opts.SchedulerArgsFile, "scheduler-args-file", opts.SchedulerArgsFile, "file with the arguments for the scheduler")
++ liteCmd.Flags().StringVar(&opts.ControllerManagerArgsFile, "controller-manager-args-file", opts.ControllerManagerArgsFile, "file with the arguments for the controller manager")
++ liteCmd.Flags().StringVar(&opts.ProxyArgsFile, "proxy-args-file", opts.ProxyArgsFile, "file with the arguments for kube-proxy")
++ liteCmd.Flags().StringVar(&opts.KubeletArgsFile, "kubelet-args-file", opts.KubeletArgsFile, "file with the arguments for kubelet")
++ liteCmd.Flags().StringVar(&opts.APIServerArgsFile, "apiserver-args-file", opts.APIServerArgsFile, "file with the arguments for the API server")
++ liteCmd.Flags().StringVar(&opts.KubeconfigFile, "kubeconfig-file", opts.KubeconfigFile, "the kubeconfig file to use to healthcheck the API server")
++ liteCmd.Flags().BoolVar(&opts.StartControlPlane, "start-control-plane", opts.StartControlPlane, "start the control plane (API server, scheduler and controller manager)")
++}
+diff --git a/cmd/kubelite/kubelite.go b/cmd/kubelite/kubelite.go
+new file mode 100644
+index 00000000000..30ab604f480
+--- /dev/null
++++ b/cmd/kubelite/kubelite.go
+@@ -0,0 +1,25 @@
++package main
++
++import (
++ "github.com/spf13/pflag"
++ cliflag "k8s.io/component-base/cli/flag"
++
++ "k8s.io/component-base/logs"
++ _ "k8s.io/component-base/metrics/prometheus/clientgo" // load all the prometheus client-go plugin
++ _ "k8s.io/component-base/metrics/prometheus/version" // for version metric registration
++ "k8s.io/kubernetes/cmd/kubelite/app"
++)
++
++func main() {
++ println("Starting kubelite")
++ // TODO: once we switch everything over to Cobra commands, we can go back to calling
++ // utilflag.InitFlags() (by removing its pflag.Parse() call). For now, we have to set the
++ // normalize func and add the go flag set by hand.
++ pflag.CommandLine.SetNormalizeFunc(cliflag.WordSepNormalizeFunc)
++ // utilflag.InitFlags()
++ logs.InitLogs()
++ defer logs.FlushLogs()
++
++ app.Execute()
++ println("Stopping kubelite")
++}
+diff --git a/pkg/volume/csi/csi_plugin.go b/pkg/volume/csi/csi_plugin.go
+index fec8a34b4d3..523eb78f05c 100644
+--- a/pkg/volume/csi/csi_plugin.go
++++ b/pkg/volume/csi/csi_plugin.go
+@@ -253,18 +253,22 @@ func (p *csiPlugin) Init(host volume.VolumeHost) error {
+ }
+
+ // Initializing the label management channels
+- nim = nodeinfomanager.NewNodeInfoManager(host.GetNodeName(), host, migratedPlugins)
++ localNim := nodeinfomanager.NewNodeInfoManager(host.GetNodeName(), host, migratedPlugins)
+
+ // This function prevents Kubelet from posting Ready status until CSINode
+ // is both installed and initialized
+- if err := initializeCSINode(host); err != nil {
++ if err := initializeCSINode(host, localNim); err != nil {
+ return errors.New(log("failed to initialize CSINode: %v", err))
+ }
+
++ if _, ok := host.(volume.KubeletVolumeHost); ok {
++ nim = localNim
++ }
++
+ return nil
+ }
+
+-func initializeCSINode(host volume.VolumeHost) error {
++func initializeCSINode(host volume.VolumeHost, nim nodeinfomanager.Interface) error {
+ kvh, ok := host.(volume.KubeletVolumeHost)
+ if !ok {
+ klog.V(4).Info("Cast from VolumeHost to KubeletVolumeHost failed. Skipping CSINode initialization, not running on kubelet")
+--
+2.34.1
+
diff --git a/build-scripts/components/kubernetes/patches/v1.31.0/0001-Set-log-reapply-handling-to-ignore-unchanged.patch b/build-scripts/components/kubernetes/patches/v1.31.0/0001-Set-log-reapply-handling-to-ignore-unchanged.patch
new file mode 100644
index 0000000000..31fbc29fd9
--- /dev/null
+++ b/build-scripts/components/kubernetes/patches/v1.31.0/0001-Set-log-reapply-handling-to-ignore-unchanged.patch
@@ -0,0 +1,24 @@
+From 55f4864d816c8e7ca0ebb39571dc88dbdf05eff2 Mon Sep 17 00:00:00 2001
+From: Angelos Kolaitis
+Date: Thu, 27 Jul 2023 18:08:00 +0300
+Subject: [PATCH] Set log reapply handling to ignore unchanged
+
+---
+ staging/src/k8s.io/component-base/logs/api/v1/options.go | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/staging/src/k8s.io/component-base/logs/api/v1/options.go b/staging/src/k8s.io/component-base/logs/api/v1/options.go
+index 2db9b1f5382..e0824dcdc4e 100644
+--- a/staging/src/k8s.io/component-base/logs/api/v1/options.go
++++ b/staging/src/k8s.io/component-base/logs/api/v1/options.go
+@@ -64,7 +64,7 @@ func NewLoggingConfiguration() *LoggingConfiguration {
+ // are no goroutines which might call logging functions. The default for ValidateAndApply
+ // and ValidateAndApplyWithOptions is to return an error when called more than once.
+ // Binaries and unit tests can override that behavior.
+-var ReapplyHandling = ReapplyHandlingError
++var ReapplyHandling = ReapplyHandlingIgnoreUnchanged
+
+ type ReapplyHandlingType int
+
+--
+2.34.1
diff --git a/build-scripts/components/kubernetes/patches/v1.32.0/0000-Kubelite-integration.patch b/build-scripts/components/kubernetes/patches/v1.32.0/0000-Kubelite-integration.patch
new file mode 100644
index 0000000000..2cbe4798e9
--- /dev/null
+++ b/build-scripts/components/kubernetes/patches/v1.32.0/0000-Kubelite-integration.patch
@@ -0,0 +1,426 @@
+From 819b718ecfee8c4f6fb503d0dea80a43e86cdb6f Mon Sep 17 00:00:00 2001
+From: Konstantinos Tsakalozos
+Date: Wed, 3 Mar 2021 18:19:37 +0200
+Subject: [PATCH] Kubelite integration
+
+---
+ cmd/kube-apiserver/app/server.go | 9 ++-
+ cmd/kube-scheduler/app/server.go | 6 +-
+ cmd/kubelet/app/server.go | 13 +++--
+ cmd/kubelite/app/daemons/daemon.go | 85 +++++++++++++++++++++++++++++
+ cmd/kubelite/app/options/options.go | 79 +++++++++++++++++++++++++++
+ cmd/kubelite/app/server.go | 80 +++++++++++++++++++++++++++
+ cmd/kubelite/kubelite.go | 25 +++++++++
+ pkg/volume/csi/csi_plugin.go | 10 +++-
+ 8 files changed, 296 insertions(+), 11 deletions(-)
+ create mode 100644 cmd/kubelite/app/daemons/daemon.go
+ create mode 100644 cmd/kubelite/app/options/options.go
+ create mode 100644 cmd/kubelite/app/server.go
+ create mode 100644 cmd/kubelite/kubelite.go
+
+diff --git a/cmd/kube-apiserver/app/server.go b/cmd/kube-apiserver/app/server.go
+index 8aa05a4d8f8..75b982aaff9 100644
+--- a/cmd/kube-apiserver/app/server.go
++++ b/cmd/kube-apiserver/app/server.go
+@@ -63,11 +63,16 @@ func init() {
+ }
+
+ // NewAPIServerCommand creates a *cobra.Command object with default parameters
+-func NewAPIServerCommand() *cobra.Command {
++func NewAPIServerCommand(ctxs ...context.Context) *cobra.Command {
+ _, featureGate := featuregate.DefaultComponentGlobalsRegistry.ComponentGlobalsOrRegister(
+ featuregate.DefaultKubeComponent, utilversion.DefaultBuildEffectiveVersion(), utilfeature.DefaultMutableFeatureGate)
+ s := options.NewServerRunOptions()
+- ctx := genericapiserver.SetupSignalContext()
++ ctx := context.Background()
++ if len(ctxs) == 0 {
++ ctx = genericapiserver.SetupSignalContext()
++ } else {
++ ctx = ctxs[0]
++ }
+
+ cmd := &cobra.Command{
+ Use: "kube-apiserver",
+diff --git a/cmd/kube-scheduler/app/server.go b/cmd/kube-scheduler/app/server.go
+index 1785bbdcc91..64f01ba5c93 100644
+--- a/cmd/kube-scheduler/app/server.go
++++ b/cmd/kube-scheduler/app/server.go
+@@ -144,7 +144,11 @@ func runCommand(cmd *cobra.Command, opts *options.Options, registryOptions ...Op
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ go func() {
+- stopCh := server.SetupSignalHandler()
++ c := cmd.Context()
++ if c == nil {
++ c = server.SetupSignalContext()
++ }
++ stopCh := c.Done()
+ <-stopCh
+ cancel()
+ }()
+diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go
+index cfcf6e7d5cd..9c3853173ff 100644
+--- a/cmd/kubelet/app/server.go
++++ b/cmd/kubelet/app/server.go
+@@ -138,7 +138,7 @@ const (
+ )
+
+ // NewKubeletCommand creates a *cobra.Command object with default parameters
+-func NewKubeletCommand() *cobra.Command {
++func NewKubeletCommand(ctx ...context.Context) *cobra.Command {
+ cleanFlagSet := pflag.NewFlagSet(componentKubelet, pflag.ContinueOnError)
+ cleanFlagSet.SetNormalizeFunc(cliflag.WordSepNormalizeFunc)
+ kubeletFlags := options.NewKubeletFlags()
+@@ -275,6 +275,12 @@ is checked every 20 seconds (also configurable with a flag).`,
+ if err := checkPermissions(); err != nil {
+ klog.ErrorS(err, "kubelet running with insufficient permissions")
+ }
++ runctx := context.Background()
++ if len(ctx) == 0 {
++ runctx = genericapiserver.SetupSignalContext()
++ } else {
++ runctx = ctx[0]
++ }
+
+ // make the kubelet's config safe for logging
+ config := kubeletServer.KubeletConfiguration.DeepCopy()
+@@ -284,12 +290,9 @@ is checked every 20 seconds (also configurable with a flag).`,
+ // log the kubelet's config for inspection
+ klog.V(5).InfoS("KubeletConfiguration", "configuration", klog.Format(config))
+
+- // set up signal context for kubelet shutdown
+- ctx := genericapiserver.SetupSignalContext()
+-
+ utilfeature.DefaultMutableFeatureGate.AddMetrics()
+ // run the kubelet
+- return Run(ctx, kubeletServer, kubeletDeps, utilfeature.DefaultFeatureGate)
++ return Run(runctx, kubeletServer, kubeletDeps, utilfeature.DefaultFeatureGate)
+ },
+ }
+
+diff --git a/cmd/kubelite/app/daemons/daemon.go b/cmd/kubelite/app/daemons/daemon.go
+new file mode 100644
+index 00000000000..46c1af7fdb9
+--- /dev/null
++++ b/cmd/kubelite/app/daemons/daemon.go
+@@ -0,0 +1,85 @@
++package daemon
++
++import (
++ "context"
++
++ "k8s.io/client-go/kubernetes"
++ "k8s.io/client-go/tools/clientcmd"
++ genericcontrollermanager "k8s.io/controller-manager/app"
++ "k8s.io/klog/v2"
++ apiserver "k8s.io/kubernetes/cmd/kube-apiserver/app"
++ controller "k8s.io/kubernetes/cmd/kube-controller-manager/app"
++ proxy "k8s.io/kubernetes/cmd/kube-proxy/app"
++ scheduler "k8s.io/kubernetes/cmd/kube-scheduler/app"
++ kubelet "k8s.io/kubernetes/cmd/kubelet/app"
++
++ "time"
++)
++
++func StartControllerManager(args []string, ctx context.Context) {
++ command := controller.NewControllerManagerCommand()
++ command.SetArgs(args)
++
++ klog.Info("Starting Controller Manager")
++ if err := command.ExecuteContext(ctx); err != nil {
++ klog.Fatalf("Controller Manager exited %v", err)
++ }
++ klog.Info("Stopping Controller Manager")
++}
++
++func StartScheduler(args []string, ctx context.Context) {
++ command := scheduler.NewSchedulerCommand()
++ command.SetArgs(args)
++
++ klog.Info("Starting Scheduler")
++ if err := command.ExecuteContext(ctx); err != nil {
++ klog.Fatalf("Scheduler exited %v", err)
++ }
++ klog.Info("Stopping Scheduler")
++}
++
++func StartProxy(args []string) {
++ command := proxy.NewProxyCommand()
++ command.SetArgs(args)
++
++ klog.Info("Starting Proxy")
++ if err := command.Execute(); err != nil {
++ klog.Fatalf("Proxy exited %v", err)
++ }
++ klog.Info("Stopping Proxy")
++}
++
++func StartKubelet(args []string, ctx context.Context) {
++ command := kubelet.NewKubeletCommand(ctx)
++ command.SetArgs(args)
++
++ klog.Info("Starting Kubelet")
++ if err := command.Execute(); err != nil {
++ klog.Fatalf("Kubelet exited %v", err)
++ }
++ klog.Info("Stopping Kubelet")
++}
++
++func StartAPIServer(args []string, ctx context.Context) {
++ command := apiserver.NewAPIServerCommand(ctx)
++ command.SetArgs(args)
++ klog.Info("Starting API Server")
++ if err := command.Execute(); err != nil {
++ klog.Fatalf("API Server exited %v", err)
++ }
++ klog.Info("Stopping API Server")
++}
++
++func WaitForAPIServer(kubeconfigpath string, timeout time.Duration) {
++ klog.Info("Waiting for the API server")
++ config, err := clientcmd.BuildConfigFromFlags("", kubeconfigpath)
++ if err != nil {
++ klog.Fatalf("could not find the cluster's kubeconfig file %v", err)
++ }
++ // create the client
++ client, err := kubernetes.NewForConfig(config)
++ if err != nil {
++ klog.Fatalf("could not create client to the cluster %v", err)
++ }
++ genericcontrollermanager.WaitForAPIServer(client, timeout)
++}
+diff --git a/cmd/kubelite/app/options/options.go b/cmd/kubelite/app/options/options.go
+new file mode 100644
+index 00000000000..80f1d8b09fc
+--- /dev/null
++++ b/cmd/kubelite/app/options/options.go
+@@ -0,0 +1,79 @@
++/*
++Copyright 2018 The Kubernetes Authors.
++
++Licensed under the Apache License, Version 2.0 (the "License");
++you may not use this file except in compliance with the License.
++You may obtain a copy of the License at
++
++ http://www.apache.org/licenses/LICENSE-2.0
++
++Unless required by applicable law or agreed to in writing, software
++distributed under the License is distributed on an "AS IS" BASIS,
++WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++See the License for the specific language governing permissions and
++limitations under the License.
++*/
++
++package options
++
++import (
++ "bufio"
++ "k8s.io/klog/v2"
++ "os"
++ "strings"
++)
++
++// Options has all the params needed to run a Kubelite
++type Options struct {
++ SchedulerArgsFile string
++ ControllerManagerArgsFile string
++ ProxyArgsFile string
++ KubeletArgsFile string
++ APIServerArgsFile string
++ KubeconfigFile string
++ StartControlPlane bool
++}
++
++func NewOptions() (*Options){
++ o := Options{
++ "/var/snap/microk8s/current/args/kube-scheduler",
++ "/var/snap/microk8s/current/args/kube-controller-manager",
++ "/var/snap/microk8s/current/args/kube-proxy",
++ "/var/snap/microk8s/current/args/kubelet",
++ "/var/snap/microk8s/current/args/kube-apiserver",
++ "/var/snap/microk8s/current/credentials/client.config",
++ true,
++ }
++ return &o
++}
++
++func ReadArgsFromFile(filename string) []string {
++ var args []string
++ file, err := os.Open(filename)
++ if err != nil {
++ klog.Fatalf("Failed to open arguments file %v", err)
++ }
++ defer file.Close()
++
++ scanner := bufio.NewScanner(file)
++ for scanner.Scan() {
++ line := scanner.Text()
++ line = strings.TrimSpace(line)
++ // ignore lines with # and empty lines
++ if len(line) <= 0 || strings.HasPrefix(line, "#") {
++ continue
++ }
++ // remove " and '
++ for _, r := range "\"'" {
++ line = strings.ReplaceAll(line, string(r), "")
++ }
++ for _, part := range strings.Split(line, " ") {
++
++ args = append(args, os.ExpandEnv(part))
++ }
++ }
++ if err := scanner.Err(); err != nil {
++ klog.Fatalf("Failed to read arguments file %v", err)
++ }
++ return args
++}
+diff --git a/cmd/kubelite/app/server.go b/cmd/kubelite/app/server.go
+new file mode 100644
+index 00000000000..4ff36cd6432
+--- /dev/null
++++ b/cmd/kubelite/app/server.go
+@@ -0,0 +1,80 @@
++/*
++Copyright © 2020 NAME HERE
++
++Licensed under the Apache License, Version 2.0 (the "License");
++you may not use this file except in compliance with the License.
++You may obtain a copy of the License at
++
++ http://www.apache.org/licenses/LICENSE-2.0
++
++Unless required by applicable law or agreed to in writing, software
++distributed under the License is distributed on an "AS IS" BASIS,
++WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++See the License for the specific language governing permissions and
++limitations under the License.
++*/
++package app
++
++import (
++ "fmt"
++ "os"
++ "time"
++
++ "github.com/spf13/cobra"
++ genericapiserver "k8s.io/apiserver/pkg/server"
++ daemon "k8s.io/kubernetes/cmd/kubelite/app/daemons"
++ "k8s.io/kubernetes/cmd/kubelite/app/options"
++)
++
++var opts = options.NewOptions()
++
++// liteCmd represents the base command when called without any subcommands
++var liteCmd = &cobra.Command{
++ Use: "kubelite",
++ Short: "Single server kubernetes",
++ Long: `A single server that spawns all other kubernetes servers as threads`,
++ // Uncomment the following line if your bare application
++ // has an action associated with it:
++ Run: func(cmd *cobra.Command, args []string) {
++ ctx := genericapiserver.SetupSignalContext()
++
++ if opts.StartControlPlane {
++ apiserverArgs := options.ReadArgsFromFile(opts.APIServerArgsFile)
++ go daemon.StartAPIServer(apiserverArgs, ctx)
++ daemon.WaitForAPIServer(opts.KubeconfigFile, 360*time.Second)
++
++ controllerArgs := options.ReadArgsFromFile(opts.ControllerManagerArgsFile)
++ go daemon.StartControllerManager(controllerArgs, ctx)
++
++ schedulerArgs := options.ReadArgsFromFile(opts.SchedulerArgsFile)
++ go daemon.StartScheduler(schedulerArgs, ctx)
++ }
++
++ proxyArgs := options.ReadArgsFromFile(opts.ProxyArgsFile)
++ go daemon.StartProxy(proxyArgs)
++
++ kubeletArgs := options.ReadArgsFromFile(opts.KubeletArgsFile)
++ daemon.StartKubelet(kubeletArgs, ctx)
++ },
++}
++
++// Execute adds all child commands to the root command and sets flags appropriately.
++// This is called by main.main(). It only needs to happen once to the liteCmd.
++func Execute() {
++ if err := liteCmd.Execute(); err != nil {
++ fmt.Println(err)
++ os.Exit(1)
++ }
++}
++
++func init() {
++ cobra.OnInitialize()
++
++ liteCmd.Flags().StringVar(&opts.SchedulerArgsFile, "scheduler-args-file", opts.SchedulerArgsFile, "file with the arguments for the scheduler")
++ liteCmd.Flags().StringVar(&opts.ControllerManagerArgsFile, "controller-manager-args-file", opts.ControllerManagerArgsFile, "file with the arguments for the controller manager")
++ liteCmd.Flags().StringVar(&opts.ProxyArgsFile, "proxy-args-file", opts.ProxyArgsFile, "file with the arguments for kube-proxy")
++ liteCmd.Flags().StringVar(&opts.KubeletArgsFile, "kubelet-args-file", opts.KubeletArgsFile, "file with the arguments for kubelet")
++ liteCmd.Flags().StringVar(&opts.APIServerArgsFile, "apiserver-args-file", opts.APIServerArgsFile, "file with the arguments for the API server")
++ liteCmd.Flags().StringVar(&opts.KubeconfigFile, "kubeconfig-file", opts.KubeconfigFile, "the kubeconfig file to use to healthcheck the API server")
++ liteCmd.Flags().BoolVar(&opts.StartControlPlane, "start-control-plane", opts.StartControlPlane, "start the control plane (API server, scheduler and controller manager)")
++}
+diff --git a/cmd/kubelite/kubelite.go b/cmd/kubelite/kubelite.go
+new file mode 100644
+index 00000000000..30ab604f480
+--- /dev/null
++++ b/cmd/kubelite/kubelite.go
+@@ -0,0 +1,25 @@
++package main
++
++import (
++ "github.com/spf13/pflag"
++ cliflag "k8s.io/component-base/cli/flag"
++
++ "k8s.io/component-base/logs"
++ _ "k8s.io/component-base/metrics/prometheus/clientgo" // load all the prometheus client-go plugin
++ _ "k8s.io/component-base/metrics/prometheus/version" // for version metric registration
++ "k8s.io/kubernetes/cmd/kubelite/app"
++)
++
++func main() {
++ println("Starting kubelite")
++ // TODO: once we switch everything over to Cobra commands, we can go back to calling
++ // utilflag.InitFlags() (by removing its pflag.Parse() call). For now, we have to set the
++ // normalize func and add the go flag set by hand.
++ pflag.CommandLine.SetNormalizeFunc(cliflag.WordSepNormalizeFunc)
++ // utilflag.InitFlags()
++ logs.InitLogs()
++ defer logs.FlushLogs()
++
++ app.Execute()
++ println("Stopping kubelite")
++}
+diff --git a/pkg/volume/csi/csi_plugin.go b/pkg/volume/csi/csi_plugin.go
+index 59fa6245b7f..6443dffd695 100644
+--- a/pkg/volume/csi/csi_plugin.go
++++ b/pkg/volume/csi/csi_plugin.go
+@@ -256,18 +256,22 @@ func (p *csiPlugin) Init(host volume.VolumeHost) error {
+ }
+
+ // Initializing the label management channels
+- nim = nodeinfomanager.NewNodeInfoManager(host.GetNodeName(), host, migratedPlugins)
++ localNim := nodeinfomanager.NewNodeInfoManager(host.GetNodeName(), host, migratedPlugins)
+
+ // This function prevents Kubelet from posting Ready status until CSINode
+ // is both installed and initialized
+- if err := initializeCSINode(host); err != nil {
++ if err := initializeCSINode(host, localNim); err != nil {
+ return errors.New(log("failed to initialize CSINode: %v", err))
+ }
+
++ if _, ok := host.(volume.KubeletVolumeHost); ok {
++ nim = localNim
++ }
++
+ return nil
+ }
+
+-func initializeCSINode(host volume.VolumeHost) error {
++func initializeCSINode(host volume.VolumeHost, nim nodeinfomanager.Interface) error {
+ kvh, ok := host.(volume.KubeletVolumeHost)
+ if !ok {
+ klog.V(4).Info("Cast from VolumeHost to KubeletVolumeHost failed. Skipping CSINode initialization, not running on kubelet")
+--
+2.43.0
+
diff --git a/build-scripts/components/kubernetes/patches/v1.32.0/0001-Set-log-reapply-handling-to-ignore-unchanged.patch b/build-scripts/components/kubernetes/patches/v1.32.0/0001-Set-log-reapply-handling-to-ignore-unchanged.patch
new file mode 100644
index 0000000000..31fbc29fd9
--- /dev/null
+++ b/build-scripts/components/kubernetes/patches/v1.32.0/0001-Set-log-reapply-handling-to-ignore-unchanged.patch
@@ -0,0 +1,24 @@
+From 55f4864d816c8e7ca0ebb39571dc88dbdf05eff2 Mon Sep 17 00:00:00 2001
+From: Angelos Kolaitis
+Date: Thu, 27 Jul 2023 18:08:00 +0300
+Subject: [PATCH] Set log reapply handling to ignore unchanged
+
+---
+ staging/src/k8s.io/component-base/logs/api/v1/options.go | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/staging/src/k8s.io/component-base/logs/api/v1/options.go b/staging/src/k8s.io/component-base/logs/api/v1/options.go
+index 2db9b1f5382..e0824dcdc4e 100644
+--- a/staging/src/k8s.io/component-base/logs/api/v1/options.go
++++ b/staging/src/k8s.io/component-base/logs/api/v1/options.go
+@@ -64,7 +64,7 @@ func NewLoggingConfiguration() *LoggingConfiguration {
+ // are no goroutines which might call logging functions. The default for ValidateAndApply
+ // and ValidateAndApplyWithOptions is to return an error when called more than once.
+ // Binaries and unit tests can override that behavior.
+-var ReapplyHandling = ReapplyHandlingError
++var ReapplyHandling = ReapplyHandlingIgnoreUnchanged
+
+ type ReapplyHandlingType int
+
+--
+2.34.1
diff --git a/build-scripts/components/kubernetes/pre-patch.sh b/build-scripts/components/kubernetes/pre-patch.sh
new file mode 100644
index 0000000000..850841e345
--- /dev/null
+++ b/build-scripts/components/kubernetes/pre-patch.sh
@@ -0,0 +1,7 @@
+#!/bin/bash -x
+
+# Ensure clean Kubernetes version
+KUBE_ROOT="${PWD}"
+source "${KUBE_ROOT}/hack/lib/version.sh"
+kube::version::get_version_vars
+kube::version::save_version_vars "${PWD}/.version.sh"
diff --git a/build-scripts/components/kubernetes/repository b/build-scripts/components/kubernetes/repository
new file mode 100644
index 0000000000..e2536ceb44
--- /dev/null
+++ b/build-scripts/components/kubernetes/repository
@@ -0,0 +1 @@
+https://github.com/kubernetes/kubernetes
diff --git a/build-scripts/components/kubernetes/version.sh b/build-scripts/components/kubernetes/version.sh
new file mode 100755
index 0000000000..dc1c4f2f05
--- /dev/null
+++ b/build-scripts/components/kubernetes/version.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+KUBE_TRACK="${KUBE_TRACK:-}" # example: "1.24"
+KUBE_VERSION="${KUBE_VERSION:-}" # example: "v1.24.2"
+
+if [ -z "${KUBE_VERSION}" ]; then
+ if [ -z "${KUBE_TRACK}" ]; then
+ KUBE_VERSION="$(curl -L --silent "https://dl.k8s.io/release/stable.txt")"
+ else
+ KUBE_TRACK="${KUBE_TRACK#v}"
+ KUBE_VERSION="$(curl -L --silent "https://dl.k8s.io/release/stable-${KUBE_TRACK}.txt")"
+ fi
+fi
+
+echo "${KUBE_VERSION}"
diff --git a/build-scripts/components/microk8s-completion/build.sh b/build-scripts/components/microk8s-completion/build.sh
new file mode 100755
index 0000000000..d020f3961e
--- /dev/null
+++ b/build-scripts/components/microk8s-completion/build.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+INSTALL="${1}"
+
+go mod tidy -compat=1.17
+go run -tags microk8s_hack ./cmd/helm 2> /dev/null
+
+cp microk8s.bash "${INSTALL}/microk8s.bash"
diff --git a/build-scripts/components/microk8s-completion/patches/default/0001-microk8s-autocompleter-script.patch b/build-scripts/components/microk8s-completion/patches/default/0001-microk8s-autocompleter-script.patch
new file mode 100644
index 0000000000..1a5a5e55ba
--- /dev/null
+++ b/build-scripts/components/microk8s-completion/patches/default/0001-microk8s-autocompleter-script.patch
@@ -0,0 +1,84 @@
+From f154f915fe39d6b929dbe1e513011fd271fcd12c Mon Sep 17 00:00:00 2001
+From: Angelos Kolaitis
+Date: Fri, 15 Jul 2022 15:07:18 +0300
+Subject: [PATCH] microk8s autocompleter script
+
+---
+ cmd/helm/hack_microk8s_autocompleter.go | 51 +++++++++++++++++++++++++
+ cmd/helm/helm.go | 2 +-
+ 2 files changed, 52 insertions(+), 1 deletion(-)
+ create mode 100644 cmd/helm/hack_microk8s_autocompleter.go
+
+diff --git a/cmd/helm/hack_microk8s_autocompleter.go b/cmd/helm/hack_microk8s_autocompleter.go
+new file mode 100644
+index 00000000..e819f44b
+--- /dev/null
++++ b/cmd/helm/hack_microk8s_autocompleter.go
+@@ -0,0 +1,51 @@
++// go:build microk8s_hack
++
++package main
++
++import (
++ "bytes"
++ "log"
++ "os"
++ "strings"
++
++ "github.com/spf13/cobra"
++ "helm.sh/helm/v3/pkg/action"
++ "k8s.io/kubectl/pkg/cmd"
++)
++
++func main() {
++ command := &cobra.Command{Use: "microk8s"}
++ command.AddCommand(cmd.NewDefaultKubectlCommand())
++ helmCmd, _ := newRootCmd(&action.Configuration{}, nil, nil)
++ helmCmd.Use = "helm"
++ helm3Cmd, _ := newRootCmd(&action.Configuration{}, nil, nil)
++ helm3Cmd.Use = "helm3"
++ command.AddCommand(&cobra.Command{Use: "addons", Run: func(cmd *cobra.Command, args []string) {}})
++ command.AddCommand(&cobra.Command{Use: "add-node", Run: func(cmd *cobra.Command, args []string) {}})
++ command.AddCommand(&cobra.Command{Use: "config", Run: func(cmd *cobra.Command, args []string) {}})
++ command.AddCommand(&cobra.Command{Use: "enable", Run: func(cmd *cobra.Command, args []string) {}})
++ command.AddCommand(&cobra.Command{Use: "disable", Run: func(cmd *cobra.Command, args []string) {}})
++ command.AddCommand(helmCmd)
++ command.AddCommand(helm3Cmd)
++ command.AddCommand(&cobra.Command{Use: "helm3", Run: func(cmd *cobra.Command, args []string) {}})
++ command.AddCommand(&cobra.Command{Use: "images", Run: func(cmd *cobra.Command, args []string) {}})
++ command.AddCommand(&cobra.Command{Use: "inspect", Run: func(cmd *cobra.Command, args []string) {}})
++ command.AddCommand(&cobra.Command{Use: "join", Run: func(cmd *cobra.Command, args []string) {}})
++ command.AddCommand(&cobra.Command{Use: "refresh-certs", Run: func(cmd *cobra.Command, args []string) {}})
++ command.AddCommand(&cobra.Command{Use: "remove-node", Run: func(cmd *cobra.Command, args []string) {}})
++ command.AddCommand(&cobra.Command{Use: "reset", Run: func(cmd *cobra.Command, args []string) {}})
++ command.AddCommand(&cobra.Command{Use: "start", Run: func(cmd *cobra.Command, args []string) {}})
++ command.AddCommand(&cobra.Command{Use: "status", Run: func(cmd *cobra.Command, args []string) {}})
++ command.AddCommand(&cobra.Command{Use: "stop", Run: func(cmd *cobra.Command, args []string) {}})
++ command.AddCommand(&cobra.Command{Use: "version", Run: func(cmd *cobra.Command, args []string) {}})
++
++ var b bytes.Buffer
++ if err := command.GenBashCompletion(&b); err != nil {
++ log.Fatalf("failed to generate completion script: %v", err)
++ }
++ completionScript := b.String()
++ completionScript = strings.ReplaceAll(completionScript, `args=("${words[@]:1}")`, `args=("${words[@]:2}")`)
++ completionScript = strings.ReplaceAll(completionScript, `requestComp="${words[0]} __completeNoDesc ${args[*]}"`, `requestComp="${words[0]} ${words[1]} __completeNoDesc ${args[*]}"`)
++
++ os.WriteFile("microk8s.bash", []byte(completionScript), 0644)
++}
+diff --git a/cmd/helm/helm.go b/cmd/helm/helm.go
+index 15b0d5c7..6499d4ca 100644
+--- a/cmd/helm/helm.go
++++ b/cmd/helm/helm.go
+@@ -55,7 +55,7 @@ func warning(format string, v ...interface{}) {
+ fmt.Fprintf(os.Stderr, format, v...)
+ }
+
+-func main() {
++func Main() {
+ // Setting the name of the app for managedFields in the Kubernetes client.
+ // It is set here to the full name of "helm" so that renaming of helm to
+ // another name (e.g., helm2 or helm3) does not change the name of the
+--
+2.25.1
+
diff --git a/build-scripts/components/microk8s-completion/repository b/build-scripts/components/microk8s-completion/repository
new file mode 100644
index 0000000000..e7c0fbd488
--- /dev/null
+++ b/build-scripts/components/microk8s-completion/repository
@@ -0,0 +1 @@
+https://github.com/helm/helm
diff --git a/build-scripts/components/microk8s-completion/version.sh b/build-scripts/components/microk8s-completion/version.sh
new file mode 100755
index 0000000000..4382fc4905
--- /dev/null
+++ b/build-scripts/components/microk8s-completion/version.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+echo "v3.9.1"
diff --git a/build-scripts/components/python/requirements.txt b/build-scripts/components/python/requirements.txt
new file mode 100644
index 0000000000..30e93e72fd
--- /dev/null
+++ b/build-scripts/components/python/requirements.txt
@@ -0,0 +1,3 @@
+PyYAML==6.0.1
+netifaces==0.10.9
+jsonschema==4.0.0
diff --git a/build-scripts/components/runc/build.sh b/build-scripts/components/runc/build.sh
new file mode 100755
index 0000000000..ba2a6a7ff3
--- /dev/null
+++ b/build-scripts/components/runc/build.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+VERSION="${2}"
+
+export INSTALL="${1}/bin"
+mkdir -p "${INSTALL}"
+
+# Ensure `runc --version` prints the correct release commit
+export COMMIT="$(git describe --always --long "${VERSION}")"
+
+make BUILDTAGS="seccomp apparmor" EXTRA_LDFLAGS="-s -w" static
+cp runc "${INSTALL}/runc"
diff --git a/build-scripts/components/runc/patches/default/0001-Disable-static-PIE-on-arm64.patch b/build-scripts/components/runc/patches/default/0001-Disable-static-PIE-on-arm64.patch
new file mode 100644
index 0000000000..146c5d5fa3
--- /dev/null
+++ b/build-scripts/components/runc/patches/default/0001-Disable-static-PIE-on-arm64.patch
@@ -0,0 +1,25 @@
+From bcf130f097781d162c0461105a12f4c9f412d3e8 Mon Sep 17 00:00:00 2001
+From: Angelos Kolaitis
+Date: Tue, 20 Feb 2024 12:32:27 +0200
+Subject: [PATCH] Disable static PIE on arm64
+
+Ubuntu does not currently have the rcrt1.o file on arm64
+---
+ Makefile | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Makefile b/Makefile
+index e3af9bc1..b2b07720 100644
+--- a/Makefile
++++ b/Makefile
+@@ -33,7 +33,7 @@ LDFLAGS_STATIC := -extldflags -static
+ # Enable static PIE executables on supported platforms.
+ # This (among the other things) requires libc support (rcrt1.o), which seems
+ # to be available only for arm64 and amd64 (Debian Bullseye).
+-ifneq (,$(filter $(GOARCH),arm64 amd64))
++ifneq (,$(filter $(GOARCH),amd64))
+ ifeq (,$(findstring -race,$(EXTRA_FLAGS)))
+ GO_BUILDMODE_STATIC := -buildmode=pie
+ LDFLAGS_STATIC := -linkmode external -extldflags --static-pie
+--
+2.34.1
diff --git a/build-scripts/components/runc/repository b/build-scripts/components/runc/repository
new file mode 100644
index 0000000000..e8bdb4d77e
--- /dev/null
+++ b/build-scripts/components/runc/repository
@@ -0,0 +1 @@
+https://github.com/opencontainers/runc
diff --git a/build-scripts/components/runc/strict-patches/v1.1.12/0001-apparmor-change-profile-immediately-not-on-exec.patch b/build-scripts/components/runc/strict-patches/v1.1.12/0001-apparmor-change-profile-immediately-not-on-exec.patch
new file mode 100644
index 0000000000..3d608e0e88
--- /dev/null
+++ b/build-scripts/components/runc/strict-patches/v1.1.12/0001-apparmor-change-profile-immediately-not-on-exec.patch
@@ -0,0 +1,35 @@
+From a367e391600dfab0d9eb3deaec4db300a2fb1fa1 Mon Sep 17 00:00:00 2001
+From: Alberto Mardegan
+Date: Wed, 16 Jun 2021 15:04:16 +0300
+Subject: [PATCH 1/3] apparmor: change profile immediately, not on exec
+
+---
+ libcontainer/apparmor/apparmor_linux.go | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/libcontainer/apparmor/apparmor_linux.go b/libcontainer/apparmor/apparmor_linux.go
+index 8b1483c..292cfa6 100644
+--- a/libcontainer/apparmor/apparmor_linux.go
++++ b/libcontainer/apparmor/apparmor_linux.go
+@@ -48,9 +48,9 @@ func setProcAttr(attr, value string) error {
+ return err
+ }
+
+-// changeOnExec reimplements aa_change_onexec from libapparmor in Go
+-func changeOnExec(name string) error {
+- if err := setProcAttr("exec", "exec "+name); err != nil {
++// changeProfile reimplements aa_change_profile from libapparmor in Go
++func changeProfile(name string) error {
++ if err := setProcAttr("current", "changeprofile "+name); err != nil {
+ return fmt.Errorf("apparmor failed to apply profile: %w", err)
+ }
+ return nil
+@@ -64,5 +64,5 @@ func applyProfile(name string) error {
+ return nil
+ }
+
+- return changeOnExec(name)
++ return changeProfile(name)
+ }
+--
+2.34.1
diff --git a/build-scripts/components/runc/strict-patches/v1.1.12/0002-setns_init_linux-set-the-NNP-flag-after-changing-the.patch b/build-scripts/components/runc/strict-patches/v1.1.12/0002-setns_init_linux-set-the-NNP-flag-after-changing-the.patch
new file mode 100644
index 0000000000..2b9ac883bd
--- /dev/null
+++ b/build-scripts/components/runc/strict-patches/v1.1.12/0002-setns_init_linux-set-the-NNP-flag-after-changing-the.patch
@@ -0,0 +1,47 @@
+From 5351ef6f5b592472e077512714b2516cdbae1b51 Mon Sep 17 00:00:00 2001
+From: Angelos Kolaitis
+Date: Thu, 1 Feb 2024 11:23:08 +0200
+Subject: [PATCH 2/3] setns_init_linux: set the NNP flag after changing the
+ apparmor profile
+
+With the current version of the AppArmor kernel module, it's not
+possible to switch the AppArmor profile if the NoNewPrivileges flag is
+set. So, we invert the order of the two operations.
+
+Adjusts the previous patch for runc version v1.1.12
+
+Co-Authored-By: Alberto Mardegan
+---
+ libcontainer/setns_init_linux.go | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/libcontainer/setns_init_linux.go b/libcontainer/setns_init_linux.go
+index d1bb122..00407ce 100644
+--- a/libcontainer/setns_init_linux.go
++++ b/libcontainer/setns_init_linux.go
+@@ -56,11 +56,6 @@ func (l *linuxSetnsInit) Init() error {
+ return err
+ }
+ }
+- if l.config.NoNewPrivileges {
+- if err := unix.Prctl(unix.PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); err != nil {
+- return err
+- }
+- }
+ if err := selinux.SetExecLabel(l.config.ProcessLabel); err != nil {
+ return err
+ }
+@@ -84,6 +79,11 @@ func (l *linuxSetnsInit) Init() error {
+ if err := apparmor.ApplyProfile(l.config.AppArmorProfile); err != nil {
+ return err
+ }
++ if l.config.NoNewPrivileges {
++ if err := unix.Prctl(unix.PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); err != nil {
++ return err
++ }
++ }
+
+ // Check for the arg before waiting to make sure it exists and it is
+ // returned as a create time error.
+--
+2.34.1
\ No newline at end of file
diff --git a/build-scripts/components/runc/strict-patches/v1.1.12/0003-standard_init_linux-change-AppArmor-profile-as-late-.patch b/build-scripts/components/runc/strict-patches/v1.1.12/0003-standard_init_linux-change-AppArmor-profile-as-late-.patch
new file mode 100644
index 0000000000..9415a05f88
--- /dev/null
+++ b/build-scripts/components/runc/strict-patches/v1.1.12/0003-standard_init_linux-change-AppArmor-profile-as-late-.patch
@@ -0,0 +1,54 @@
+From 103a94a51ea334d25bf573f2f20cd4d9a099d827 Mon Sep 17 00:00:00 2001
+From: Alberto Mardegan
+Date: Thu, 17 Jun 2021 14:31:35 +0300
+Subject: [PATCH 3/3] standard_init_linux: change AppArmor profile as late as
+ possible
+
+---
+ libcontainer/standard_init_linux.go | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+diff --git a/libcontainer/standard_init_linux.go b/libcontainer/standard_init_linux.go
+index d1d9435..7097571 100644
+--- a/libcontainer/standard_init_linux.go
++++ b/libcontainer/standard_init_linux.go
+@@ -127,10 +127,6 @@ func (l *linuxStandardInit) Init() error {
+ return &os.SyscallError{Syscall: "sethostname", Err: err}
+ }
+ }
+- if err := apparmor.ApplyProfile(l.config.AppArmorProfile); err != nil {
+- return fmt.Errorf("unable to apply apparmor profile: %w", err)
+- }
+-
+ for key, value := range l.config.Config.Sysctl {
+ if err := writeSystemProperty(key, value); err != nil {
+ return err
+@@ -150,17 +146,21 @@ func (l *linuxStandardInit) Init() error {
+ if err != nil {
+ return fmt.Errorf("can't get pdeath signal: %w", err)
+ }
+- if l.config.NoNewPrivileges {
+- if err := unix.Prctl(unix.PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); err != nil {
+- return &os.SyscallError{Syscall: "prctl(SET_NO_NEW_PRIVS)", Err: err}
+- }
+- }
+ // Tell our parent that we're ready to Execv. This must be done before the
+ // Seccomp rules have been applied, because we need to be able to read and
+ // write to a socket.
+ if err := syncParentReady(l.pipe); err != nil {
+ return fmt.Errorf("sync ready: %w", err)
+ }
++ if err := apparmor.ApplyProfile(l.config.AppArmorProfile); err != nil {
++ return fmt.Errorf("apply apparmor profile: %w", err)
++ }
++ if l.config.NoNewPrivileges {
++ if err := unix.Prctl(unix.PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); err != nil {
++ return fmt.Errorf("set nonewprivileges: %w", err)
++ }
++ }
++
+ if err := selinux.SetExecLabel(l.config.ProcessLabel); err != nil {
+ return fmt.Errorf("can't set process label: %w", err)
+ }
+--
+2.34.1
\ No newline at end of file
diff --git a/build-scripts/components/runc/strict-patches/v1.1.15/0001-apparmor-change-profile-immediately-not-on-exec.patch b/build-scripts/components/runc/strict-patches/v1.1.15/0001-apparmor-change-profile-immediately-not-on-exec.patch
new file mode 100644
index 0000000000..3d608e0e88
--- /dev/null
+++ b/build-scripts/components/runc/strict-patches/v1.1.15/0001-apparmor-change-profile-immediately-not-on-exec.patch
@@ -0,0 +1,35 @@
+From a367e391600dfab0d9eb3deaec4db300a2fb1fa1 Mon Sep 17 00:00:00 2001
+From: Alberto Mardegan
+Date: Wed, 16 Jun 2021 15:04:16 +0300
+Subject: [PATCH 1/3] apparmor: change profile immediately, not on exec
+
+---
+ libcontainer/apparmor/apparmor_linux.go | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/libcontainer/apparmor/apparmor_linux.go b/libcontainer/apparmor/apparmor_linux.go
+index 8b1483c..292cfa6 100644
+--- a/libcontainer/apparmor/apparmor_linux.go
++++ b/libcontainer/apparmor/apparmor_linux.go
+@@ -48,9 +48,9 @@ func setProcAttr(attr, value string) error {
+ return err
+ }
+
+-// changeOnExec reimplements aa_change_onexec from libapparmor in Go
+-func changeOnExec(name string) error {
+- if err := setProcAttr("exec", "exec "+name); err != nil {
++// changeProfile reimplements aa_change_profile from libapparmor in Go
++func changeProfile(name string) error {
++ if err := setProcAttr("current", "changeprofile "+name); err != nil {
+ return fmt.Errorf("apparmor failed to apply profile: %w", err)
+ }
+ return nil
+@@ -64,5 +64,5 @@ func applyProfile(name string) error {
+ return nil
+ }
+
+- return changeOnExec(name)
++ return changeProfile(name)
+ }
+--
+2.34.1
diff --git a/build-scripts/components/runc/strict-patches/v1.1.15/0002-setns_init_linux-set-the-NNP-flag-after-changing-the.patch b/build-scripts/components/runc/strict-patches/v1.1.15/0002-setns_init_linux-set-the-NNP-flag-after-changing-the.patch
new file mode 100644
index 0000000000..4c4323c431
--- /dev/null
+++ b/build-scripts/components/runc/strict-patches/v1.1.15/0002-setns_init_linux-set-the-NNP-flag-after-changing-the.patch
@@ -0,0 +1,48 @@
+From b145a4ac9e9cd09e82d35e0998c6ddee80854275 Mon Sep 17 00:00:00 2001
+From: Angelos Kolaitis
+Date: Thu, 1 Feb 2024 11:23:08 +0200
+Subject: [PATCH] setns_init_linux: set the NNP flag after changing the
+ apparmor profile
+
+With the current version of the AppArmor kernel module, it's not
+possible to switch the AppArmor profile if the NoNewPrivileges flag is
+set. So, we invert the order of the two operations.
+
+Adjusts the previous patch for runc version v1.1.12
+
+Co-Authored-By: Alberto Mardegan
+---
+ libcontainer/setns_init_linux.go | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/libcontainer/setns_init_linux.go b/libcontainer/setns_init_linux.go
+index bb358901..6c1b16bd 100644
+--- a/libcontainer/setns_init_linux.go
++++ b/libcontainer/setns_init_linux.go
+@@ -57,11 +57,6 @@ func (l *linuxSetnsInit) Init() error {
+ return err
+ }
+ }
+- if l.config.NoNewPrivileges {
+- if err := unix.Prctl(unix.PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); err != nil {
+- return err
+- }
+- }
+
+ // Tell our parent that we're ready to exec. This must be done before the
+ // Seccomp rules have been applied, because we need to be able to read and
+@@ -93,6 +88,11 @@ func (l *linuxSetnsInit) Init() error {
+ if err := apparmor.ApplyProfile(l.config.AppArmorProfile); err != nil {
+ return err
+ }
++ if l.config.NoNewPrivileges {
++ if err := unix.Prctl(unix.PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); err != nil {
++ return err
++ }
++ }
+
+ // Check for the arg before waiting to make sure it exists and it is
+ // returned as a create time error.
+--
+2.43.0
+
diff --git a/build-scripts/components/runc/strict-patches/v1.1.15/0003-standard_init_linux-change-AppArmor-profile-as-late-.patch b/build-scripts/components/runc/strict-patches/v1.1.15/0003-standard_init_linux-change-AppArmor-profile-as-late-.patch
new file mode 100644
index 0000000000..6b37ad9df8
--- /dev/null
+++ b/build-scripts/components/runc/strict-patches/v1.1.15/0003-standard_init_linux-change-AppArmor-profile-as-late-.patch
@@ -0,0 +1,56 @@
+From f9e0ca2f29c6c77ea9bc9c52929dac3915545dd9 Mon Sep 17 00:00:00 2001
+From: Alberto Mardegan
+Date: Thu, 17 Jun 2021 14:31:35 +0300
+Subject: [PATCH] standard_init_linux: change AppArmor profile as late as
+ possible
+
+---
+ libcontainer/standard_init_linux.go | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+diff --git a/libcontainer/standard_init_linux.go b/libcontainer/standard_init_linux.go
+index d9a6a224..e4d603e4 100644
+--- a/libcontainer/standard_init_linux.go
++++ b/libcontainer/standard_init_linux.go
+@@ -127,10 +127,6 @@ func (l *linuxStandardInit) Init() error {
+ return &os.SyscallError{Syscall: "sethostname", Err: err}
+ }
+ }
+- if err := apparmor.ApplyProfile(l.config.AppArmorProfile); err != nil {
+- return fmt.Errorf("unable to apply apparmor profile: %w", err)
+- }
+-
+ for key, value := range l.config.Config.Sysctl {
+ if err := writeSystemProperty(key, value); err != nil {
+ return err
+@@ -150,11 +146,6 @@ func (l *linuxStandardInit) Init() error {
+ if err != nil {
+ return fmt.Errorf("can't get pdeath signal: %w", err)
+ }
+- if l.config.NoNewPrivileges {
+- if err := unix.Prctl(unix.PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); err != nil {
+- return &os.SyscallError{Syscall: "prctl(SET_NO_NEW_PRIVS)", Err: err}
+- }
+- }
+
+ // Tell our parent that we're ready to exec. This must be done before the
+ // Seccomp rules have been applied, because we need to be able to read and
+@@ -162,6 +153,15 @@ func (l *linuxStandardInit) Init() error {
+ if err := syncParentReady(l.pipe); err != nil {
+ return fmt.Errorf("sync ready: %w", err)
+ }
++ if err := apparmor.ApplyProfile(l.config.AppArmorProfile); err != nil {
++ return fmt.Errorf("apply apparmor profile: %w", err)
++ }
++ if l.config.NoNewPrivileges {
++ if err := unix.Prctl(unix.PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); err != nil {
++ return fmt.Errorf("set nonewprivileges: %w", err)
++ }
++ }
++
+ if err := selinux.SetExecLabel(l.config.ProcessLabel); err != nil {
+ return fmt.Errorf("can't set process label: %w", err)
+ }
+--
+2.43.0
+
diff --git a/build-scripts/components/runc/version.sh b/build-scripts/components/runc/version.sh
new file mode 100755
index 0000000000..d2bc2f9256
--- /dev/null
+++ b/build-scripts/components/runc/version.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+echo "v1.1.15"
diff --git a/build-scripts/fetch-k8s-binaries.sh b/build-scripts/fetch-k8s-binaries.sh
deleted file mode 100755
index c7b46252fe..0000000000
--- a/build-scripts/fetch-k8s-binaries.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/bash
-set -eu
-
-apps="kubectl kube-apiserver kube-controller-manager kube-scheduler kubelet kube-proxy"
-mkdir -p $KUBE_SNAP_BINS
-echo $KUBE_VERSION > $KUBE_SNAP_BINS/version
-(cd $KUBE_SNAP_BINS
- for app in $apps; do
- mkdir -p $KUBE_ARCH
- (cd $KUBE_ARCH
- echo "Fetching $app $KUBE_VERSION $KUBE_ARCH"
- curl -LO \
- https://dl.k8s.io/${KUBE_VERSION}/bin/linux/$KUBE_ARCH/$app
- chmod +x $app
- if ! file ${app} 2>&1 | grep -q 'executable'; then
- echo "${app} is not an executable"
- exit 1
- fi
- )
- done
-)
diff --git a/build-scripts/generate-bom.py b/build-scripts/generate-bom.py
new file mode 100755
index 0000000000..f358439f20
--- /dev/null
+++ b/build-scripts/generate-bom.py
@@ -0,0 +1,99 @@
+#!/usr/bin/env python3
+
+import json
+import os
+from pathlib import Path
+import subprocess
+import sys
+import yaml
+
+DIR = Path(__file__).absolute().parent
+
+SNAPCRAFT_PART_BUILD = Path(os.getenv("SNAPCRAFT_PART_BUILD", ""))
+SNAPCRAFT_PART_INSTALL = Path(os.getenv("SNAPCRAFT_PART_INSTALL", ""))
+
+BUILD_DIRECTORY = SNAPCRAFT_PART_BUILD.exists() and SNAPCRAFT_PART_BUILD or DIR / ".build"
+INSTALL_DIRECTORY = SNAPCRAFT_PART_INSTALL.exists() and SNAPCRAFT_PART_INSTALL or DIR / ".install"
+
+# Location of Python binary
+PYTHON = INSTALL_DIRECTORY / ".." / ".." / "python-runtime" / "install" / "usr" / "bin" / "python3"
+
+# Location of MicroK8s addons
+MICROK8S_ADDONS = INSTALL_DIRECTORY / ".." / ".." / "microk8s-addons" / "install" / "addons"
+
+# List of tools used to build or bundled in the snap
+TOOLS = {
+ "go": ["go", "version"],
+ "gcc": ["gcc", "--version"],
+ "python": [PYTHON, "-B", "-VV"],
+ "python-requirements": [PYTHON, "-B", "-m", "pip", "freeze"],
+}
+
+# Retrieve list of components we care about from the snapcraft.yaml file
+with open(DIR / ".." / "snap" / "snapcraft.yaml") as fin:
+ COMPONENTS = yaml.safe_load(fin)["parts"]["bom"]["after"]
+
+
+def _listdir(dir: Path):
+ try:
+ return sorted(os.listdir(dir))
+ except OSError:
+ return []
+
+
+def _parse_output(*args, **kwargs):
+ return subprocess.check_output(*args, **kwargs).decode().strip()
+
+
+def _read_file(path: Path) -> str:
+ return path.read_text().strip()
+
+
+if __name__ == "__main__":
+ BOM = {
+ "microk8s": {
+ "version": _parse_output(["git", "rev-parse", "--abbrev-ref", "HEAD"]),
+ "revision": _parse_output(["git", "rev-parse", "HEAD"]),
+ },
+ "tools": {},
+ "components": {},
+ "addons": {},
+ }
+
+ for tool_name, version_cmd in TOOLS.items():
+ BOM["tools"][tool_name] = _parse_output(version_cmd).split("\n")
+
+ for component in COMPONENTS:
+ component_dir = DIR / "components" / component
+
+ try:
+ version = _parse_output([component_dir / "version.sh"])
+ patches = _parse_output([PYTHON, DIR / "print-patches-for.py", component, version])
+ clean_patches = []
+ if patches:
+ clean_patches = [p[p.find("build-scripts/") :] for p in patches.split("\n")]
+
+ BOM["components"][component] = {
+ "repository": _read_file(component_dir / "repository"),
+ "version": version,
+ "revision": _parse_output(
+ ["git", "rev-parse", "HEAD"],
+ cwd=BUILD_DIRECTORY / ".." / ".." / component / "build" / component,
+ ),
+ "patches": clean_patches,
+ }
+ except OSError as e:
+ print(f"Could not get info for {component}: {e}", file=sys.stderr)
+
+ for repo in _listdir(MICROK8S_ADDONS):
+ repo_dir = MICROK8S_ADDONS / repo
+ if not repo_dir.is_dir():
+ continue
+
+ BOM["addons"][repo] = {
+ "repository": _parse_output(["git", "remote", "get-url", "origin"], cwd=repo_dir),
+ "version": _parse_output(["git", "rev-parse", "--abbrev-ref", "HEAD"], cwd=repo_dir),
+ "revision": _parse_output(["git", "rev-parse", "HEAD"], cwd=repo_dir),
+ }
+
+ print(json.dumps(BOM, indent=2))
diff --git a/build-scripts/images.txt b/build-scripts/images.txt
new file mode 100644
index 0000000000..0277c53f18
--- /dev/null
+++ b/build-scripts/images.txt
@@ -0,0 +1,9 @@
+docker.io/calico/cni:v3.28.1
+docker.io/calico/kube-controllers:v3.28.1
+docker.io/calico/node:v3.28.1
+docker.io/cdkbot/hostpath-provisioner:1.5.0
+docker.io/coredns/coredns:1.10.1
+docker.io/library/busybox:1.28.4
+registry.k8s.io/ingress-nginx/controller:v1.11.2
+registry.k8s.io/metrics-server/metrics-server:v0.6.3
+registry.k8s.io/pause:3.10
diff --git a/build-scripts/patches/0000-Dqlite-integration.patch b/build-scripts/patches/0000-Dqlite-integration.patch
deleted file mode 100644
index 05d9d491d0..0000000000
--- a/build-scripts/patches/0000-Dqlite-integration.patch
+++ /dev/null
@@ -1,306046 +0,0 @@
-From 27055368ab7dafd005a9f4e3ff72571f3c6810bd Mon Sep 17 00:00:00 2001
-From: Konstantinos Tsakalozos
-Date: Wed, 3 Mar 2021 17:54:14 +0200
-Subject: [PATCH] Dqlite integration
-
----
- cmd/kube-apiserver/app/options/options.go | 2 +-
- cmd/kube-apiserver/app/server.go | 28 +-
- .../apiserver/pkg/server/options/etcd.go | 8 +-
- .../pkg/storage/storagebackend/config.go | 9 +-
- .../storage/storagebackend/factory/factory.go | 12 +
- vendor/github.com/Rican7/retry/.travis.yml | 39 +
- vendor/github.com/Rican7/retry/LICENSE | 19 +
- vendor/github.com/Rican7/retry/Makefile | 83 +
- vendor/github.com/Rican7/retry/README.md | 101 +
- .../Rican7/retry/backoff/backoff.go | 67 +
- .../Rican7/retry/backoff/backoff_test.go | 178 +
- .../github.com/Rican7/retry/example_test.go | 86 +
- .../github.com/Rican7/retry/jitter/jitter.go | 89 +
- .../Rican7/retry/jitter/jitter_test.go | 101 +
- vendor/github.com/Rican7/retry/retry.go | 36 +
- vendor/github.com/Rican7/retry/retry_test.go | 122 +
- .../Rican7/retry/strategy/strategy.go | 85 +
- .../Rican7/retry/strategy/strategy_test.go | 184 +
- .../canonical/go-dqlite/.dir-locals.el | 8 +
- .../github.com/canonical/go-dqlite/.gitignore | 4 +
- .../canonical/go-dqlite/.travis.yml | 31 +
- vendor/github.com/canonical/go-dqlite/AUTHORS | 1 +
- vendor/github.com/canonical/go-dqlite/LICENSE | 201 +
- .../github.com/canonical/go-dqlite/README.md | 133 +
- .../github.com/canonical/go-dqlite/app/app.go | 633 +
- .../canonical/go-dqlite/app/app_test.go | 996 +
- .../canonical/go-dqlite/app/dial.go | 41 +
- .../canonical/go-dqlite/app/example_test.go | 112 +
- .../canonical/go-dqlite/app/files.go | 80 +
- .../canonical/go-dqlite/app/options.go | 207 +
- .../canonical/go-dqlite/app/proxy.go | 167 +
- .../canonical/go-dqlite/app/roles.go | 305 +
- .../go-dqlite/app/testdata/cluster.crt | 30 +
- .../go-dqlite/app/testdata/cluster.key | 52 +
- .../github.com/canonical/go-dqlite/app/tls.go | 114 +
- .../canonical/go-dqlite/client/client.go | 319 +
- .../go-dqlite/client/client_export_test.go | 9 +
- .../canonical/go-dqlite/client/client_test.go | 227 +
- .../canonical/go-dqlite/client/constants.go | 12 +
- .../canonical/go-dqlite/client/dial.go | 37 +
- .../canonical/go-dqlite/client/leader.go | 34 +
- .../canonical/go-dqlite/client/leader_test.go | 46 +
- .../canonical/go-dqlite/client/log.go | 23 +
- .../canonical/go-dqlite/client/store.go | 238 +
- .../canonical/go-dqlite/client/store_test.go | 55 +
- .../go-dqlite/cmd/dqlite-demo/dqlite-demo.go | 131 +
- .../canonical/go-dqlite/cmd/dqlite/dqlite.go | 135 +
- .../github.com/canonical/go-dqlite/config.go | 48 +
- .../canonical/go-dqlite/driver/driver.go | 782 +
- .../canonical/go-dqlite/driver/driver_test.go | 825 +
- .../go-dqlite/driver/integration_test.go | 470 +
- vendor/github.com/canonical/go-dqlite/go.mod | 15 +
- vendor/github.com/canonical/go-dqlite/go.sum | 151 +
- .../go-dqlite/internal/bindings/build.go | 6 +
- .../go-dqlite/internal/bindings/server.go | 274 +
- .../internal/bindings/server_test.go | 228 +
- .../go-dqlite/internal/logging/func.go | 26 +
- .../go-dqlite/internal/logging/func_test.go | 12 +
- .../go-dqlite/internal/logging/level.go | 28 +
- .../go-dqlite/internal/logging/level_test.go | 18 +
- .../go-dqlite/internal/protocol/buffer.go | 11 +
- .../go-dqlite/internal/protocol/config.go | 15 +
- .../go-dqlite/internal/protocol/connector.go | 324 +
- .../internal/protocol/connector_test.go | 368 +
- .../go-dqlite/internal/protocol/constants.go | 153 +
- .../go-dqlite/internal/protocol/dial.go | 30 +
- .../go-dqlite/internal/protocol/errors.go | 39 +
- .../go-dqlite/internal/protocol/message.go | 656 +
- .../internal/protocol/message_export_test.go | 9 +
- .../protocol/message_internal_test.go | 289 +
- .../go-dqlite/internal/protocol/protocol.go | 308 +
- .../internal/protocol/protocol_test.go | 189 +
- .../go-dqlite/internal/protocol/request.go | 172 +
- .../go-dqlite/internal/protocol/response.go | 278 +
- .../go-dqlite/internal/protocol/schema.go | 37 +
- .../go-dqlite/internal/protocol/schema.sh | 145 +
- .../go-dqlite/internal/protocol/store.go | 67 +
- .../go-dqlite/internal/shell/options.go | 48 +
- .../go-dqlite/internal/shell/shell.go | 281 +
- vendor/github.com/canonical/go-dqlite/node.go | 166 +
- .../canonical/go-dqlite/test/dqlite-demo.sh | 123 +
- .../canonical/go-dqlite/test/roles.sh | 247 +
- .../canonical/kvsql-dqlite/.dir-locals.el | 8 +
- .../github.com/canonical/kvsql-dqlite/LICENSE | 177 +
- .../canonical/kvsql-dqlite/cmd/main.go | 13 +
- .../github.com/canonical/kvsql-dqlite/go.mod | 36 +
- .../github.com/canonical/kvsql-dqlite/go.sum | 567 +
- .../kvsql-dqlite/integration_test.go | 180 +
- .../github.com/canonical/kvsql-dqlite/rest.go | 26 +
- .../kvsql-dqlite/server/config/config.go | 107 +
- .../kvsql-dqlite/server/config/dqlite.go | 45 +
- .../kvsql-dqlite/server/config/init.go | 72 +
- .../kvsql-dqlite/server/config/update.go | 45 +
- .../canonical/kvsql-dqlite/server/server.go | 126 +
- .../kvsql-dqlite/server/server_test.go | 150 +
- .../kvsql-dqlite/server/testdata/cluster.crt | 30 +
- .../kvsql-dqlite/server/testdata/cluster.key | 52 +
- .../docker/docker/pkg/locker/README.md | 64 +
- .../docker/docker/pkg/locker/locker.go | 112 +
- .../docker/docker/pkg/locker/locker_test.go | 161 +
- vendor/github.com/ghodss/yaml/.travis.yml | 7 +
- vendor/github.com/ghodss/yaml/LICENSE | 50 +
- vendor/github.com/ghodss/yaml/README.md | 121 +
- vendor/github.com/ghodss/yaml/fields.go | 501 +
- vendor/github.com/ghodss/yaml/yaml.go | 277 +
- vendor/github.com/ghodss/yaml/yaml_test.go | 287 +
- .../mysql/.github/CONTRIBUTING.md | 23 +
- .../mysql/.github/ISSUE_TEMPLATE.md | 21 +
- .../mysql/.github/PULL_REQUEST_TEMPLATE.md | 9 +
- .../github.com/go-sql-driver/mysql/.gitignore | 9 +
- .../go-sql-driver/mysql/.travis.yml | 129 +
- .../go-sql-driver/mysql/.travis/docker.cnf | 5 +
- .../go-sql-driver/mysql/.travis/gofmt.sh | 7 +
- .../go-sql-driver/mysql/.travis/wait_mysql.sh | 8 +
- vendor/github.com/go-sql-driver/mysql/AUTHORS | 103 +
- .../go-sql-driver/mysql/CHANGELOG.md | 167 +
- vendor/github.com/go-sql-driver/mysql/LICENSE | 373 +
- .../github.com/go-sql-driver/mysql/README.md | 489 +
- vendor/github.com/go-sql-driver/mysql/auth.go | 422 +
- .../go-sql-driver/mysql/auth_test.go | 1330 +
- .../go-sql-driver/mysql/benchmark_test.go | 373 +
- .../github.com/go-sql-driver/mysql/buffer.go | 182 +
- .../go-sql-driver/mysql/collations.go | 265 +
- .../go-sql-driver/mysql/conncheck.go | 54 +
- .../go-sql-driver/mysql/conncheck_dummy.go | 17 +
- .../go-sql-driver/mysql/conncheck_test.go | 38 +
- .../go-sql-driver/mysql/connection.go | 651 +
- .../go-sql-driver/mysql/connection_test.go | 175 +
- .../go-sql-driver/mysql/connector.go | 140 +
- .../go-sql-driver/mysql/connector_test.go | 30 +
- .../github.com/go-sql-driver/mysql/const.go | 174 +
- .../github.com/go-sql-driver/mysql/driver.go | 85 +
- .../go-sql-driver/mysql/driver_go110.go | 37 +
- .../go-sql-driver/mysql/driver_go110_test.go | 190 +
- .../go-sql-driver/mysql/driver_test.go | 2996 +
- vendor/github.com/go-sql-driver/mysql/dsn.go | 636 +
- .../go-sql-driver/mysql/dsn_test.go | 415 +
- .../github.com/go-sql-driver/mysql/errors.go | 65 +
- .../go-sql-driver/mysql/errors_test.go | 42 +
- .../github.com/go-sql-driver/mysql/fields.go | 194 +
- vendor/github.com/go-sql-driver/mysql/go.mod | 3 +
- .../github.com/go-sql-driver/mysql/infile.go | 182 +
- .../go-sql-driver/mysql/nulltime.go | 50 +
- .../go-sql-driver/mysql/nulltime_go113.go | 31 +
- .../go-sql-driver/mysql/nulltime_legacy.go | 34 +
- .../go-sql-driver/mysql/nulltime_test.go | 62 +
- .../github.com/go-sql-driver/mysql/packets.go | 1342 +
- .../go-sql-driver/mysql/packets_test.go | 336 +
- .../github.com/go-sql-driver/mysql/result.go | 22 +
- vendor/github.com/go-sql-driver/mysql/rows.go | 223 +
- .../go-sql-driver/mysql/statement.go | 204 +
- .../go-sql-driver/mysql/statement_test.go | 126 +
- .../go-sql-driver/mysql/transaction.go | 31 +
- .../github.com/go-sql-driver/mysql/utils.go | 701 +
- .../go-sql-driver/mysql/utils_test.go | 293 +
- vendor/github.com/gxed/GoEndian/.gitignore | 23 +
- .../github.com/gxed/GoEndian/.gx/lastpubver | 1 +
- vendor/github.com/gxed/GoEndian/LICENSE | 201 +
- vendor/github.com/gxed/GoEndian/README.md | 63 +
- vendor/github.com/gxed/GoEndian/endian.go | 43 +
- vendor/github.com/gxed/GoEndian/package.json | 15 +
- vendor/github.com/lib/pq/.gitignore | 4 +
- vendor/github.com/lib/pq/.travis.sh | 73 +
- vendor/github.com/lib/pq/.travis.yml | 44 +
- vendor/github.com/lib/pq/CONTRIBUTING.md | 29 +
- vendor/github.com/lib/pq/LICENSE.md | 8 +
- vendor/github.com/lib/pq/README.md | 95 +
- vendor/github.com/lib/pq/TESTS.md | 33 +
- vendor/github.com/lib/pq/array.go | 756 +
- vendor/github.com/lib/pq/array_test.go | 1311 +
- vendor/github.com/lib/pq/bench_test.go | 434 +
- vendor/github.com/lib/pq/buf.go | 91 +
- vendor/github.com/lib/pq/buf_test.go | 16 +
- vendor/github.com/lib/pq/certs/README | 3 +
- vendor/github.com/lib/pq/certs/bogus_root.crt | 19 +
- vendor/github.com/lib/pq/certs/postgresql.crt | 69 +
- vendor/github.com/lib/pq/certs/postgresql.key | 15 +
- vendor/github.com/lib/pq/certs/root.crt | 24 +
- vendor/github.com/lib/pq/certs/server.crt | 81 +
- vendor/github.com/lib/pq/certs/server.key | 27 +
- vendor/github.com/lib/pq/conn.go | 1923 +
- vendor/github.com/lib/pq/conn_go18.go | 149 +
- vendor/github.com/lib/pq/conn_test.go | 1777 +
- vendor/github.com/lib/pq/connector.go | 110 +
- .../lib/pq/connector_example_test.go | 29 +
- vendor/github.com/lib/pq/connector_test.go | 67 +
- vendor/github.com/lib/pq/copy.go | 282 +
- vendor/github.com/lib/pq/copy_test.go | 468 +
- vendor/github.com/lib/pq/doc.go | 245 +
- vendor/github.com/lib/pq/encode.go | 602 +
- vendor/github.com/lib/pq/encode_test.go | 766 +
- vendor/github.com/lib/pq/error.go | 515 +
- .../github.com/lib/pq/example/listen/doc.go | 98 +
- vendor/github.com/lib/pq/go.mod | 1 +
- vendor/github.com/lib/pq/go18_test.go | 319 +
- vendor/github.com/lib/pq/go19_test.go | 91 +
- vendor/github.com/lib/pq/hstore/hstore.go | 118 +
- .../github.com/lib/pq/hstore/hstore_test.go | 148 +
- vendor/github.com/lib/pq/issues_test.go | 26 +
- vendor/github.com/lib/pq/notify.go | 797 +
- vendor/github.com/lib/pq/notify_test.go | 570 +
- vendor/github.com/lib/pq/oid/doc.go | 6 +
- vendor/github.com/lib/pq/oid/gen.go | 93 +
- vendor/github.com/lib/pq/oid/types.go | 343 +
- vendor/github.com/lib/pq/rows.go | 93 +
- vendor/github.com/lib/pq/rows_test.go | 218 +
- vendor/github.com/lib/pq/scram/scram.go | 264 +
- vendor/github.com/lib/pq/ssl.go | 175 +
- vendor/github.com/lib/pq/ssl_permissions.go | 20 +
- vendor/github.com/lib/pq/ssl_test.go | 279 +
- vendor/github.com/lib/pq/ssl_windows.go | 9 +
- vendor/github.com/lib/pq/url.go | 76 +
- vendor/github.com/lib/pq/url_test.go | 66 +
- vendor/github.com/lib/pq/user_posix.go | 24 +
- vendor/github.com/lib/pq/user_windows.go | 27 +
- vendor/github.com/lib/pq/uuid.go | 23 +
- vendor/github.com/lib/pq/uuid_test.go | 46 +
- .../mattn/go-sqlite3/.github/FUNDING.yml | 8 +
- vendor/github.com/mattn/go-sqlite3/.gitignore | 14 +
- .../github.com/mattn/go-sqlite3/.travis.yml | 31 +
- vendor/github.com/mattn/go-sqlite3/LICENSE | 21 +
- vendor/github.com/mattn/go-sqlite3/README.md | 521 +
- .../go-sqlite3/_example/custom_func/main.go | 133 +
- .../mattn/go-sqlite3/_example/hook/hook.go | 78 +
- .../mattn/go-sqlite3/_example/limit/limit.go | 113 +
- .../go-sqlite3/_example/mod_regexp/Makefile | 22 +
- .../_example/mod_regexp/extension.go | 43 +
- .../_example/mod_regexp/sqlite3_mod_regexp.c | 31 +
- .../go-sqlite3/_example/mod_vtable/Makefile | 24 +
- .../_example/mod_vtable/extension.go | 37 +
- .../go-sqlite3/_example/mod_vtable/picojson.h | 1040 +
- .../_example/mod_vtable/sqlite3_mod_vtable.cc | 238 +
- .../go-sqlite3/_example/simple/simple.go | 106 +
- .../mattn/go-sqlite3/_example/trace/main.go | 264 +
- .../mattn/go-sqlite3/_example/vtable/main.go | 38 +
- .../go-sqlite3/_example/vtable/vtable.go | 111 +
- vendor/github.com/mattn/go-sqlite3/backup.go | 85 +
- .../mattn/go-sqlite3/backup_test.go | 290 +
- .../github.com/mattn/go-sqlite3/callback.go | 380 +
- .../mattn/go-sqlite3/callback_test.go | 97 +
- vendor/github.com/mattn/go-sqlite3/doc.go | 112 +
- vendor/github.com/mattn/go-sqlite3/error.go | 135 +
- .../github.com/mattn/go-sqlite3/error_test.go | 242 +
- .../mattn/go-sqlite3/sqlite3-binding.c | 224147 +++++++++++++++
- .../mattn/go-sqlite3/sqlite3-binding.h | 11893 +
- vendor/github.com/mattn/go-sqlite3/sqlite3.go | 2070 +
- .../mattn/go-sqlite3/sqlite3_context.go | 103 +
- .../mattn/go-sqlite3/sqlite3_func_crypt.go | 120 +
- .../go-sqlite3/sqlite3_func_crypt_test.go | 57 +
- .../mattn/go-sqlite3/sqlite3_go18.go | 70 +
- .../mattn/go-sqlite3/sqlite3_go18_test.go | 371 +
- .../mattn/go-sqlite3/sqlite3_libsqlite3.go | 17 +
- .../go-sqlite3/sqlite3_load_extension.go | 70 +
- .../go-sqlite3/sqlite3_load_extension_omit.go | 24 +
- .../sqlite3_opt_allow_uri_authority.go | 15 +
- .../mattn/go-sqlite3/sqlite3_opt_app_armor.go | 16 +
- .../go-sqlite3/sqlite3_opt_foreign_keys.go | 15 +
- .../mattn/go-sqlite3/sqlite3_opt_fts3_test.go | 130 +
- .../mattn/go-sqlite3/sqlite3_opt_fts5.go | 14 +
- .../mattn/go-sqlite3/sqlite3_opt_icu.go | 17 +
- .../go-sqlite3/sqlite3_opt_introspect.go | 15 +
- .../mattn/go-sqlite3/sqlite3_opt_json1.go | 13 +
- .../go-sqlite3/sqlite3_opt_secure_delete.go | 15 +
- .../sqlite3_opt_secure_delete_fast.go | 15 +
- .../mattn/go-sqlite3/sqlite3_opt_stat4.go | 15 +
- .../go-sqlite3/sqlite3_opt_unlock_notify.c | 85 +
- .../go-sqlite3/sqlite3_opt_unlock_notify.go | 93 +
- .../sqlite3_opt_unlock_notify_test.go | 222 +
- .../mattn/go-sqlite3/sqlite3_opt_userauth.go | 289 +
- .../go-sqlite3/sqlite3_opt_userauth_omit.go | 152 +
- .../go-sqlite3/sqlite3_opt_userauth_test.go | 619 +
- .../go-sqlite3/sqlite3_opt_vacuum_full.go | 15 +
- .../go-sqlite3/sqlite3_opt_vacuum_incr.go | 15 +
- .../mattn/go-sqlite3/sqlite3_opt_vtable.go | 650 +
- .../go-sqlite3/sqlite3_opt_vtable_test.go | 486 +
- .../mattn/go-sqlite3/sqlite3_other.go | 17 +
- .../mattn/go-sqlite3/sqlite3_solaris.go | 14 +
- .../mattn/go-sqlite3/sqlite3_test.go | 2313 +
- .../mattn/go-sqlite3/sqlite3_trace.go | 288 +
- .../mattn/go-sqlite3/sqlite3_type.go | 57 +
- .../go-sqlite3/sqlite3_usleep_windows.go | 39 +
- .../mattn/go-sqlite3/sqlite3_windows.go | 18 +
- .../github.com/mattn/go-sqlite3/sqlite3ext.h | 639 +
- .../mattn/go-sqlite3/static_mock.go | 21 +
- .../mattn/go-sqlite3/upgrade/package.go | 5 +
- .../mattn/go-sqlite3/upgrade/upgrade.go | 218 +
- vendor/github.com/rancher/kine/.gitignore | 63 +
- vendor/github.com/rancher/kine/Dockerfile | 9 +
- vendor/github.com/rancher/kine/LICENSE | 177 +
- vendor/github.com/rancher/kine/README.md | 9 +
- .../rancher/kine/examples/generate-certs.sh | 11 +
- .../rancher/kine/examples/minimal.md | 115 +
- .../rancher/kine/examples/mysql-ssl.cnf | 5 +
- vendor/github.com/rancher/kine/go.mod | 17 +
- vendor/github.com/rancher/kine/go.sum | 468 +
- vendor/github.com/rancher/kine/main.go | 68 +
- .../kine/pkg/broadcaster/broadcaster.go | 83 +
- .../rancher/kine/pkg/client/client.go | 133 +
- .../rancher/kine/pkg/drivers/dqlite/dqlite.go | 248 +
- .../kine/pkg/drivers/dqlite/no_dqlite.go | 14 +
- .../kine/pkg/drivers/generic/generic.go | 413 +
- .../rancher/kine/pkg/drivers/mysql/mysql.go | 168 +
- .../rancher/kine/pkg/drivers/pgsql/pgsql.go | 172 +
- .../rancher/kine/pkg/drivers/sqlite/sqlite.go | 102 +
- .../kine/pkg/drivers/sqlite/sqlite_nocgo.go | 26 +
- .../rancher/kine/pkg/endpoint/endpoint.go | 160 +
- .../kine/pkg/logstructured/logstructured.go | 362 +
- .../kine/pkg/logstructured/sqllog/sql.go | 519 +
- .../rancher/kine/pkg/server/compact.go | 34 +
- .../rancher/kine/pkg/server/create.go | 54 +
- .../rancher/kine/pkg/server/delete.go | 50 +
- .../github.com/rancher/kine/pkg/server/get.go | 27 +
- .../rancher/kine/pkg/server/lease.go | 32 +
- .../rancher/kine/pkg/server/limited.go | 52 +
- .../rancher/kine/pkg/server/list.go | 56 +
- .../rancher/kine/pkg/server/server.go | 153 +
- .../rancher/kine/pkg/server/types.go | 38 +
- .../rancher/kine/pkg/server/update.go | 72 +
- .../rancher/kine/pkg/server/watch.go | 152 +
- .../github.com/rancher/kine/pkg/tls/config.go | 31 +
- vendor/github.com/rancher/wrangler/LICENSE | 177 +
- vendor/github.com/rancher/wrangler/README.md | 68 +
- .../rancher/wrangler/pkg/apply/apply.go | 216 +
- .../wrangler/pkg/apply/client_factory.go | 18 +
- .../rancher/wrangler/pkg/apply/desiredset.go | 165 +
- .../wrangler/pkg/apply/desiredset_apply.go | 240 +
- .../wrangler/pkg/apply/desiredset_compare.go | 357 +
- .../wrangler/pkg/apply/desiredset_crud.go | 66 +
- .../wrangler/pkg/apply/desiredset_process.go | 356 +
- .../rancher/wrangler/pkg/apply/fake/apply.go | 91 +
- .../wrangler/pkg/apply/injectors/registry.go | 21 +
- .../rancher/wrangler/pkg/apply/reconcilers.go | 130 +
- .../rancher/wrangler/pkg/cleanup/cleanup.go | 31 +
- .../wrangler/pkg/condition/condition.go | 239 +
- .../wrangler/pkg/condition/condition_test.go | 19 +
- .../wrangler/pkg/controller-gen/OWNERS | 10 +
- .../wrangler/pkg/controller-gen/README.md | 4 +
- .../wrangler/pkg/controller-gen/args/args.go | 29 +
- .../pkg/controller-gen/args/groupversion.go | 60 +
- .../generators/client_generator.go | 154 +
- .../controller-gen/generators/factory_go.go | 143 +
- .../generators/group_interface_go.go | 106 +
- .../generators/group_version_interface_go.go | 115 +
- .../controller-gen/generators/list_type_go.go | 70 +
- .../pkg/controller-gen/generators/pkg.go | 22 +
- .../generators/register_group_go.go | 34 +
- .../generators/register_group_version_go.go | 123 +
- .../pkg/controller-gen/generators/type_go.go | 408 +
- .../pkg/controller-gen/generators/util.go | 31 +
- .../wrangler/pkg/controller-gen/main.go | 307 +
- .../rancher/wrangler/pkg/crd/init.go | 308 +
- .../wrangler/pkg/generic/controller.go | 219 +
- .../wrangler/pkg/generic/controllerfactory.go | 181 +
- .../wrangler/pkg/generic/generating.go | 7 +
- .../rancher/wrangler/pkg/generic/handlers.go | 95 +
- .../rancher/wrangler/pkg/generic/remove.go | 119 +
- .../pkg/genericcondition/condition.go | 18 +
- .../rancher/wrangler/pkg/gvk/get.go | 47 +
- .../rancher/wrangler/pkg/kubeconfig/loader.go | 52 +
- .../rancher/wrangler/pkg/kv/split.go | 37 +
- .../rancher/wrangler/pkg/leader/leader.go | 67 +
- .../rancher/wrangler/pkg/merr/error.go | 38 +
- .../rancher/wrangler/pkg/name/name.go | 59 +
- .../wrangler/pkg/objectset/objectset.go | 143 +
- .../wrangler/pkg/relatedresource/changeset.go | 79 +
- .../rancher/wrangler/pkg/resolvehome/main.go | 25 +
- .../rancher/wrangler/pkg/schemes/all.go | 7 +
- .../rancher/wrangler/pkg/signals/signal.go | 44 +
- .../wrangler/pkg/signals/signal_posix.go | 26 +
- .../wrangler/pkg/signals/signal_windows.go | 23 +
- .../rancher/wrangler/pkg/slice/contains.go | 22 +
- .../rancher/wrangler/pkg/start/all.go | 40 +
- .../rancher/wrangler/pkg/ticker/ticker.go | 15 +
- .../rancher/wrangler/pkg/trigger/evalall.go | 63 +
- .../rancher/wrangler/pkg/yaml/yaml.go | 90 +
- 375 files changed, 302928 insertions(+), 7 deletions(-)
- create mode 100644 vendor/github.com/Rican7/retry/.travis.yml
- create mode 100644 vendor/github.com/Rican7/retry/LICENSE
- create mode 100644 vendor/github.com/Rican7/retry/Makefile
- create mode 100644 vendor/github.com/Rican7/retry/README.md
- create mode 100644 vendor/github.com/Rican7/retry/backoff/backoff.go
- create mode 100644 vendor/github.com/Rican7/retry/backoff/backoff_test.go
- create mode 100644 vendor/github.com/Rican7/retry/example_test.go
- create mode 100644 vendor/github.com/Rican7/retry/jitter/jitter.go
- create mode 100644 vendor/github.com/Rican7/retry/jitter/jitter_test.go
- create mode 100644 vendor/github.com/Rican7/retry/retry.go
- create mode 100644 vendor/github.com/Rican7/retry/retry_test.go
- create mode 100644 vendor/github.com/Rican7/retry/strategy/strategy.go
- create mode 100644 vendor/github.com/Rican7/retry/strategy/strategy_test.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/.dir-locals.el
- create mode 100644 vendor/github.com/canonical/go-dqlite/.gitignore
- create mode 100644 vendor/github.com/canonical/go-dqlite/.travis.yml
- create mode 100644 vendor/github.com/canonical/go-dqlite/AUTHORS
- create mode 100644 vendor/github.com/canonical/go-dqlite/LICENSE
- create mode 100644 vendor/github.com/canonical/go-dqlite/README.md
- create mode 100644 vendor/github.com/canonical/go-dqlite/app/app.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/app/app_test.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/app/dial.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/app/example_test.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/app/files.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/app/options.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/app/proxy.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/app/roles.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/app/testdata/cluster.crt
- create mode 100644 vendor/github.com/canonical/go-dqlite/app/testdata/cluster.key
- create mode 100644 vendor/github.com/canonical/go-dqlite/app/tls.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/client/client.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/client/client_export_test.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/client/client_test.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/client/constants.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/client/dial.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/client/leader.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/client/leader_test.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/client/log.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/client/store.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/client/store_test.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/cmd/dqlite-demo/dqlite-demo.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/cmd/dqlite/dqlite.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/config.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/driver/driver.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/driver/driver_test.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/driver/integration_test.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/go.mod
- create mode 100644 vendor/github.com/canonical/go-dqlite/go.sum
- create mode 100644 vendor/github.com/canonical/go-dqlite/internal/bindings/build.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/internal/bindings/server.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/internal/bindings/server_test.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/internal/logging/func.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/internal/logging/func_test.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/internal/logging/level.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/internal/logging/level_test.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/internal/protocol/buffer.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/internal/protocol/config.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/internal/protocol/connector.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/internal/protocol/connector_test.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/internal/protocol/constants.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/internal/protocol/dial.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/internal/protocol/errors.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/internal/protocol/message.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/internal/protocol/message_export_test.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/internal/protocol/message_internal_test.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/internal/protocol/protocol.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/internal/protocol/protocol_test.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/internal/protocol/request.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/internal/protocol/response.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/internal/protocol/schema.go
- create mode 100755 vendor/github.com/canonical/go-dqlite/internal/protocol/schema.sh
- create mode 100644 vendor/github.com/canonical/go-dqlite/internal/protocol/store.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/internal/shell/options.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/internal/shell/shell.go
- create mode 100644 vendor/github.com/canonical/go-dqlite/node.go
- create mode 100755 vendor/github.com/canonical/go-dqlite/test/dqlite-demo.sh
- create mode 100755 vendor/github.com/canonical/go-dqlite/test/roles.sh
- create mode 100644 vendor/github.com/canonical/kvsql-dqlite/.dir-locals.el
- create mode 100644 vendor/github.com/canonical/kvsql-dqlite/LICENSE
- create mode 100644 vendor/github.com/canonical/kvsql-dqlite/cmd/main.go
- create mode 100644 vendor/github.com/canonical/kvsql-dqlite/go.mod
- create mode 100644 vendor/github.com/canonical/kvsql-dqlite/go.sum
- create mode 100644 vendor/github.com/canonical/kvsql-dqlite/integration_test.go
- create mode 100644 vendor/github.com/canonical/kvsql-dqlite/rest.go
- create mode 100644 vendor/github.com/canonical/kvsql-dqlite/server/config/config.go
- create mode 100644 vendor/github.com/canonical/kvsql-dqlite/server/config/dqlite.go
- create mode 100644 vendor/github.com/canonical/kvsql-dqlite/server/config/init.go
- create mode 100644 vendor/github.com/canonical/kvsql-dqlite/server/config/update.go
- create mode 100644 vendor/github.com/canonical/kvsql-dqlite/server/server.go
- create mode 100644 vendor/github.com/canonical/kvsql-dqlite/server/server_test.go
- create mode 100644 vendor/github.com/canonical/kvsql-dqlite/server/testdata/cluster.crt
- create mode 100644 vendor/github.com/canonical/kvsql-dqlite/server/testdata/cluster.key
- create mode 100644 vendor/github.com/docker/docker/pkg/locker/README.md
- create mode 100644 vendor/github.com/docker/docker/pkg/locker/locker.go
- create mode 100644 vendor/github.com/docker/docker/pkg/locker/locker_test.go
- create mode 100644 vendor/github.com/ghodss/yaml/.travis.yml
- create mode 100644 vendor/github.com/ghodss/yaml/LICENSE
- create mode 100644 vendor/github.com/ghodss/yaml/README.md
- create mode 100644 vendor/github.com/ghodss/yaml/fields.go
- create mode 100644 vendor/github.com/ghodss/yaml/yaml.go
- create mode 100644 vendor/github.com/ghodss/yaml/yaml_test.go
- create mode 100644 vendor/github.com/go-sql-driver/mysql/.github/CONTRIBUTING.md
- create mode 100644 vendor/github.com/go-sql-driver/mysql/.github/ISSUE_TEMPLATE.md
- create mode 100644 vendor/github.com/go-sql-driver/mysql/.github/PULL_REQUEST_TEMPLATE.md
- create mode 100644 vendor/github.com/go-sql-driver/mysql/.gitignore
- create mode 100644 vendor/github.com/go-sql-driver/mysql/.travis.yml
- create mode 100644 vendor/github.com/go-sql-driver/mysql/.travis/docker.cnf
- create mode 100755 vendor/github.com/go-sql-driver/mysql/.travis/gofmt.sh
- create mode 100755 vendor/github.com/go-sql-driver/mysql/.travis/wait_mysql.sh
- create mode 100644 vendor/github.com/go-sql-driver/mysql/AUTHORS
- create mode 100644 vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
- create mode 100644 vendor/github.com/go-sql-driver/mysql/LICENSE
- create mode 100644 vendor/github.com/go-sql-driver/mysql/README.md
- create mode 100644 vendor/github.com/go-sql-driver/mysql/auth.go
- create mode 100644 vendor/github.com/go-sql-driver/mysql/auth_test.go
- create mode 100644 vendor/github.com/go-sql-driver/mysql/benchmark_test.go
- create mode 100644 vendor/github.com/go-sql-driver/mysql/buffer.go
- create mode 100644 vendor/github.com/go-sql-driver/mysql/collations.go
- create mode 100644 vendor/github.com/go-sql-driver/mysql/conncheck.go
- create mode 100644 vendor/github.com/go-sql-driver/mysql/conncheck_dummy.go
- create mode 100644 vendor/github.com/go-sql-driver/mysql/conncheck_test.go
- create mode 100644 vendor/github.com/go-sql-driver/mysql/connection.go
- create mode 100644 vendor/github.com/go-sql-driver/mysql/connection_test.go
- create mode 100644 vendor/github.com/go-sql-driver/mysql/connector.go
- create mode 100644 vendor/github.com/go-sql-driver/mysql/connector_test.go
- create mode 100644 vendor/github.com/go-sql-driver/mysql/const.go
- create mode 100644 vendor/github.com/go-sql-driver/mysql/driver.go
- create mode 100644 vendor/github.com/go-sql-driver/mysql/driver_go110.go
- create mode 100644 vendor/github.com/go-sql-driver/mysql/driver_go110_test.go
- create mode 100644 vendor/github.com/go-sql-driver/mysql/driver_test.go
- create mode 100644 vendor/github.com/go-sql-driver/mysql/dsn.go
- create mode 100644 vendor/github.com/go-sql-driver/mysql/dsn_test.go
- create mode 100644 vendor/github.com/go-sql-driver/mysql/errors.go
- create mode 100644 vendor/github.com/go-sql-driver/mysql/errors_test.go
- create mode 100644 vendor/github.com/go-sql-driver/mysql/fields.go
- create mode 100644 vendor/github.com/go-sql-driver/mysql/go.mod
- create mode 100644 vendor/github.com/go-sql-driver/mysql/infile.go
- create mode 100644 vendor/github.com/go-sql-driver/mysql/nulltime.go
- create mode 100644 vendor/github.com/go-sql-driver/mysql/nulltime_go113.go
- create mode 100644 vendor/github.com/go-sql-driver/mysql/nulltime_legacy.go
- create mode 100644 vendor/github.com/go-sql-driver/mysql/nulltime_test.go
- create mode 100644 vendor/github.com/go-sql-driver/mysql/packets.go
- create mode 100644 vendor/github.com/go-sql-driver/mysql/packets_test.go
- create mode 100644 vendor/github.com/go-sql-driver/mysql/result.go
- create mode 100644 vendor/github.com/go-sql-driver/mysql/rows.go
- create mode 100644 vendor/github.com/go-sql-driver/mysql/statement.go
- create mode 100644 vendor/github.com/go-sql-driver/mysql/statement_test.go
- create mode 100644 vendor/github.com/go-sql-driver/mysql/transaction.go
- create mode 100644 vendor/github.com/go-sql-driver/mysql/utils.go
- create mode 100644 vendor/github.com/go-sql-driver/mysql/utils_test.go
- create mode 100644 vendor/github.com/gxed/GoEndian/.gitignore
- create mode 100644 vendor/github.com/gxed/GoEndian/.gx/lastpubver
- create mode 100644 vendor/github.com/gxed/GoEndian/LICENSE
- create mode 100644 vendor/github.com/gxed/GoEndian/README.md
- create mode 100644 vendor/github.com/gxed/GoEndian/endian.go
- create mode 100644 vendor/github.com/gxed/GoEndian/package.json
- create mode 100644 vendor/github.com/lib/pq/.gitignore
- create mode 100755 vendor/github.com/lib/pq/.travis.sh
- create mode 100644 vendor/github.com/lib/pq/.travis.yml
- create mode 100644 vendor/github.com/lib/pq/CONTRIBUTING.md
- create mode 100644 vendor/github.com/lib/pq/LICENSE.md
- create mode 100644 vendor/github.com/lib/pq/README.md
- create mode 100644 vendor/github.com/lib/pq/TESTS.md
- create mode 100644 vendor/github.com/lib/pq/array.go
- create mode 100644 vendor/github.com/lib/pq/array_test.go
- create mode 100644 vendor/github.com/lib/pq/bench_test.go
- create mode 100644 vendor/github.com/lib/pq/buf.go
- create mode 100644 vendor/github.com/lib/pq/buf_test.go
- create mode 100644 vendor/github.com/lib/pq/certs/README
- create mode 100644 vendor/github.com/lib/pq/certs/bogus_root.crt
- create mode 100644 vendor/github.com/lib/pq/certs/postgresql.crt
- create mode 100644 vendor/github.com/lib/pq/certs/postgresql.key
- create mode 100644 vendor/github.com/lib/pq/certs/root.crt
- create mode 100644 vendor/github.com/lib/pq/certs/server.crt
- create mode 100644 vendor/github.com/lib/pq/certs/server.key
- create mode 100644 vendor/github.com/lib/pq/conn.go
- create mode 100644 vendor/github.com/lib/pq/conn_go18.go
- create mode 100644 vendor/github.com/lib/pq/conn_test.go
- create mode 100644 vendor/github.com/lib/pq/connector.go
- create mode 100644 vendor/github.com/lib/pq/connector_example_test.go
- create mode 100644 vendor/github.com/lib/pq/connector_test.go
- create mode 100644 vendor/github.com/lib/pq/copy.go
- create mode 100644 vendor/github.com/lib/pq/copy_test.go
- create mode 100644 vendor/github.com/lib/pq/doc.go
- create mode 100644 vendor/github.com/lib/pq/encode.go
- create mode 100644 vendor/github.com/lib/pq/encode_test.go
- create mode 100644 vendor/github.com/lib/pq/error.go
- create mode 100644 vendor/github.com/lib/pq/example/listen/doc.go
- create mode 100644 vendor/github.com/lib/pq/go.mod
- create mode 100644 vendor/github.com/lib/pq/go18_test.go
- create mode 100644 vendor/github.com/lib/pq/go19_test.go
- create mode 100644 vendor/github.com/lib/pq/hstore/hstore.go
- create mode 100644 vendor/github.com/lib/pq/hstore/hstore_test.go
- create mode 100644 vendor/github.com/lib/pq/issues_test.go
- create mode 100644 vendor/github.com/lib/pq/notify.go
- create mode 100644 vendor/github.com/lib/pq/notify_test.go
- create mode 100644 vendor/github.com/lib/pq/oid/doc.go
- create mode 100644 vendor/github.com/lib/pq/oid/gen.go
- create mode 100644 vendor/github.com/lib/pq/oid/types.go
- create mode 100644 vendor/github.com/lib/pq/rows.go
- create mode 100644 vendor/github.com/lib/pq/rows_test.go
- create mode 100644 vendor/github.com/lib/pq/scram/scram.go
- create mode 100644 vendor/github.com/lib/pq/ssl.go
- create mode 100644 vendor/github.com/lib/pq/ssl_permissions.go
- create mode 100644 vendor/github.com/lib/pq/ssl_test.go
- create mode 100644 vendor/github.com/lib/pq/ssl_windows.go
- create mode 100644 vendor/github.com/lib/pq/url.go
- create mode 100644 vendor/github.com/lib/pq/url_test.go
- create mode 100644 vendor/github.com/lib/pq/user_posix.go
- create mode 100644 vendor/github.com/lib/pq/user_windows.go
- create mode 100644 vendor/github.com/lib/pq/uuid.go
- create mode 100644 vendor/github.com/lib/pq/uuid_test.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/.github/FUNDING.yml
- create mode 100644 vendor/github.com/mattn/go-sqlite3/.gitignore
- create mode 100644 vendor/github.com/mattn/go-sqlite3/.travis.yml
- create mode 100644 vendor/github.com/mattn/go-sqlite3/LICENSE
- create mode 100644 vendor/github.com/mattn/go-sqlite3/README.md
- create mode 100644 vendor/github.com/mattn/go-sqlite3/_example/custom_func/main.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/_example/hook/hook.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/_example/limit/limit.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/_example/mod_regexp/Makefile
- create mode 100644 vendor/github.com/mattn/go-sqlite3/_example/mod_regexp/extension.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/_example/mod_regexp/sqlite3_mod_regexp.c
- create mode 100644 vendor/github.com/mattn/go-sqlite3/_example/mod_vtable/Makefile
- create mode 100644 vendor/github.com/mattn/go-sqlite3/_example/mod_vtable/extension.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/_example/mod_vtable/picojson.h
- create mode 100644 vendor/github.com/mattn/go-sqlite3/_example/mod_vtable/sqlite3_mod_vtable.cc
- create mode 100644 vendor/github.com/mattn/go-sqlite3/_example/simple/simple.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/_example/trace/main.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/_example/vtable/main.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/_example/vtable/vtable.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/backup.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/backup_test.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/callback.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/callback_test.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/doc.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/error.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/error_test.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c
- create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h
- create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_context.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_func_crypt.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_func_crypt_test.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_go18.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_go18_test.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_libsqlite3.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension_omit.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_allow_uri_authority.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_app_armor.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_foreign_keys.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_fts3_test.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_fts5.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_icu.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_introspect.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_json1.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_secure_delete.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_secure_delete_fast.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_stat4.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.c
- create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify_test.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth_omit.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth_test.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vacuum_full.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vacuum_incr.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vtable.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vtable_test.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_other.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_solaris.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_test.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_trace.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_type.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_usleep_windows.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_windows.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3ext.h
- create mode 100644 vendor/github.com/mattn/go-sqlite3/static_mock.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/upgrade/package.go
- create mode 100644 vendor/github.com/mattn/go-sqlite3/upgrade/upgrade.go
- create mode 100644 vendor/github.com/rancher/kine/.gitignore
- create mode 100644 vendor/github.com/rancher/kine/Dockerfile
- create mode 100644 vendor/github.com/rancher/kine/LICENSE
- create mode 100644 vendor/github.com/rancher/kine/README.md
- create mode 100755 vendor/github.com/rancher/kine/examples/generate-certs.sh
- create mode 100644 vendor/github.com/rancher/kine/examples/minimal.md
- create mode 100644 vendor/github.com/rancher/kine/examples/mysql-ssl.cnf
- create mode 100644 vendor/github.com/rancher/kine/go.mod
- create mode 100644 vendor/github.com/rancher/kine/go.sum
- create mode 100644 vendor/github.com/rancher/kine/main.go
- create mode 100644 vendor/github.com/rancher/kine/pkg/broadcaster/broadcaster.go
- create mode 100644 vendor/github.com/rancher/kine/pkg/client/client.go
- create mode 100644 vendor/github.com/rancher/kine/pkg/drivers/dqlite/dqlite.go
- create mode 100644 vendor/github.com/rancher/kine/pkg/drivers/dqlite/no_dqlite.go
- create mode 100644 vendor/github.com/rancher/kine/pkg/drivers/generic/generic.go
- create mode 100644 vendor/github.com/rancher/kine/pkg/drivers/mysql/mysql.go
- create mode 100644 vendor/github.com/rancher/kine/pkg/drivers/pgsql/pgsql.go
- create mode 100644 vendor/github.com/rancher/kine/pkg/drivers/sqlite/sqlite.go
- create mode 100644 vendor/github.com/rancher/kine/pkg/drivers/sqlite/sqlite_nocgo.go
- create mode 100644 vendor/github.com/rancher/kine/pkg/endpoint/endpoint.go
- create mode 100644 vendor/github.com/rancher/kine/pkg/logstructured/logstructured.go
- create mode 100644 vendor/github.com/rancher/kine/pkg/logstructured/sqllog/sql.go
- create mode 100644 vendor/github.com/rancher/kine/pkg/server/compact.go
- create mode 100644 vendor/github.com/rancher/kine/pkg/server/create.go
- create mode 100644 vendor/github.com/rancher/kine/pkg/server/delete.go
- create mode 100644 vendor/github.com/rancher/kine/pkg/server/get.go
- create mode 100644 vendor/github.com/rancher/kine/pkg/server/lease.go
- create mode 100644 vendor/github.com/rancher/kine/pkg/server/limited.go
- create mode 100644 vendor/github.com/rancher/kine/pkg/server/list.go
- create mode 100644 vendor/github.com/rancher/kine/pkg/server/server.go
- create mode 100644 vendor/github.com/rancher/kine/pkg/server/types.go
- create mode 100644 vendor/github.com/rancher/kine/pkg/server/update.go
- create mode 100644 vendor/github.com/rancher/kine/pkg/server/watch.go
- create mode 100644 vendor/github.com/rancher/kine/pkg/tls/config.go
- create mode 100644 vendor/github.com/rancher/wrangler/LICENSE
- create mode 100644 vendor/github.com/rancher/wrangler/README.md
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/apply/apply.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/apply/client_factory.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/apply/desiredset.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/apply/desiredset_apply.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/apply/desiredset_compare.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/apply/desiredset_crud.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/apply/desiredset_process.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/apply/fake/apply.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/apply/injectors/registry.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/apply/reconcilers.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/cleanup/cleanup.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/condition/condition.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/condition/condition_test.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/controller-gen/OWNERS
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/controller-gen/README.md
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/controller-gen/args/args.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/controller-gen/args/groupversion.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/controller-gen/generators/client_generator.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/controller-gen/generators/factory_go.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/controller-gen/generators/group_interface_go.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/controller-gen/generators/group_version_interface_go.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/controller-gen/generators/list_type_go.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/controller-gen/generators/pkg.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/controller-gen/generators/register_group_go.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/controller-gen/generators/register_group_version_go.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/controller-gen/generators/type_go.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/controller-gen/generators/util.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/controller-gen/main.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/crd/init.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/generic/controller.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/generic/controllerfactory.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/generic/generating.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/generic/handlers.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/generic/remove.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/genericcondition/condition.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/gvk/get.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/kubeconfig/loader.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/kv/split.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/leader/leader.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/merr/error.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/name/name.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/objectset/objectset.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/relatedresource/changeset.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/resolvehome/main.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/schemes/all.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/signals/signal.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/signals/signal_posix.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/signals/signal_windows.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/slice/contains.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/start/all.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/ticker/ticker.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/trigger/evalall.go
- create mode 100644 vendor/github.com/rancher/wrangler/pkg/yaml/yaml.go
-
-diff --git a/cmd/kube-apiserver/app/options/options.go b/cmd/kube-apiserver/app/options/options.go
-index a16bc5498c5..b542b7c73a0 100644
---- a/cmd/kube-apiserver/app/options/options.go
-+++ b/cmd/kube-apiserver/app/options/options.go
-@@ -167,7 +167,7 @@ func addDummyInsecureFlags(fs *pflag.FlagSet) {
- func (s *ServerRunOptions) Flags() (fss cliflag.NamedFlagSets) {
- // Add the generic flags.
- s.GenericServerRunOptions.AddUniversalFlags(fss.FlagSet("generic"))
-- s.Etcd.AddFlags(fss.FlagSet("etcd"))
-+ s.Etcd.AddFlags(fss.FlagSet("storage"))
- s.SecureServing.AddFlags(fss.FlagSet("secure serving"))
- addDummyInsecureFlags(fss.FlagSet("insecure serving"))
- s.Audit.AddFlags(fss.FlagSet("auditing"))
-diff --git a/cmd/kube-apiserver/app/server.go b/cmd/kube-apiserver/app/server.go
-index 59413c4be1d..9fb54ec9a64 100644
---- a/cmd/kube-apiserver/app/server.go
-+++ b/cmd/kube-apiserver/app/server.go
-@@ -20,6 +20,7 @@ limitations under the License.
- package app
-
- import (
-+ "context"
- "crypto/tls"
- "fmt"
- "net"
-@@ -33,6 +34,8 @@ import (
- "github.com/spf13/cobra"
- "github.com/spf13/pflag"
-
-+ kvsqlfactory "github.com/canonical/kvsql-dqlite"
-+ kvsqlserver "github.com/canonical/kvsql-dqlite/server"
- extensionsapiserver "k8s.io/apiextensions-apiserver/pkg/apiserver"
- utilerrors "k8s.io/apimachinery/pkg/util/errors"
- utilnet "k8s.io/apimachinery/pkg/util/net"
-@@ -48,6 +51,7 @@ import (
- serveroptions "k8s.io/apiserver/pkg/server/options"
- serverstorage "k8s.io/apiserver/pkg/server/storage"
- "k8s.io/apiserver/pkg/storage/etcd3/preflight"
-+ "k8s.io/apiserver/pkg/storage/storagebackend"
- "k8s.io/apiserver/pkg/util/feature"
- utilfeature "k8s.io/apiserver/pkg/util/feature"
- utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol"
-@@ -177,6 +181,17 @@ cluster's shared state through which all other components interact.`,
-
- // Run runs the specified APIServer. This should never exit.
- func Run(completeOptions completedServerRunOptions, stopCh <-chan struct{}) error {
-+ if completeOptions.Etcd.StorageConfig.Type == storagebackend.StorageTypeDqlite {
-+ config := completeOptions.Etcd.StorageConfig
-+ server, err := kvsqlserver.New(config.Dir)
-+ if err != nil {
-+ return err
-+ }
-+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
-+ defer cancel()
-+ defer server.Close(ctx)
-+ }
-+
- // To help debugging, immediately log version
- klog.Infof("Version: %+v", version.Get())
-
-@@ -221,6 +236,11 @@ func CreateServerChain(completedOptions completedServerRunOptions, stopCh <-chan
- return nil, err
- }
-
-+ if completedOptions.Etcd.StorageConfig.Type == storagebackend.StorageTypeDqlite {
-+ kvsqlRoutes := kvsqlfactory.Rest{}
-+ kvsqlRoutes.Install(kubeAPIServer.GenericAPIServer.Handler.GoRestfulContainer)
-+ }
-+
- // aggregator comes last in the chain
- aggregatorConfig, err := createAggregatorConfig(*kubeAPIServerConfig.GenericConfig, completedOptions.ServerRunOptions, kubeAPIServerConfig.ExtraConfig.VersionedInformers, serviceResolver, proxyTransport, pluginInitializer)
- if err != nil {
-@@ -308,9 +328,11 @@ func CreateKubeAPIServerConfig(
- return nil, nil, nil, err
- }
-
-- if _, port, err := net.SplitHostPort(s.Etcd.StorageConfig.Transport.ServerList[0]); err == nil && port != "0" && len(port) != 0 {
-- if err := utilwait.PollImmediate(etcdRetryInterval, etcdRetryLimit*etcdRetryInterval, preflight.EtcdConnection{ServerList: s.Etcd.StorageConfig.Transport.ServerList}.CheckEtcdServers); err != nil {
-- return nil, nil, nil, fmt.Errorf("error waiting for etcd connection: %v", err)
-+ if s.Etcd.StorageConfig.Type != storagebackend.StorageTypeDqlite {
-+ if _, port, err := net.SplitHostPort(s.Etcd.StorageConfig.Transport.ServerList[0]); err == nil && port != "0" && len(port) != 0 {
-+ if err := utilwait.PollImmediate(etcdRetryInterval, etcdRetryLimit*etcdRetryInterval, preflight.EtcdConnection{ServerList: s.Etcd.StorageConfig.Transport.ServerList}.CheckEtcdServers); err != nil {
-+ return nil, nil, nil, fmt.Errorf("error waiting for etcd connection: %v", err)
-+ }
- }
- }
-
-diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go b/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go
-index d8b45b8198f..48f5ac5c8a9 100644
---- a/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go
-+++ b/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go
-@@ -63,6 +63,7 @@ type EtcdOptions struct {
-
- var storageTypes = sets.NewString(
- storagebackend.StorageTypeETCD3,
-+ storagebackend.StorageTypeDqlite,
- )
-
- func NewEtcdOptions(backendConfig *storagebackend.Config) *EtcdOptions {
-@@ -85,7 +86,9 @@ func (s *EtcdOptions) Validate() []error {
-
- allErrors := []error{}
- if len(s.StorageConfig.Transport.ServerList) == 0 {
-- allErrors = append(allErrors, fmt.Errorf("--etcd-servers must be specified"))
-+ if s.StorageConfig.Type != storagebackend.StorageTypeDqlite {
-+ allErrors = append(allErrors, fmt.Errorf("--etcd-servers must be specified"))
-+ }
- }
-
- if s.StorageConfig.Type != storagebackend.StorageTypeUnset && !storageTypes.Has(s.StorageConfig.Type) {
-@@ -183,6 +186,9 @@ func (s *EtcdOptions) AddFlags(fs *pflag.FlagSet) {
- fs.DurationVar(&s.StorageConfig.DBMetricPollInterval, "etcd-db-metric-poll-interval", s.StorageConfig.DBMetricPollInterval,
- "The interval of requests to poll etcd and update metric. 0 disables the metric collection")
-
-+ fs.StringVar(&s.StorageConfig.Dir, "storage-dir", s.StorageConfig.Dir,
-+ "Directory to use for storing local storage data.")
-+
- fs.DurationVar(&s.StorageConfig.HealthcheckTimeout, "etcd-healthcheck-timeout", s.StorageConfig.HealthcheckTimeout,
- "The timeout to use when checking etcd health.")
-
-diff --git a/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/config.go b/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/config.go
-index 500e55c0206..85a48707aa8 100644
---- a/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/config.go
-+++ b/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/config.go
-@@ -26,12 +26,14 @@ import (
- )
-
- const (
-- StorageTypeUnset = ""
-- StorageTypeETCD3 = "etcd3"
-+ StorageTypeUnset = ""
-+ StorageTypeETCD3 = "etcd3"
-+ StorageTypeDqlite = "dqlite"
-
- DefaultCompactInterval = 5 * time.Minute
- DefaultDBMetricPollInterval = 30 * time.Second
- DefaultHealthcheckTimeout = 2 * time.Second
-+ DefaultDir = "/var/lib/kubernetes/backend"
- )
-
- // TransportConfig holds all connection related info, i.e. equal TransportConfig means equal servers we talk to.
-@@ -78,6 +80,8 @@ type Config struct {
- DBMetricPollInterval time.Duration
- // HealthcheckTimeout specifies the timeout used when checking health
- HealthcheckTimeout time.Duration
-+ // Dir is the directory to use for persisting local data.
-+ Dir string
-
- LeaseManagerConfig etcd3.LeaseManagerConfig
- }
-@@ -91,5 +95,6 @@ func NewDefaultConfig(prefix string, codec runtime.Codec) *Config {
- DBMetricPollInterval: DefaultDBMetricPollInterval,
- HealthcheckTimeout: DefaultHealthcheckTimeout,
- LeaseManagerConfig: etcd3.NewDefaultLeaseManagerConfig(),
-+ Dir: DefaultDir,
- }
- }
-diff --git a/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/factory.go b/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/factory.go
-index 1e8a8cdb0f0..3429191f2cc 100644
---- a/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/factory.go
-+++ b/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/factory.go
-@@ -29,6 +29,12 @@ type DestroyFunc func()
-
- // Create creates a storage backend based on given config.
- func Create(c storagebackend.Config, newFunc func() runtime.Object) (storage.Interface, DestroyFunc, error) {
-+ if c.Type == storagebackend.StorageTypeDqlite {
-+ c.Transport.ServerList = []string{
-+ fmt.Sprintf("unix://%s/kine.sock", c.Dir),
-+ }
-+ c.Type = storagebackend.StorageTypeETCD3
-+ }
- switch c.Type {
- case "etcd2":
- return nil, nil, fmt.Errorf("%v is no longer a supported storage backend", c.Type)
-@@ -41,6 +47,12 @@ func Create(c storagebackend.Config, newFunc func() runtime.Object) (storage.Int
-
- // CreateHealthCheck creates a healthcheck function based on given config.
- func CreateHealthCheck(c storagebackend.Config) (func() error, error) {
-+ if c.Type == storagebackend.StorageTypeDqlite {
-+ c.Transport.ServerList = []string{
-+ fmt.Sprintf("unix://%s/kine.sock", c.Dir),
-+ }
-+ c.Type = storagebackend.StorageTypeETCD3
-+ }
- switch c.Type {
- case "etcd2":
- return nil, fmt.Errorf("%v is no longer a supported storage backend", c.Type)
-diff --git a/vendor/github.com/Rican7/retry/.travis.yml b/vendor/github.com/Rican7/retry/.travis.yml
-new file mode 100644
-index 00000000000..399e7b64f27
---- /dev/null
-+++ b/vendor/github.com/Rican7/retry/.travis.yml
-@@ -0,0 +1,39 @@
-+language: go
-+
-+go:
-+ - 1.6
-+ - tip
-+
-+sudo: false
-+
-+before_install:
-+ # Install tools necessary to report code-coverage to Coveralls.io
-+ - go get github.com/mattn/goveralls
-+
-+ # Export some environment variables
-+ - export GO_TEST_COVERAGE_FILE_NAME='coverage.out'
-+
-+install:
-+ # Get all imported packages
-+ - make install-deps install-deps-dev
-+
-+ # Basic build errors
-+ - make build
-+
-+script:
-+ # Lint
-+ - make format-lint
-+ - make import-lint
-+ - make copyright-lint
-+
-+ # Run tests
-+ - make test-with-coverage-profile
-+
-+after_success:
-+ # Report our code-coverage to Coveralls.io
-+ - goveralls -service=travis-ci -coverprofile="${GO_TEST_COVERAGE_FILE_NAME}"
-+
-+matrix:
-+ allow_failures:
-+ - go: tip
-+ fast_finish: true
-diff --git a/vendor/github.com/Rican7/retry/LICENSE b/vendor/github.com/Rican7/retry/LICENSE
-new file mode 100644
-index 00000000000..361507628d9
---- /dev/null
-+++ b/vendor/github.com/Rican7/retry/LICENSE
-@@ -0,0 +1,19 @@
-+Copyright (C) 2016 Trevor N. Suarez (Rican7)
-+
-+Permission is hereby granted, free of charge, to any person obtaining
-+a copy of this software and associated documentation files (the "Software"),
-+to deal in the Software without restriction, including without limitation
-+the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+and/or sell copies of the Software, and to permit persons to whom the
-+Software is furnished to do so, subject to the following conditions:
-+
-+The above copyright notice and this permission notice shall be included
-+in all copies or substantial portions of the Software.
-+
-+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
-+DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
-+OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-diff --git a/vendor/github.com/Rican7/retry/Makefile b/vendor/github.com/Rican7/retry/Makefile
-new file mode 100644
-index 00000000000..77d4bcd7f72
---- /dev/null
-+++ b/vendor/github.com/Rican7/retry/Makefile
-@@ -0,0 +1,83 @@
-+# Define some VCS context
-+PARENT_BRANCH ?= master
-+
-+# Set the mode for code-coverage
-+GO_TEST_COVERAGE_MODE ?= count
-+GO_TEST_COVERAGE_FILE_NAME ?= coverage.out
-+
-+# Set flags for `gofmt`
-+GOFMT_FLAGS ?= -s
-+
-+# Set a default `min_confidence` value for `golint`
-+GOLINT_MIN_CONFIDENCE ?= 0.3
-+
-+
-+all: install-deps build install
-+
-+clean:
-+ go clean -i -x ./...
-+
-+build:
-+ go build -v ./...
-+
-+install:
-+ go install ./...
-+
-+install-deps:
-+ go get -d -t ./...
-+
-+install-deps-dev: install-deps
-+ go get github.com/golang/lint/golint
-+ go get golang.org/x/tools/cmd/goimports
-+
-+update-deps:
-+ go get -d -t -u ./...
-+
-+update-deps-dev: update-deps
-+ go get -u github.com/golang/lint/golint
-+ go get -u golang.org/x/tools/cmd/goimports
-+
-+test:
-+ go test -v ./...
-+
-+test-with-coverage:
-+ go test -cover ./...
-+
-+test-with-coverage-formatted:
-+ go test -cover ./... | column -t | sort -r
-+
-+test-with-coverage-profile:
-+ echo "mode: ${GO_TEST_COVERAGE_MODE}" > ${GO_TEST_COVERAGE_FILE_NAME}
-+ for package in $$(go list ./...); do \
-+ go test -covermode ${GO_TEST_COVERAGE_MODE} -coverprofile "coverage_$${package##*/}.out" "$${package}"; \
-+ sed '1d' "coverage_$${package##*/}.out" >> ${GO_TEST_COVERAGE_FILE_NAME}; \
-+ done
-+
-+format-lint:
-+ errors=$$(gofmt -l ${GOFMT_FLAGS} .); if [ "$${errors}" != "" ]; then echo "$${errors}"; exit 1; fi
-+
-+import-lint:
-+ errors=$$(goimports -l .); if [ "$${errors}" != "" ]; then echo "$${errors}"; exit 1; fi
-+
-+style-lint:
-+ errors=$$(golint -min_confidence=${GOLINT_MIN_CONFIDENCE} ./...); if [ "$${errors}" != "" ]; then echo "$${errors}"; exit 1; fi
-+
-+copyright-lint:
-+ @old_dates=$$(git diff --diff-filter=ACMRTUXB --name-only "${PARENT_BRANCH}" | xargs grep -E '[Cc]opyright(\s+)[©Cc]?(\s+)[0-9]{4}' | grep -E -v "[Cc]opyright(\s+)[©Cc]?(\s+)$$(date '+%Y')"); if [ "$${old_dates}" != "" ]; then printf "The following files contain outdated copyrights:\n$${old_dates}\n\nThis can be fixed with 'make copyright-fix'\n"; exit 1; fi
-+
-+lint: install-deps-dev format-lint import-lint style-lint copyright-lint
-+
-+format-fix:
-+ gofmt -w ${GOFMT_FLAGS} .
-+
-+import-fix:
-+ goimports -w .
-+
-+copyright-fix:
-+ @git diff --diff-filter=ACMRTUXB --name-only "${PARENT_BRANCH}" | xargs -I '_FILENAME' -- sh -c 'sed -i.bak "s/\([Cc]opyright\([[:space:]][©Cc]\{0,1\}[[:space:]]*\)\)[0-9]\{4\}/\1"$$(date '+%Y')"/g" _FILENAME && rm _FILENAME.bak'
-+
-+vet:
-+ go vet ./...
-+
-+
-+.PHONY: all clean build install install-deps install-deps-dev update-deps update-deps-dev test test-with-coverage test-with-coverage-formatted test-with-coverage-profile format-lint import-lint style-lint copyright-lint lint format-fix import-fix copyright-fix vet
-diff --git a/vendor/github.com/Rican7/retry/README.md b/vendor/github.com/Rican7/retry/README.md
-new file mode 100644
-index 00000000000..bccf4dec76c
---- /dev/null
-+++ b/vendor/github.com/Rican7/retry/README.md
-@@ -0,0 +1,101 @@
-+# retry
-+
-+[![Build Status](https://travis-ci.org/Rican7/retry.svg?branch=master)](https://travis-ci.org/Rican7/retry)
-+[![Coverage Status](https://coveralls.io/repos/github/Rican7/retry/badge.svg)](https://coveralls.io/github/Rican7/retry)
-+[![Go Report Card](https://goreportcard.com/badge/Rican7/retry)](http://goreportcard.com/report/Rican7/retry)
-+[![GoDoc](https://godoc.org/github.com/Rican7/retry?status.png)](https://godoc.org/github.com/Rican7/retry)
-+[![Latest Stable Version](https://img.shields.io/github/release/Rican7/retry.svg?style=flat)](https://github.com/Rican7/retry/releases)
-+
-+A simple, stateless, functional mechanism to perform actions repetitively until successful.
-+
-+
-+## Project Status
-+
-+This project is currently in "pre-release". While the code is heavily tested, the API may change.
-+Vendor (commit or lock) this dependency if you plan on using it.
-+
-+
-+## Install
-+
-+`go get github.com/Rican7/retry`
-+
-+
-+## Examples
-+
-+### Basic
-+
-+```go
-+retry.Retry(func(attempt uint) error {
-+ return nil // Do something that may or may not cause an error
-+})
-+```
-+
-+### File Open
-+
-+```go
-+const logFilePath = "/var/log/myapp.log"
-+
-+var logFile *os.File
-+
-+err := retry.Retry(func(attempt uint) error {
-+ var err error
-+
-+ logFile, err = os.Open(logFilePath)
-+
-+ return err
-+})
-+
-+if nil != err {
-+ log.Fatalf("Unable to open file %q with error %q", logFilePath, err)
-+}
-+
-+logFile.Chdir() // Do something with the file
-+```
-+
-+### HTTP request with strategies and backoff
-+
-+```go
-+var response *http.Response
-+
-+action := func(attempt uint) error {
-+ var err error
-+
-+ response, err = http.Get("https://api.github.com/repos/Rican7/retry")
-+
-+ if nil == err && nil != response && response.StatusCode > 200 {
-+ err = fmt.Errorf("failed to fetch (attempt #%d) with status code: %d", attempt, response.StatusCode)
-+ }
-+
-+ return err
-+}
-+
-+err := retry.Retry(
-+ action,
-+ strategy.Limit(5),
-+ strategy.Backoff(backoff.Fibonacci(10*time.Millisecond)),
-+)
-+
-+if nil != err {
-+ log.Fatalf("Failed to fetch repository with error %q", err)
-+}
-+```
-+
-+### Retry with backoff jitter
-+
-+```go
-+action := func(attempt uint) error {
-+ return errors.New("something happened")
-+}
-+
-+seed := time.Now().UnixNano()
-+random := rand.New(rand.NewSource(seed))
-+
-+retry.Retry(
-+ action,
-+ strategy.Limit(5),
-+ strategy.BackoffWithJitter(
-+ backoff.BinaryExponential(10*time.Millisecond),
-+ jitter.Deviation(random, 0.5),
-+ ),
-+)
-+```
-diff --git a/vendor/github.com/Rican7/retry/backoff/backoff.go b/vendor/github.com/Rican7/retry/backoff/backoff.go
-new file mode 100644
-index 00000000000..5369a75a1c6
---- /dev/null
-+++ b/vendor/github.com/Rican7/retry/backoff/backoff.go
-@@ -0,0 +1,67 @@
-+// Package backoff provides stateless methods of calculating durations based on
-+// a number of attempts made.
-+//
-+// Copyright © 2016 Trevor N. Suarez (Rican7)
-+package backoff
-+
-+import (
-+ "math"
-+ "time"
-+)
-+
-+// Algorithm defines a function that calculates a time.Duration based on
-+// the given retry attempt number.
-+type Algorithm func(attempt uint) time.Duration
-+
-+// Incremental creates a Algorithm that increments the initial duration
-+// by the given increment for each attempt.
-+func Incremental(initial, increment time.Duration) Algorithm {
-+ return func(attempt uint) time.Duration {
-+ return initial + (increment * time.Duration(attempt))
-+ }
-+}
-+
-+// Linear creates a Algorithm that linearly multiplies the factor
-+// duration by the attempt number for each attempt.
-+func Linear(factor time.Duration) Algorithm {
-+ return func(attempt uint) time.Duration {
-+ return (factor * time.Duration(attempt))
-+ }
-+}
-+
-+// Exponential creates a Algorithm that multiplies the factor duration by
-+// an exponentially increasing factor for each attempt, where the factor is
-+// calculated as the given base raised to the attempt number.
-+func Exponential(factor time.Duration, base float64) Algorithm {
-+ return func(attempt uint) time.Duration {
-+ return (factor * time.Duration(math.Pow(base, float64(attempt))))
-+ }
-+}
-+
-+// BinaryExponential creates a Algorithm that multiplies the factor
-+// duration by an exponentially increasing factor for each attempt, where the
-+// factor is calculated as `2` raised to the attempt number (2^attempt).
-+func BinaryExponential(factor time.Duration) Algorithm {
-+ return Exponential(factor, 2)
-+}
-+
-+// Fibonacci creates a Algorithm that multiplies the factor duration by
-+// an increasing factor for each attempt, where the factor is the Nth number in
-+// the Fibonacci sequence.
-+func Fibonacci(factor time.Duration) Algorithm {
-+ return func(attempt uint) time.Duration {
-+ return (factor * time.Duration(fibonacciNumber(attempt)))
-+ }
-+}
-+
-+// fibonacciNumber calculates the Fibonacci sequence number for the given
-+// sequence position.
-+func fibonacciNumber(n uint) uint {
-+ if 0 == n {
-+ return 0
-+ } else if 1 == n {
-+ return 1
-+ } else {
-+ return fibonacciNumber(n-1) + fibonacciNumber(n-2)
-+ }
-+}
-diff --git a/vendor/github.com/Rican7/retry/backoff/backoff_test.go b/vendor/github.com/Rican7/retry/backoff/backoff_test.go
-new file mode 100644
-index 00000000000..6e7fae3d648
---- /dev/null
-+++ b/vendor/github.com/Rican7/retry/backoff/backoff_test.go
-@@ -0,0 +1,178 @@
-+package backoff
-+
-+import (
-+ "fmt"
-+ "math"
-+ "testing"
-+ "time"
-+)
-+
-+func TestIncremental(t *testing.T) {
-+ const duration = time.Millisecond
-+ const increment = time.Nanosecond
-+
-+ algorithm := Incremental(duration, increment)
-+
-+ for i := uint(0); i < 10; i++ {
-+ result := algorithm(i)
-+ expected := duration + (increment * time.Duration(i))
-+
-+ if result != expected {
-+ t.Errorf("algorithm expected to return a %s duration, but received %s instead", expected, result)
-+ }
-+ }
-+}
-+
-+func TestLinear(t *testing.T) {
-+ const duration = time.Millisecond
-+
-+ algorithm := Linear(duration)
-+
-+ for i := uint(0); i < 10; i++ {
-+ result := algorithm(i)
-+ expected := duration * time.Duration(i)
-+
-+ if result != expected {
-+ t.Errorf("algorithm expected to return a %s duration, but received %s instead", expected, result)
-+ }
-+ }
-+}
-+
-+func TestExponential(t *testing.T) {
-+ const duration = time.Second
-+ const base = 3
-+
-+ algorithm := Exponential(duration, base)
-+
-+ for i := uint(0); i < 10; i++ {
-+ result := algorithm(i)
-+ expected := duration * time.Duration(math.Pow(base, float64(i)))
-+
-+ if result != expected {
-+ t.Errorf("algorithm expected to return a %s duration, but received %s instead", expected, result)
-+ }
-+ }
-+}
-+
-+func TestBinaryExponential(t *testing.T) {
-+ const duration = time.Second
-+
-+ algorithm := BinaryExponential(duration)
-+
-+ for i := uint(0); i < 10; i++ {
-+ result := algorithm(i)
-+ expected := duration * time.Duration(math.Pow(2, float64(i)))
-+
-+ if result != expected {
-+ t.Errorf("algorithm expected to return a %s duration, but received %s instead", expected, result)
-+ }
-+ }
-+}
-+
-+func TestFibonacci(t *testing.T) {
-+ const duration = time.Millisecond
-+
-+ algorithm := Fibonacci(duration)
-+
-+ for i := uint(0); i < 10; i++ {
-+ result := algorithm(i)
-+ expected := duration * time.Duration(fibonacciNumber(i))
-+
-+ if result != expected {
-+ t.Errorf("algorithm expected to return a %s duration, but received %s instead", expected, result)
-+ }
-+ }
-+}
-+
-+func TestFibonacciNumber(t *testing.T) {
-+ // Fibonacci sequence
-+ expectedSequence := []uint{0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233}
-+
-+ for i, expected := range expectedSequence {
-+ result := fibonacciNumber(uint(i))
-+
-+ if result != expected {
-+ t.Errorf("fibonacci %d number expected %d, but got %d", i, expected, result)
-+ }
-+ }
-+}
-+
-+func ExampleIncremental() {
-+ algorithm := Incremental(15*time.Millisecond, 10*time.Millisecond)
-+
-+ for i := uint(1); i <= 5; i++ {
-+ duration := algorithm(i)
-+
-+ fmt.Printf("#%d attempt: %s\n", i, duration)
-+ // Output:
-+ // #1 attempt: 25ms
-+ // #2 attempt: 35ms
-+ // #3 attempt: 45ms
-+ // #4 attempt: 55ms
-+ // #5 attempt: 65ms
-+ }
-+}
-+
-+func ExampleLinear() {
-+ algorithm := Linear(15 * time.Millisecond)
-+
-+ for i := uint(1); i <= 5; i++ {
-+ duration := algorithm(i)
-+
-+ fmt.Printf("#%d attempt: %s\n", i, duration)
-+ // Output:
-+ // #1 attempt: 15ms
-+ // #2 attempt: 30ms
-+ // #3 attempt: 45ms
-+ // #4 attempt: 60ms
-+ // #5 attempt: 75ms
-+ }
-+}
-+
-+func ExampleExponential() {
-+ algorithm := Exponential(15*time.Millisecond, 3)
-+
-+ for i := uint(1); i <= 5; i++ {
-+ duration := algorithm(i)
-+
-+ fmt.Printf("#%d attempt: %s\n", i, duration)
-+ // Output:
-+ // #1 attempt: 45ms
-+ // #2 attempt: 135ms
-+ // #3 attempt: 405ms
-+ // #4 attempt: 1.215s
-+ // #5 attempt: 3.645s
-+ }
-+}
-+
-+func ExampleBinaryExponential() {
-+ algorithm := BinaryExponential(15 * time.Millisecond)
-+
-+ for i := uint(1); i <= 5; i++ {
-+ duration := algorithm(i)
-+
-+ fmt.Printf("#%d attempt: %s\n", i, duration)
-+ // Output:
-+ // #1 attempt: 30ms
-+ // #2 attempt: 60ms
-+ // #3 attempt: 120ms
-+ // #4 attempt: 240ms
-+ // #5 attempt: 480ms
-+ }
-+}
-+
-+func ExampleFibonacci() {
-+ algorithm := Fibonacci(15 * time.Millisecond)
-+
-+ for i := uint(1); i <= 5; i++ {
-+ duration := algorithm(i)
-+
-+ fmt.Printf("#%d attempt: %s\n", i, duration)
-+ // Output:
-+ // #1 attempt: 15ms
-+ // #2 attempt: 15ms
-+ // #3 attempt: 30ms
-+ // #4 attempt: 45ms
-+ // #5 attempt: 75ms
-+ }
-+}
-diff --git a/vendor/github.com/Rican7/retry/example_test.go b/vendor/github.com/Rican7/retry/example_test.go
-new file mode 100644
-index 00000000000..9f8bf4ec7e2
---- /dev/null
-+++ b/vendor/github.com/Rican7/retry/example_test.go
-@@ -0,0 +1,86 @@
-+package retry_test
-+
-+import (
-+ "errors"
-+ "fmt"
-+ "log"
-+ "math/rand"
-+ "net/http"
-+ "os"
-+ "time"
-+
-+ "github.com/Rican7/retry"
-+ "github.com/Rican7/retry/backoff"
-+ "github.com/Rican7/retry/jitter"
-+ "github.com/Rican7/retry/strategy"
-+)
-+
-+func Example() {
-+ retry.Retry(func(attempt uint) error {
-+ return nil // Do something that may or may not cause an error
-+ })
-+}
-+
-+func Example_fileOpen() {
-+ const logFilePath = "/var/log/myapp.log"
-+
-+ var logFile *os.File
-+
-+ err := retry.Retry(func(attempt uint) error {
-+ var err error
-+
-+ logFile, err = os.Open(logFilePath)
-+
-+ return err
-+ })
-+
-+ if nil != err {
-+ log.Fatalf("Unable to open file %q with error %q", logFilePath, err)
-+ }
-+
-+ logFile.Chdir() // Do something with the file
-+}
-+
-+func Example_httpGetWithStrategies() {
-+ var response *http.Response
-+
-+ action := func(attempt uint) error {
-+ var err error
-+
-+ response, err = http.Get("https://api.github.com/repos/Rican7/retry")
-+
-+ if nil == err && nil != response && response.StatusCode > 200 {
-+ err = fmt.Errorf("failed to fetch (attempt #%d) with status code: %d", attempt, response.StatusCode)
-+ }
-+
-+ return err
-+ }
-+
-+ err := retry.Retry(
-+ action,
-+ strategy.Limit(5),
-+ strategy.Backoff(backoff.Fibonacci(10*time.Millisecond)),
-+ )
-+
-+ if nil != err {
-+ log.Fatalf("Failed to fetch repository with error %q", err)
-+ }
-+}
-+
-+func Example_withBackoffJitter() {
-+ action := func(attempt uint) error {
-+ return errors.New("something happened")
-+ }
-+
-+ seed := time.Now().UnixNano()
-+ random := rand.New(rand.NewSource(seed))
-+
-+ retry.Retry(
-+ action,
-+ strategy.Limit(5),
-+ strategy.BackoffWithJitter(
-+ backoff.BinaryExponential(10*time.Millisecond),
-+ jitter.Deviation(random, 0.5),
-+ ),
-+ )
-+}
-diff --git a/vendor/github.com/Rican7/retry/jitter/jitter.go b/vendor/github.com/Rican7/retry/jitter/jitter.go
-new file mode 100644
-index 00000000000..e94ad892796
---- /dev/null
-+++ b/vendor/github.com/Rican7/retry/jitter/jitter.go
-@@ -0,0 +1,89 @@
-+// Package jitter provides methods of transforming durations.
-+//
-+// Copyright © 2016 Trevor N. Suarez (Rican7)
-+package jitter
-+
-+import (
-+ "math"
-+ "math/rand"
-+ "time"
-+)
-+
-+// Transformation defines a function that calculates a time.Duration based on
-+// the given duration.
-+type Transformation func(duration time.Duration) time.Duration
-+
-+// Full creates a Transformation that transforms a duration into a result
-+// duration in [0, n) randomly, where n is the given duration.
-+//
-+// The given generator is what is used to determine the random transformation.
-+// If a nil generator is passed, a default one will be provided.
-+//
-+// Inspired by https://www.awsarchitectureblog.com/2015/03/backoff.html
-+func Full(generator *rand.Rand) Transformation {
-+ random := fallbackNewRandom(generator)
-+
-+ return func(duration time.Duration) time.Duration {
-+ return time.Duration(random.Int63n(int64(duration)))
-+ }
-+}
-+
-+// Equal creates a Transformation that transforms a duration into a result
-+// duration in [n/2, n) randomly, where n is the given duration.
-+//
-+// The given generator is what is used to determine the random transformation.
-+// If a nil generator is passed, a default one will be provided.
-+//
-+// Inspired by https://www.awsarchitectureblog.com/2015/03/backoff.html
-+func Equal(generator *rand.Rand) Transformation {
-+ random := fallbackNewRandom(generator)
-+
-+ return func(duration time.Duration) time.Duration {
-+ return (duration / 2) + time.Duration(random.Int63n(int64(duration))/2)
-+ }
-+}
-+
-+// Deviation creates a Transformation that transforms a duration into a result
-+// duration that deviates from the input randomly by a given factor.
-+//
-+// The given generator is what is used to determine the random transformation.
-+// If a nil generator is passed, a default one will be provided.
-+//
-+// Inspired by https://developers.google.com/api-client-library/java/google-http-java-client/backoff
-+func Deviation(generator *rand.Rand, factor float64) Transformation {
-+ random := fallbackNewRandom(generator)
-+
-+ return func(duration time.Duration) time.Duration {
-+ min := int64(math.Floor(float64(duration) * (1 - factor)))
-+ max := int64(math.Ceil(float64(duration) * (1 + factor)))
-+
-+ return time.Duration(random.Int63n(max-min) + min)
-+ }
-+}
-+
-+// NormalDistribution creates a Transformation that transforms a duration into a
-+// result duration based on a normal distribution of the input and the given
-+// standard deviation.
-+//
-+// The given generator is what is used to determine the random transformation.
-+// If a nil generator is passed, a default one will be provided.
-+func NormalDistribution(generator *rand.Rand, standardDeviation float64) Transformation {
-+ random := fallbackNewRandom(generator)
-+
-+ return func(duration time.Duration) time.Duration {
-+ return time.Duration(random.NormFloat64()*standardDeviation + float64(duration))
-+ }
-+}
-+
-+// fallbackNewRandom returns the passed in random instance if it's not nil,
-+// and otherwise returns a new random instance seeded with the current time.
-+func fallbackNewRandom(random *rand.Rand) *rand.Rand {
-+ // Return the passed in value if it's already not null
-+ if nil != random {
-+ return random
-+ }
-+
-+ seed := time.Now().UnixNano()
-+
-+ return rand.New(rand.NewSource(seed))
-+}
-diff --git a/vendor/github.com/Rican7/retry/jitter/jitter_test.go b/vendor/github.com/Rican7/retry/jitter/jitter_test.go
-new file mode 100644
-index 00000000000..4dcd2ba7a96
---- /dev/null
-+++ b/vendor/github.com/Rican7/retry/jitter/jitter_test.go
-@@ -0,0 +1,101 @@
-+package jitter
-+
-+import (
-+ "math/rand"
-+ "testing"
-+ "time"
-+)
-+
-+func TestFull(t *testing.T) {
-+ const seed = 0
-+ const duration = time.Millisecond
-+
-+ generator := rand.New(rand.NewSource(seed))
-+
-+ transformation := Full(generator)
-+
-+ // Based on constant seed
-+ expectedDurations := []time.Duration{165505, 393152, 995827, 197794, 376202}
-+
-+ for _, expected := range expectedDurations {
-+ result := transformation(duration)
-+
-+ if result != expected {
-+ t.Errorf("transformation expected to return a %s duration, but received %s instead", expected, result)
-+ }
-+ }
-+}
-+
-+func TestEqual(t *testing.T) {
-+ const seed = 0
-+ const duration = time.Millisecond
-+
-+ generator := rand.New(rand.NewSource(seed))
-+
-+ transformation := Equal(generator)
-+
-+ // Based on constant seed
-+ expectedDurations := []time.Duration{582752, 696576, 997913, 598897, 688101}
-+
-+ for _, expected := range expectedDurations {
-+ result := transformation(duration)
-+
-+ if result != expected {
-+ t.Errorf("transformation expected to return a %s duration, but received %s instead", expected, result)
-+ }
-+ }
-+}
-+
-+func TestDeviation(t *testing.T) {
-+ const seed = 0
-+ const duration = time.Millisecond
-+ const factor = 0.5
-+
-+ generator := rand.New(rand.NewSource(seed))
-+
-+ transformation := Deviation(generator, factor)
-+
-+ // Based on constant seed
-+ expectedDurations := []time.Duration{665505, 893152, 1495827, 697794, 876202}
-+
-+ for _, expected := range expectedDurations {
-+ result := transformation(duration)
-+
-+ if result != expected {
-+ t.Errorf("transformation expected to return a %s duration, but received %s instead", expected, result)
-+ }
-+ }
-+}
-+
-+func TestNormalDistribution(t *testing.T) {
-+ const seed = 0
-+ const duration = time.Millisecond
-+ const standardDeviation = float64(duration / 2)
-+
-+ generator := rand.New(rand.NewSource(seed))
-+
-+ transformation := NormalDistribution(generator, standardDeviation)
-+
-+ // Based on constant seed
-+ expectedDurations := []time.Duration{859207, 1285466, 153990, 1099811, 1959759}
-+
-+ for _, expected := range expectedDurations {
-+ result := transformation(duration)
-+
-+ if result != expected {
-+ t.Errorf("transformation expected to return a %s duration, but received %s instead", expected, result)
-+ }
-+ }
-+}
-+
-+func TestFallbackNewRandom(t *testing.T) {
-+ generator := rand.New(rand.NewSource(0))
-+
-+ if result := fallbackNewRandom(generator); generator != result {
-+ t.Errorf("result expected to match parameter, received %+v instead", result)
-+ }
-+
-+ if result := fallbackNewRandom(nil); nil == result {
-+ t.Error("recieved unexpected nil result")
-+ }
-+}
-diff --git a/vendor/github.com/Rican7/retry/retry.go b/vendor/github.com/Rican7/retry/retry.go
-new file mode 100644
-index 00000000000..15015db257f
---- /dev/null
-+++ b/vendor/github.com/Rican7/retry/retry.go
-@@ -0,0 +1,36 @@
-+// Package retry provides a simple, stateless, functional mechanism to perform
-+// actions repetitively until successful.
-+//
-+// Copyright © 2016 Trevor N. Suarez (Rican7)
-+package retry
-+
-+import "github.com/Rican7/retry/strategy"
-+
-+// Action defines a callable function that package retry can handle.
-+type Action func(attempt uint) error
-+
-+// Retry takes an action and performs it, repetitively, until successful.
-+//
-+// Optionally, strategies may be passed that assess whether or not an attempt
-+// should be made.
-+func Retry(action Action, strategies ...strategy.Strategy) error {
-+ var err error
-+
-+ for attempt := uint(0); (0 == attempt || nil != err) && shouldAttempt(attempt, strategies...); attempt++ {
-+ err = action(attempt)
-+ }
-+
-+ return err
-+}
-+
-+// shouldAttempt evaluates the provided strategies with the given attempt to
-+// determine if the Retry loop should make another attempt.
-+func shouldAttempt(attempt uint, strategies ...strategy.Strategy) bool {
-+ shouldAttempt := true
-+
-+ for i := 0; shouldAttempt && i < len(strategies); i++ {
-+ shouldAttempt = shouldAttempt && strategies[i](attempt)
-+ }
-+
-+ return shouldAttempt
-+}
-diff --git a/vendor/github.com/Rican7/retry/retry_test.go b/vendor/github.com/Rican7/retry/retry_test.go
-new file mode 100644
-index 00000000000..8340a156b3c
---- /dev/null
-+++ b/vendor/github.com/Rican7/retry/retry_test.go
-@@ -0,0 +1,122 @@
-+package retry
-+
-+import (
-+ "errors"
-+ "testing"
-+)
-+
-+func TestRetry(t *testing.T) {
-+ action := func(attempt uint) error {
-+ return nil
-+ }
-+
-+ err := Retry(action)
-+
-+ if nil != err {
-+ t.Error("expected a nil error")
-+ }
-+}
-+
-+func TestRetryRetriesUntilNoErrorReturned(t *testing.T) {
-+ const errorUntilAttemptNumber = 5
-+
-+ var attemptsMade uint
-+
-+ action := func(attempt uint) error {
-+ attemptsMade = attempt
-+
-+ if errorUntilAttemptNumber == attempt {
-+ return nil
-+ }
-+
-+ return errors.New("erroring")
-+ }
-+
-+ err := Retry(action)
-+
-+ if nil != err {
-+ t.Error("expected a nil error")
-+ }
-+
-+ if errorUntilAttemptNumber != attemptsMade {
-+ t.Errorf(
-+ "expected %d attempts to be made, but %d were made instead",
-+ errorUntilAttemptNumber,
-+ attemptsMade,
-+ )
-+ }
-+}
-+
-+func TestShouldAttempt(t *testing.T) {
-+ shouldAttempt := shouldAttempt(1)
-+
-+ if !shouldAttempt {
-+ t.Error("expected to return true")
-+ }
-+}
-+
-+func TestShouldAttemptWithStrategy(t *testing.T) {
-+ const attemptNumberShouldReturnFalse = 7
-+
-+ strategy := func(attempt uint) bool {
-+ return (attemptNumberShouldReturnFalse != attempt)
-+ }
-+
-+ should := shouldAttempt(1, strategy)
-+
-+ if !should {
-+ t.Error("expected to return true")
-+ }
-+
-+ should = shouldAttempt(1+attemptNumberShouldReturnFalse, strategy)
-+
-+ if !should {
-+ t.Error("expected to return true")
-+ }
-+
-+ should = shouldAttempt(attemptNumberShouldReturnFalse, strategy)
-+
-+ if should {
-+ t.Error("expected to return false")
-+ }
-+}
-+
-+func TestShouldAttemptWithMultipleStrategies(t *testing.T) {
-+ trueStrategy := func(attempt uint) bool {
-+ return true
-+ }
-+
-+ falseStrategy := func(attempt uint) bool {
-+ return false
-+ }
-+
-+ should := shouldAttempt(1, trueStrategy)
-+
-+ if !should {
-+ t.Error("expected to return true")
-+ }
-+
-+ should = shouldAttempt(1, falseStrategy)
-+
-+ if should {
-+ t.Error("expected to return false")
-+ }
-+
-+ should = shouldAttempt(1, trueStrategy, trueStrategy, trueStrategy)
-+
-+ if !should {
-+ t.Error("expected to return true")
-+ }
-+
-+ should = shouldAttempt(1, falseStrategy, falseStrategy, falseStrategy)
-+
-+ if should {
-+ t.Error("expected to return false")
-+ }
-+
-+ should = shouldAttempt(1, trueStrategy, trueStrategy, falseStrategy)
-+
-+ if should {
-+ t.Error("expected to return false")
-+ }
-+}
-diff --git a/vendor/github.com/Rican7/retry/strategy/strategy.go b/vendor/github.com/Rican7/retry/strategy/strategy.go
-new file mode 100644
-index 00000000000..a315fa02cbf
---- /dev/null
-+++ b/vendor/github.com/Rican7/retry/strategy/strategy.go
-@@ -0,0 +1,85 @@
-+// Package strategy provides a way to change the way that retry is performed.
-+//
-+// Copyright © 2016 Trevor N. Suarez (Rican7)
-+package strategy
-+
-+import (
-+ "time"
-+
-+ "github.com/Rican7/retry/backoff"
-+ "github.com/Rican7/retry/jitter"
-+)
-+
-+// Strategy defines a function that Retry calls before every successive attempt
-+// to determine whether it should make the next attempt or not. Returning `true`
-+// allows for the next attempt to be made. Returning `false` halts the retrying
-+// process and returns the last error returned by the called Action.
-+//
-+// The strategy will be passed an "attempt" number on each successive retry
-+// iteration, starting with a `0` value before the first attempt is actually
-+// made. This allows for a pre-action delay, etc.
-+type Strategy func(attempt uint) bool
-+
-+// Limit creates a Strategy that limits the number of attempts that Retry will
-+// make.
-+func Limit(attemptLimit uint) Strategy {
-+ return func(attempt uint) bool {
-+ return (attempt <= attemptLimit)
-+ }
-+}
-+
-+// Delay creates a Strategy that waits the given duration before the first
-+// attempt is made.
-+func Delay(duration time.Duration) Strategy {
-+ return func(attempt uint) bool {
-+ if 0 == attempt {
-+ time.Sleep(duration)
-+ }
-+
-+ return true
-+ }
-+}
-+
-+// Wait creates a Strategy that waits the given durations for each attempt after
-+// the first. If the number of attempts is greater than the number of durations
-+// provided, then the strategy uses the last duration provided.
-+func Wait(durations ...time.Duration) Strategy {
-+ return func(attempt uint) bool {
-+ if 0 < attempt && 0 < len(durations) {
-+ durationIndex := int(attempt - 1)
-+
-+ if len(durations) <= durationIndex {
-+ durationIndex = len(durations) - 1
-+ }
-+
-+ time.Sleep(durations[durationIndex])
-+ }
-+
-+ return true
-+ }
-+}
-+
-+// Backoff creates a Strategy that waits before each attempt, with a duration as
-+// defined by the given backoff.Algorithm.
-+func Backoff(algorithm backoff.Algorithm) Strategy {
-+ return BackoffWithJitter(algorithm, noJitter())
-+}
-+
-+// BackoffWithJitter creates a Strategy that waits before each attempt, with a
-+// duration as defined by the given backoff.Algorithm and jitter.Transformation.
-+func BackoffWithJitter(algorithm backoff.Algorithm, transformation jitter.Transformation) Strategy {
-+ return func(attempt uint) bool {
-+ if 0 < attempt {
-+ time.Sleep(transformation(algorithm(attempt)))
-+ }
-+
-+ return true
-+ }
-+}
-+
-+// noJitter creates a jitter.Transformation that simply returns the input.
-+func noJitter() jitter.Transformation {
-+ return func(duration time.Duration) time.Duration {
-+ return duration
-+ }
-+}
-diff --git a/vendor/github.com/Rican7/retry/strategy/strategy_test.go b/vendor/github.com/Rican7/retry/strategy/strategy_test.go
-new file mode 100644
-index 00000000000..17488f54f3a
---- /dev/null
-+++ b/vendor/github.com/Rican7/retry/strategy/strategy_test.go
-@@ -0,0 +1,184 @@
-+package strategy
-+
-+import (
-+ "testing"
-+ "time"
-+)
-+
-+// timeMarginOfError represents the acceptable amount of time that may pass for
-+// a time-based (sleep) unit before considering invalid.
-+const timeMarginOfError = time.Millisecond
-+
-+func TestLimit(t *testing.T) {
-+ const attemptLimit = 3
-+
-+ strategy := Limit(attemptLimit)
-+
-+ if !strategy(1) {
-+ t.Error("strategy expected to return true")
-+ }
-+
-+ if !strategy(2) {
-+ t.Error("strategy expected to return true")
-+ }
-+
-+ if !strategy(3) {
-+ t.Error("strategy expected to return true")
-+ }
-+
-+ if strategy(4) {
-+ t.Error("strategy expected to return false")
-+ }
-+}
-+
-+func TestDelay(t *testing.T) {
-+ const delayDuration = time.Duration(10 * timeMarginOfError)
-+
-+ strategy := Delay(delayDuration)
-+
-+ if now := time.Now(); !strategy(0) || delayDuration > time.Since(now) {
-+ t.Errorf(
-+ "strategy expected to return true in %s",
-+ time.Duration(delayDuration),
-+ )
-+ }
-+
-+ if now := time.Now(); !strategy(5) || (delayDuration/10) < time.Since(now) {
-+ t.Error("strategy expected to return true in ~0 time")
-+ }
-+}
-+
-+func TestWait(t *testing.T) {
-+ strategy := Wait()
-+
-+ if now := time.Now(); !strategy(0) || timeMarginOfError < time.Since(now) {
-+ t.Error("strategy expected to return true in ~0 time")
-+ }
-+
-+ if now := time.Now(); !strategy(999) || timeMarginOfError < time.Since(now) {
-+ t.Error("strategy expected to return true in ~0 time")
-+ }
-+}
-+
-+func TestWaitWithDuration(t *testing.T) {
-+ const waitDuration = time.Duration(10 * timeMarginOfError)
-+
-+ strategy := Wait(waitDuration)
-+
-+ if now := time.Now(); !strategy(0) || timeMarginOfError < time.Since(now) {
-+ t.Error("strategy expected to return true in ~0 time")
-+ }
-+
-+ if now := time.Now(); !strategy(1) || waitDuration > time.Since(now) {
-+ t.Errorf(
-+ "strategy expected to return true in %s",
-+ time.Duration(waitDuration),
-+ )
-+ }
-+}
-+
-+func TestWaitWithMultipleDurations(t *testing.T) {
-+ waitDurations := []time.Duration{
-+ time.Duration(10 * timeMarginOfError),
-+ time.Duration(20 * timeMarginOfError),
-+ time.Duration(30 * timeMarginOfError),
-+ time.Duration(40 * timeMarginOfError),
-+ }
-+
-+ strategy := Wait(waitDurations...)
-+
-+ if now := time.Now(); !strategy(0) || timeMarginOfError < time.Since(now) {
-+ t.Error("strategy expected to return true in ~0 time")
-+ }
-+
-+ if now := time.Now(); !strategy(1) || waitDurations[0] > time.Since(now) {
-+ t.Errorf(
-+ "strategy expected to return true in %s",
-+ time.Duration(waitDurations[0]),
-+ )
-+ }
-+
-+ if now := time.Now(); !strategy(3) || waitDurations[2] > time.Since(now) {
-+ t.Errorf(
-+ "strategy expected to return true in %s",
-+ waitDurations[2],
-+ )
-+ }
-+
-+ if now := time.Now(); !strategy(999) || waitDurations[len(waitDurations)-1] > time.Since(now) {
-+ t.Errorf(
-+ "strategy expected to return true in %s",
-+ waitDurations[len(waitDurations)-1],
-+ )
-+ }
-+}
-+
-+func TestBackoff(t *testing.T) {
-+ const backoffDuration = time.Duration(10 * timeMarginOfError)
-+ const algorithmDurationBase = timeMarginOfError
-+
-+ algorithm := func(attempt uint) time.Duration {
-+ return backoffDuration - (algorithmDurationBase * time.Duration(attempt))
-+ }
-+
-+ strategy := Backoff(algorithm)
-+
-+ if now := time.Now(); !strategy(0) || timeMarginOfError < time.Since(now) {
-+ t.Error("strategy expected to return true in ~0 time")
-+ }
-+
-+ for i := uint(1); i < 10; i++ {
-+ expectedResult := algorithm(i)
-+
-+ if now := time.Now(); !strategy(i) || expectedResult > time.Since(now) {
-+ t.Errorf(
-+ "strategy expected to return true in %s",
-+ expectedResult,
-+ )
-+ }
-+ }
-+}
-+
-+func TestBackoffWithJitter(t *testing.T) {
-+ const backoffDuration = time.Duration(10 * timeMarginOfError)
-+ const algorithmDurationBase = timeMarginOfError
-+
-+ algorithm := func(attempt uint) time.Duration {
-+ return backoffDuration - (algorithmDurationBase * time.Duration(attempt))
-+ }
-+
-+ transformation := func(duration time.Duration) time.Duration {
-+ return duration - time.Duration(10*timeMarginOfError)
-+ }
-+
-+ strategy := BackoffWithJitter(algorithm, transformation)
-+
-+ if now := time.Now(); !strategy(0) || timeMarginOfError < time.Since(now) {
-+ t.Error("strategy expected to return true in ~0 time")
-+ }
-+
-+ for i := uint(1); i < 10; i++ {
-+ expectedResult := transformation(algorithm(i))
-+
-+ if now := time.Now(); !strategy(i) || expectedResult > time.Since(now) {
-+ t.Errorf(
-+ "strategy expected to return true in %s",
-+ expectedResult,
-+ )
-+ }
-+ }
-+}
-+
-+func TestNoJitter(t *testing.T) {
-+ transformation := noJitter()
-+
-+ for i := uint(0); i < 10; i++ {
-+ duration := time.Duration(i) * timeMarginOfError
-+ result := transformation(duration)
-+ expected := duration
-+
-+ if result != expected {
-+ t.Errorf("transformation expected to return a %s duration, but received %s instead", expected, result)
-+ }
-+ }
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/.dir-locals.el b/vendor/github.com/canonical/go-dqlite/.dir-locals.el
-new file mode 100644
-index 00000000000..300939c293d
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/.dir-locals.el
-@@ -0,0 +1,8 @@
-+;;; Directory Local Variables
-+;;; For more information see (info "(emacs) Directory Variables")
-+((go-mode
-+ . ((go-test-args . "-tags libsqlite3 -timeout 10s")
-+ (eval
-+ . (set
-+ (make-local-variable 'flycheck-go-build-tags)
-+ '("libsqlite3"))))))
-diff --git a/vendor/github.com/canonical/go-dqlite/.gitignore b/vendor/github.com/canonical/go-dqlite/.gitignore
-new file mode 100644
-index 00000000000..d3da31a83a9
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/.gitignore
-@@ -0,0 +1,4 @@
-+.sqlite
-+demo
-+profile.coverprofile
-+overalls.coverprofile
-diff --git a/vendor/github.com/canonical/go-dqlite/.travis.yml b/vendor/github.com/canonical/go-dqlite/.travis.yml
-new file mode 100644
-index 00000000000..5a64007eb4a
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/.travis.yml
-@@ -0,0 +1,31 @@
-+dist: xenial
-+language: go
-+
-+addons:
-+ apt:
-+ sources:
-+ - sourceline: 'ppa:dqlite/master'
-+ packages:
-+ - golint
-+ - libsqlite3-dev
-+ - libuv1-dev
-+ - libraft-dev
-+ - libco-dev
-+ - libdqlite-dev
-+
-+before_install:
-+ - go get github.com/go-playground/overalls
-+ - go get github.com/mattn/goveralls
-+ - go get github.com/tsenart/deadcode
-+
-+script:
-+ - go get -t -tags libsqlite3 ./...
-+ - go vet -tags libsqlite3 ./...
-+ - golint
-+ - deadcode
-+ - project=github.com/canonical/go-dqlite
-+ - $GOPATH/bin/overalls -project $project -covermode=count -- -tags libsqlite3 -timeout 240s
-+ - $GOPATH/bin/goveralls -coverprofile overalls.coverprofile -service=travis-ci
-+
-+go:
-+ - "1.12"
-diff --git a/vendor/github.com/canonical/go-dqlite/AUTHORS b/vendor/github.com/canonical/go-dqlite/AUTHORS
-new file mode 100644
-index 00000000000..6e13f86ebb2
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/AUTHORS
-@@ -0,0 +1 @@
-+Free Ekanayaka
-diff --git a/vendor/github.com/canonical/go-dqlite/LICENSE b/vendor/github.com/canonical/go-dqlite/LICENSE
-new file mode 100644
-index 00000000000..261eeb9e9f8
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/LICENSE
-@@ -0,0 +1,201 @@
-+ Apache License
-+ Version 2.0, January 2004
-+ http://www.apache.org/licenses/
-+
-+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-+
-+ 1. Definitions.
-+
-+ "License" shall mean the terms and conditions for use, reproduction,
-+ and distribution as defined by Sections 1 through 9 of this document.
-+
-+ "Licensor" shall mean the copyright owner or entity authorized by
-+ the copyright owner that is granting the License.
-+
-+ "Legal Entity" shall mean the union of the acting entity and all
-+ other entities that control, are controlled by, or are under common
-+ control with that entity. For the purposes of this definition,
-+ "control" means (i) the power, direct or indirect, to cause the
-+ direction or management of such entity, whether by contract or
-+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
-+ outstanding shares, or (iii) beneficial ownership of such entity.
-+
-+ "You" (or "Your") shall mean an individual or Legal Entity
-+ exercising permissions granted by this License.
-+
-+ "Source" form shall mean the preferred form for making modifications,
-+ including but not limited to software source code, documentation
-+ source, and configuration files.
-+
-+ "Object" form shall mean any form resulting from mechanical
-+ transformation or translation of a Source form, including but
-+ not limited to compiled object code, generated documentation,
-+ and conversions to other media types.
-+
-+ "Work" shall mean the work of authorship, whether in Source or
-+ Object form, made available under the License, as indicated by a
-+ copyright notice that is included in or attached to the work
-+ (an example is provided in the Appendix below).
-+
-+ "Derivative Works" shall mean any work, whether in Source or Object
-+ form, that is based on (or derived from) the Work and for which the
-+ editorial revisions, annotations, elaborations, or other modifications
-+ represent, as a whole, an original work of authorship. For the purposes
-+ of this License, Derivative Works shall not include works that remain
-+ separable from, or merely link (or bind by name) to the interfaces of,
-+ the Work and Derivative Works thereof.
-+
-+ "Contribution" shall mean any work of authorship, including
-+ the original version of the Work and any modifications or additions
-+ to that Work or Derivative Works thereof, that is intentionally
-+ submitted to Licensor for inclusion in the Work by the copyright owner
-+ or by an individual or Legal Entity authorized to submit on behalf of
-+ the copyright owner. For the purposes of this definition, "submitted"
-+ means any form of electronic, verbal, or written communication sent
-+ to the Licensor or its representatives, including but not limited to
-+ communication on electronic mailing lists, source code control systems,
-+ and issue tracking systems that are managed by, or on behalf of, the
-+ Licensor for the purpose of discussing and improving the Work, but
-+ excluding communication that is conspicuously marked or otherwise
-+ designated in writing by the copyright owner as "Not a Contribution."
-+
-+ "Contributor" shall mean Licensor and any individual or Legal Entity
-+ on behalf of whom a Contribution has been received by Licensor and
-+ subsequently incorporated within the Work.
-+
-+ 2. Grant of Copyright License. Subject to the terms and conditions of
-+ this License, each Contributor hereby grants to You a perpetual,
-+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-+ copyright license to reproduce, prepare Derivative Works of,
-+ publicly display, publicly perform, sublicense, and distribute the
-+ Work and such Derivative Works in Source or Object form.
-+
-+ 3. Grant of Patent License. Subject to the terms and conditions of
-+ this License, each Contributor hereby grants to You a perpetual,
-+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-+ (except as stated in this section) patent license to make, have made,
-+ use, offer to sell, sell, import, and otherwise transfer the Work,
-+ where such license applies only to those patent claims licensable
-+ by such Contributor that are necessarily infringed by their
-+ Contribution(s) alone or by combination of their Contribution(s)
-+ with the Work to which such Contribution(s) was submitted. If You
-+ institute patent litigation against any entity (including a
-+ cross-claim or counterclaim in a lawsuit) alleging that the Work
-+ or a Contribution incorporated within the Work constitutes direct
-+ or contributory patent infringement, then any patent licenses
-+ granted to You under this License for that Work shall terminate
-+ as of the date such litigation is filed.
-+
-+ 4. Redistribution. You may reproduce and distribute copies of the
-+ Work or Derivative Works thereof in any medium, with or without
-+ modifications, and in Source or Object form, provided that You
-+ meet the following conditions:
-+
-+ (a) You must give any other recipients of the Work or
-+ Derivative Works a copy of this License; and
-+
-+ (b) You must cause any modified files to carry prominent notices
-+ stating that You changed the files; and
-+
-+ (c) You must retain, in the Source form of any Derivative Works
-+ that You distribute, all copyright, patent, trademark, and
-+ attribution notices from the Source form of the Work,
-+ excluding those notices that do not pertain to any part of
-+ the Derivative Works; and
-+
-+ (d) If the Work includes a "NOTICE" text file as part of its
-+ distribution, then any Derivative Works that You distribute must
-+ include a readable copy of the attribution notices contained
-+ within such NOTICE file, excluding those notices that do not
-+ pertain to any part of the Derivative Works, in at least one
-+ of the following places: within a NOTICE text file distributed
-+ as part of the Derivative Works; within the Source form or
-+ documentation, if provided along with the Derivative Works; or,
-+ within a display generated by the Derivative Works, if and
-+ wherever such third-party notices normally appear. The contents
-+ of the NOTICE file are for informational purposes only and
-+ do not modify the License. You may add Your own attribution
-+ notices within Derivative Works that You distribute, alongside
-+ or as an addendum to the NOTICE text from the Work, provided
-+ that such additional attribution notices cannot be construed
-+ as modifying the License.
-+
-+ You may add Your own copyright statement to Your modifications and
-+ may provide additional or different license terms and conditions
-+ for use, reproduction, or distribution of Your modifications, or
-+ for any such Derivative Works as a whole, provided Your use,
-+ reproduction, and distribution of the Work otherwise complies with
-+ the conditions stated in this License.
-+
-+ 5. Submission of Contributions. Unless You explicitly state otherwise,
-+ any Contribution intentionally submitted for inclusion in the Work
-+ by You to the Licensor shall be under the terms and conditions of
-+ this License, without any additional terms or conditions.
-+ Notwithstanding the above, nothing herein shall supersede or modify
-+ the terms of any separate license agreement you may have executed
-+ with Licensor regarding such Contributions.
-+
-+ 6. Trademarks. This License does not grant permission to use the trade
-+ names, trademarks, service marks, or product names of the Licensor,
-+ except as required for reasonable and customary use in describing the
-+ origin of the Work and reproducing the content of the NOTICE file.
-+
-+ 7. Disclaimer of Warranty. Unless required by applicable law or
-+ agreed to in writing, Licensor provides the Work (and each
-+ Contributor provides its Contributions) on an "AS IS" BASIS,
-+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-+ implied, including, without limitation, any warranties or conditions
-+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-+ PARTICULAR PURPOSE. You are solely responsible for determining the
-+ appropriateness of using or redistributing the Work and assume any
-+ risks associated with Your exercise of permissions under this License.
-+
-+ 8. Limitation of Liability. In no event and under no legal theory,
-+ whether in tort (including negligence), contract, or otherwise,
-+ unless required by applicable law (such as deliberate and grossly
-+ negligent acts) or agreed to in writing, shall any Contributor be
-+ liable to You for damages, including any direct, indirect, special,
-+ incidental, or consequential damages of any character arising as a
-+ result of this License or out of the use or inability to use the
-+ Work (including but not limited to damages for loss of goodwill,
-+ work stoppage, computer failure or malfunction, or any and all
-+ other commercial damages or losses), even if such Contributor
-+ has been advised of the possibility of such damages.
-+
-+ 9. Accepting Warranty or Additional Liability. While redistributing
-+ the Work or Derivative Works thereof, You may choose to offer,
-+ and charge a fee for, acceptance of support, warranty, indemnity,
-+ or other liability obligations and/or rights consistent with this
-+ License. However, in accepting such obligations, You may act only
-+ on Your own behalf and on Your sole responsibility, not on behalf
-+ of any other Contributor, and only if You agree to indemnify,
-+ defend, and hold each Contributor harmless for any liability
-+ incurred by, or claims asserted against, such Contributor by reason
-+ of your accepting any such warranty or additional liability.
-+
-+ END OF TERMS AND CONDITIONS
-+
-+ APPENDIX: How to apply the Apache License to your work.
-+
-+ To apply the Apache License to your work, attach the following
-+ boilerplate notice, with the fields enclosed by brackets "[]"
-+ replaced with your own identifying information. (Don't include
-+ the brackets!) The text should be enclosed in the appropriate
-+ comment syntax for the file format. We also recommend that a
-+ file or class name and description of purpose be included on the
-+ same "printed page" as the copyright notice for easier
-+ identification within third-party archives.
-+
-+ Copyright [yyyy] [name of copyright owner]
-+
-+ Licensed under the Apache License, Version 2.0 (the "License");
-+ you may not use this file except in compliance with the License.
-+ You may obtain a copy of the License at
-+
-+ http://www.apache.org/licenses/LICENSE-2.0
-+
-+ Unless required by applicable law or agreed to in writing, software
-+ distributed under the License is distributed on an "AS IS" BASIS,
-+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-+ See the License for the specific language governing permissions and
-+ limitations under the License.
-diff --git a/vendor/github.com/canonical/go-dqlite/README.md b/vendor/github.com/canonical/go-dqlite/README.md
-new file mode 100644
-index 00000000000..adbbf786067
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/README.md
-@@ -0,0 +1,133 @@
-+go-dqlite [![Build Status](https://travis-ci.org/canonical/go-dqlite.png)](https://travis-ci.org/canonical/go-dqlite) [![Coverage Status](https://coveralls.io/repos/github/canonical/go-dqlite/badge.svg?branch=master)](https://coveralls.io/github/canonical/go-dqlite?branch=master) [![Go Report Card](https://goreportcard.com/badge/github.com/canonical/go-dqlite)](https://goreportcard.com/report/github.com/canonical/go-dqlite) [![GoDoc](https://godoc.org/github.com/canonical/go-dqlite?status.svg)](https://godoc.org/github.com/canonical/go-dqlite)
-+======
-+
-+This repository provides the `go-dqlite` Go package, containing bindings for the
-+[dqlite](https://github.com/canonical/dqlite) C library and a pure-Go
-+client for the dqlite wire [protocol](https://github.com/canonical/dqlite/blob/master/doc/protocol.md).
-+
-+Usage
-+-----
-+
-+The best way to understand how to use the ```go-dqlite``` package is probably by
-+looking at the source code of the [demo
-+program](https://github.com/canonical/go-dqlite/blob/master/cmd/dqlite-demo/dqlite-demo.go) and
-+use it as example.
-+
-+In general your application will use code such as:
-+
-+
-+```go
-+dir := "/path/to/data/directory"
-+address := "1.2.3.4:666" // Unique node address
-+cluster := []string{...} // Optional list of existing nodes, when starting a new node
-+app, err := app.New(dir, app.WithAddress(address), app.WithCluster(cluster))
-+if err != nil {
-+ // ...
-+}
-+
-+db, err := app.Open(context.Background(), "my-database")
-+if err != nil {
-+ // ...
-+}
-+
-+// db is a *sql.DB object
-+if _, err := db.Exec("CREATE TABLE my_table (n INT)"); err != nil
-+ // ...
-+}
-+```
-+
-+Build
-+-----
-+
-+In order to use the go-dqlite package in your application, you'll need to have
-+the [dqlite](https://github.com/canonical/dqlite) C library installed on your
-+system, along with its dependencies. You then need to pass the ```-tags```
-+argument to the Go tools when building or testing your packages, for example:
-+
-+```bash
-+go build -tags libsqlite3
-+go test -tags libsqlite3
-+```
-+
-+Documentation
-+-------------
-+
-+The documentation for this package can be found on [Godoc](http://godoc.org/github.com/canonical/go-dqlite).
-+
-+Demo
-+----
-+
-+To see dqlite in action, either install the Debian package from the PPA:
-+
-+```bash
-+sudo add-apt-repository -y ppa:dqlite/stable
-+sudo apt install dqlite libdqlite-dev
-+```
-+
-+or build the dqlite C library and its dependencies from source, as described
-+[here](https://github.com/canonical/dqlite#build), and then run:
-+
-+```
-+go install -tags libsqlite3 ./cmd/dqlite-demo
-+```
-+
-+from the top-level directory of this repository.
-+
-+This builds a demo dqlite application, which exposes a simple key/value store
-+over an HTTP API.
-+
-+Once the `dqlite-demo` binary is installed (normally under `~/go/bin`),
-+start three nodes of the demo application:
-+
-+```bash
-+dqlite-demo --api 127.0.0.1:8001 --db 127.0.0.1:9001 &
-+dqlite-demo --api 127.0.0.1:8002 --db 127.0.0.1:9002 --join 127.0.0.1:9001 &
-+dqlite-demo --api 127.0.0.1:8003 --db 127.0.0.1:9003 --join 127.0.0.1:9001 &
-+```
-+
-+The `--api` flag tells the demo program where to expose its HTTP API.
-+
-+The `--db` flag tells the demo program to use the given address for internal
-+database replication.
-+
-+The `--join` flag is optional and should be used only for additional nodes after
-+the first one. It informs them about the existing cluster, so they can
-+automatically join it.
-+
-+Now we can start using the cluster. Let's insert a key pair:
-+
-+```bash
-+curl -X PUT -d my-key http://127.0.0.1:8001/my-value
-+```
-+
-+and then retrive it from the database:
-+
-+```bash
-+curl http://127.0.0.1:8001/my-value
-+```
-+
-+Currently the first node is the leader. If we stop it and then try to query the
-+key again curl will fail, but we can simply change the endpoint to another node
-+and things will work since an automatic failover has taken place:
-+
-+```bash
-+kill -TERM %1; curl http://127.0.0.1:8002/my-value
-+```
-+
-+Shell
-+------
-+
-+A basic SQLite-like dqlite shell can be built with:
-+
-+```
-+go install -tags libsqlite3 ./cmd/dqlite
-+```
-+
-+You can test it with the `dqlite-demo` with:
-+
-+```
-+dqlite -s 127.0.0.1:9001
-+```
-+
-+It supports normal SQL queries plus the special `.cluster` and `.leader`
-+commands to inspect the cluster members and the current leader.
-diff --git a/vendor/github.com/canonical/go-dqlite/app/app.go b/vendor/github.com/canonical/go-dqlite/app/app.go
-new file mode 100644
-index 00000000000..93f3e477c5c
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/app/app.go
-@@ -0,0 +1,633 @@
-+package app
-+
-+import (
-+ "context"
-+ "database/sql"
-+ "fmt"
-+ "net"
-+ "os"
-+ "path/filepath"
-+ "sync"
-+ "time"
-+
-+ "github.com/canonical/go-dqlite"
-+ "github.com/canonical/go-dqlite/client"
-+ "github.com/canonical/go-dqlite/driver"
-+ "github.com/pkg/errors"
-+)
-+
-+// App is a high-level helper for initializing a typical dqlite-based Go
-+// application.
-+//
-+// It takes care of starting a dqlite node and registering a dqlite Go SQL
-+// driver.
-+type App struct {
-+ id uint64
-+ address string
-+ dir string
-+ node *dqlite.Node
-+ nodeBindAddress string
-+ listener net.Listener
-+ tls *tlsSetup
-+ store client.NodeStore
-+ driver *driver.Driver
-+ driverName string
-+ log client.LogFunc
-+ stop context.CancelFunc // Signal App.run() to stop.
-+ proxyCh chan struct{} // Waits for App.proxy() to return.
-+ runCh chan struct{} // Waits for App.run() to return.
-+ readyCh chan struct{} // Waits for startup tasks
-+ voters int
-+ standbys int
-+ roles RolesConfig
-+}
-+
-+// New creates a new application node.
-+func New(dir string, options ...Option) (app *App, err error) {
-+ o := defaultOptions()
-+ for _, option := range options {
-+ option(o)
-+ }
-+
-+ // List of cleanup functions to run in case of errors.
-+ cleanups := []func(){}
-+ defer func() {
-+ if err == nil {
-+ return
-+ }
-+ for i := range cleanups {
-+ i = len(cleanups) - 1 - i // Reverse order
-+ cleanups[i]()
-+ }
-+ }()
-+
-+ // Load our ID, or generate one if we are joining.
-+ info := client.NodeInfo{}
-+ infoFileExists, err := fileExists(dir, infoFile)
-+ if err != nil {
-+ return nil, err
-+ }
-+ if !infoFileExists {
-+ if o.Address == "" {
-+ o.Address = defaultAddress()
-+ }
-+ if len(o.Cluster) == 0 {
-+ info.ID = dqlite.BootstrapID
-+ } else {
-+ info.ID = dqlite.GenerateID(o.Address)
-+ if err := fileWrite(dir, joinFile, []byte{}); err != nil {
-+ return nil, err
-+ }
-+ }
-+ info.Address = o.Address
-+
-+ if err := fileMarshal(dir, infoFile, info); err != nil {
-+ return nil, err
-+ }
-+
-+ cleanups = append(cleanups, func() { fileRemove(dir, infoFile) })
-+ } else {
-+ if err := fileUnmarshal(dir, infoFile, &info); err != nil {
-+ return nil, err
-+ }
-+ if o.Address != "" && o.Address != info.Address {
-+ return nil, fmt.Errorf("address %q in info.yaml does not match %q", info.Address, o.Address)
-+ }
-+ }
-+
-+ joinFileExists, err := fileExists(dir, joinFile)
-+ if err != nil {
-+ return nil, err
-+ }
-+
-+ if info.ID == dqlite.BootstrapID && joinFileExists {
-+ return nil, fmt.Errorf("bootstrap node can't join a cluster")
-+ }
-+
-+ // Open the nodes store.
-+ storeFileExists, err := fileExists(dir, storeFile)
-+ if err != nil {
-+ return nil, err
-+ }
-+ store, err := client.NewYamlNodeStore(filepath.Join(dir, storeFile))
-+ if err != nil {
-+ return nil, fmt.Errorf("open cluster.yaml node store: %w", err)
-+ }
-+
-+ // The info file and the store file should both exists or none of them
-+ // exist.
-+ if infoFileExists != storeFileExists {
-+ return nil, fmt.Errorf("inconsistent info.yaml and cluster.yaml")
-+ }
-+
-+ if !storeFileExists {
-+ // If this is a brand new application node, populate the store
-+ // either with the node's address (for bootstrap nodes) or with
-+ // the given cluster addresses (for joining nodes).
-+ nodes := []client.NodeInfo{}
-+ if info.ID == dqlite.BootstrapID {
-+ nodes = append(nodes, client.NodeInfo{Address: info.Address})
-+ } else {
-+ if len(o.Cluster) == 0 {
-+ return nil, fmt.Errorf("no cluster addresses provided")
-+ }
-+ for _, address := range o.Cluster {
-+ nodes = append(nodes, client.NodeInfo{Address: address})
-+ }
-+ }
-+ if err := store.Set(context.Background(), nodes); err != nil {
-+ return nil, fmt.Errorf("initialize node store: %w", err)
-+ }
-+ cleanups = append(cleanups, func() { fileRemove(dir, storeFile) })
-+ }
-+
-+ // Start the local dqlite engine.
-+ var nodeBindAddress string
-+ var nodeDial client.DialFunc
-+ if o.TLS != nil {
-+ nodeBindAddress = fmt.Sprintf("@dqlite-%d", info.ID)
-+
-+ // Within a snap we need to choose a different name for the abstract unix domain
-+ // socket to get it past the AppArmor confinement.
-+ // See https://github.com/snapcore/snapd/blob/master/interfaces/apparmor/template.go#L357
-+ snapInstanceName := os.Getenv("SNAP_INSTANCE_NAME")
-+ if len(snapInstanceName) > 0 {
-+ nodeBindAddress = fmt.Sprintf("@snap.%s.dqlite-%d", snapInstanceName, info.ID)
-+ }
-+
-+ nodeDial = makeNodeDialFunc(o.TLS.Dial)
-+ } else {
-+ nodeBindAddress = info.Address
-+ nodeDial = client.DefaultDialFunc
-+ }
-+ node, err := dqlite.New(
-+ info.ID, info.Address, dir,
-+ dqlite.WithBindAddress(nodeBindAddress),
-+ dqlite.WithDialFunc(nodeDial),
-+ dqlite.WithFailureDomain(o.FailureDomain),
-+ dqlite.WithNetworkLatency(o.NetworkLatency),
-+ )
-+ if err != nil {
-+ return nil, fmt.Errorf("create node: %w", err)
-+ }
-+ if err := node.Start(); err != nil {
-+ return nil, fmt.Errorf("start node: %w", err)
-+ }
-+ cleanups = append(cleanups, func() { node.Close() })
-+
-+ // Register the local dqlite driver.
-+ driverDial := client.DefaultDialFunc
-+ if o.TLS != nil {
-+ driverDial = client.DialFuncWithTLS(driverDial, o.TLS.Dial)
-+ }
-+
-+ driver, err := driver.New(store, driver.WithDialFunc(driverDial), driver.WithLogFunc(o.Log))
-+ if err != nil {
-+ return nil, fmt.Errorf("create driver: %w", err)
-+ }
-+ driverIndex++
-+ driverName := fmt.Sprintf("dqlite-%d", driverIndex)
-+ sql.Register(driverName, driver)
-+
-+ if o.Voters < 3 || o.Voters%2 == 0 {
-+ return nil, fmt.Errorf("invalid voters %d: must be an odd number greater than 1", o.Voters)
-+ }
-+
-+ if o.StandBys%2 == 0 {
-+ return nil, fmt.Errorf("invalid stand-bys %d: must be an odd number", o.StandBys)
-+ }
-+
-+ ctx, stop := context.WithCancel(context.Background())
-+
-+ app = &App{
-+ id: info.ID,
-+ address: info.Address,
-+ dir: dir,
-+ node: node,
-+ nodeBindAddress: nodeBindAddress,
-+ store: store,
-+ driver: driver,
-+ driverName: driverName,
-+ log: o.Log,
-+ tls: o.TLS,
-+ stop: stop,
-+ runCh: make(chan struct{}, 0),
-+ readyCh: make(chan struct{}, 0),
-+ voters: o.Voters,
-+ standbys: o.StandBys,
-+ roles: RolesConfig{Voters: o.Voters, StandBys: o.StandBys},
-+ }
-+
-+ // Start the proxy if a TLS configuration was provided.
-+ if o.TLS != nil {
-+ listener, err := net.Listen("tcp", info.Address)
-+ if err != nil {
-+ return nil, fmt.Errorf("listen to %s: %w", info.Address, err)
-+ }
-+ proxyCh := make(chan struct{}, 0)
-+
-+ app.listener = listener
-+ app.proxyCh = proxyCh
-+
-+ go app.proxy()
-+
-+ cleanups = append(cleanups, func() { listener.Close(); <-proxyCh })
-+
-+ }
-+
-+ go app.run(ctx, o.RolesAdjustmentFrequency, joinFileExists)
-+
-+ return app, nil
-+}
-+
-+// Handover transfers all responsibilities for this node (such has leadership
-+// and voting rights) to another node, if one is available.
-+//
-+// This method should always be called before invoking Close(), in order to
-+// gracefully shutdown a node.
-+func (a *App) Handover(ctx context.Context) error {
-+ // Set a hard limit of one minute, in case the user-provided context
-+ // has no expiration. That avoids the call to hang forever in case a
-+ // majority of the cluster is down and no leader is available.
-+ var cancel context.CancelFunc
-+ ctx, cancel = context.WithTimeout(ctx, time.Minute)
-+ defer cancel()
-+
-+ cli, err := a.Leader(ctx)
-+ if err != nil {
-+ return fmt.Errorf("find leader: %w", err)
-+ }
-+ defer cli.Close()
-+
-+ // Possibly transfer our role.
-+ nodes, err := cli.Cluster(ctx)
-+ if err != nil {
-+ return fmt.Errorf("cluster servers: %w", err)
-+ }
-+
-+ changes := a.makeRolesChanges(nodes)
-+
-+ role, candidates := changes.Handover(a.id)
-+
-+ if role != -1 {
-+ for i, node := range candidates {
-+ if err := cli.Assign(ctx, node.ID, role); err != nil {
-+ a.warn("promote %s from %s to %s: %v", node.Address, node.Role, role, err)
-+ if i == len(candidates)-1 {
-+ // We could not promote any node
-+ return fmt.Errorf("could not promote any online node to %s", role)
-+ }
-+ continue
-+ }
-+ a.debug("promoted %s from %s to %s", node.Address, node.Role, role)
-+ break
-+ }
-+ }
-+
-+ // Check if we are the current leader and transfer leadership if so.
-+ leader, err := cli.Leader(ctx)
-+ if err != nil {
-+ return fmt.Errorf("leader address: %w", err)
-+ }
-+ if leader != nil && leader.Address == a.address {
-+ nodes, err := cli.Cluster(ctx)
-+ if err != nil {
-+ return fmt.Errorf("cluster servers: %w", err)
-+ }
-+ changes := a.makeRolesChanges(nodes)
-+ voters := changes.list(client.Voter, true)
-+
-+ for i, voter := range voters {
-+ if voter.Address == a.address {
-+ continue
-+ }
-+ if err := cli.Transfer(ctx, voter.ID); err != nil {
-+ a.warn("transfer leadership to %s: %v", voter.Address, err)
-+ if i == len(voters)-1 {
-+ return fmt.Errorf("transfer leadership: %w", err)
-+ }
-+ }
-+ cli, err = a.Leader(ctx)
-+ if err != nil {
-+ return fmt.Errorf("find new leader: %w", err)
-+ }
-+ defer cli.Close()
-+ }
-+ }
-+
-+ // Demote ourselves if we have promoted someone else.
-+ if role != -1 {
-+ if err := cli.Assign(ctx, a.ID(), client.Spare); err != nil {
-+ return fmt.Errorf("demote ourselves: %w", err)
-+ }
-+ }
-+
-+ return nil
-+}
-+
-+// Close the application node, releasing all resources it created.
-+func (a *App) Close() error {
-+ // Stop the run goroutine.
-+ a.stop()
-+ <-a.runCh
-+
-+ if a.listener != nil {
-+ a.listener.Close()
-+ <-a.proxyCh
-+ }
-+ if err := a.node.Close(); err != nil {
-+ return err
-+ }
-+ return nil
-+}
-+
-+// ID returns the dqlite ID of this application node.
-+func (a *App) ID() uint64 {
-+ return a.id
-+}
-+
-+// Address returns the dqlite address of this application node.
-+func (a *App) Address() string {
-+ return a.address
-+}
-+
-+// Driver returns the name used to register the dqlite driver.
-+func (a *App) Driver() string {
-+ return a.driverName
-+}
-+
-+// Ready can be used to wait for a node to complete some initial tasks that are
-+// initiated at startup. For example a brand new node will attempt to join the
-+// cluster, a restarted node will check if it should assume some particular
-+// role, etc.
-+//
-+// If this method returns without error it means that those initial tasks have
-+// succeeded and follow-up operations like Open() are more likely to succeeed
-+// quickly.
-+func (a *App) Ready(ctx context.Context) error {
-+ select {
-+ case <-a.readyCh:
-+ return nil
-+ case <-ctx.Done():
-+ return ctx.Err()
-+ }
-+}
-+
-+// Open the dqlite database with the given name
-+func (a *App) Open(ctx context.Context, database string) (*sql.DB, error) {
-+ db, err := sql.Open(a.Driver(), database)
-+ if err != nil {
-+ return nil, err
-+ }
-+
-+ for i := 0; i < 60; i++ {
-+ err = db.PingContext(ctx)
-+ if err == nil {
-+ break
-+ }
-+ cause := errors.Cause(err)
-+ if cause != driver.ErrNoAvailableLeader {
-+ return nil, err
-+ }
-+ time.Sleep(time.Second)
-+ }
-+ if err != nil {
-+ return nil, err
-+ }
-+
-+ return db, nil
-+}
-+
-+// Leader returns a client connected to the current cluster leader, if any.
-+func (a *App) Leader(ctx context.Context) (*client.Client, error) {
-+ return client.FindLeader(ctx, a.store, a.clientOptions()...)
-+}
-+
-+// Client returns a client connected to the local node.
-+func (a *App) Client(ctx context.Context) (*client.Client, error) {
-+ return client.New(ctx, a.nodeBindAddress)
-+}
-+
-+// Proxy incoming TLS connections.
-+func (a *App) proxy() {
-+ wg := sync.WaitGroup{}
-+ ctx, cancel := context.WithCancel(context.Background())
-+ for {
-+ client, err := a.listener.Accept()
-+ if err != nil {
-+ cancel()
-+ wg.Wait()
-+ close(a.proxyCh)
-+ return
-+ }
-+ address := client.RemoteAddr()
-+ a.debug("new connection from %s", address)
-+ server, err := net.Dial("unix", a.nodeBindAddress)
-+ if err != nil {
-+ a.error("dial local node: %v", err)
-+ client.Close()
-+ continue
-+ }
-+ wg.Add(1)
-+ go func() {
-+ defer wg.Done()
-+ if err := proxy(ctx, client, server, a.tls.Listen); err != nil {
-+ a.error("proxy: %v", err)
-+ }
-+ }()
-+ }
-+}
-+
-+// Run background tasks. The join flag is true if the node is a brand new one
-+// and should join the cluster.
-+func (a *App) run(ctx context.Context, frequency time.Duration, join bool) {
-+ defer close(a.runCh)
-+
-+ delay := time.Duration(0)
-+ ready := false
-+ for {
-+ select {
-+ case <-ctx.Done():
-+ // If we didn't become ready yet, close the ready
-+ // channel, to unblock any call to Ready().
-+ if !ready {
-+ close(a.readyCh)
-+ }
-+ return
-+ case <-time.After(delay):
-+ cli, err := a.Leader(ctx)
-+ if err != nil {
-+ continue
-+ }
-+
-+ // Attempt to join the cluster if this is a brand new node.
-+ if join {
-+ info := client.NodeInfo{ID: a.id, Address: a.address, Role: client.Spare}
-+ if err := cli.Add(ctx, info); err != nil {
-+ a.warn("join cluster: %v", err)
-+ delay = time.Second
-+ cli.Close()
-+ continue
-+ }
-+ join = false
-+ if err := fileRemove(a.dir, joinFile); err != nil {
-+ a.error("remove join file: %v", err)
-+ }
-+
-+ }
-+
-+ // Refresh our node store.
-+ servers, err := cli.Cluster(ctx)
-+ if err != nil {
-+ cli.Close()
-+ continue
-+ }
-+ a.store.Set(ctx, servers)
-+
-+ // If we are starting up, let's see if we should
-+ // promote ourselves.
-+ if !ready {
-+ if err := a.maybePromoteOurselves(ctx, cli, servers); err != nil {
-+ a.warn("%v", err)
-+ delay = time.Second
-+ cli.Close()
-+ continue
-+ }
-+ ready = true
-+ delay = frequency
-+ close(a.readyCh)
-+ cli.Close()
-+ continue
-+ }
-+
-+ // If we are the leader, let's see if there's any
-+ // adjustment we should make to node roles.
-+ if err := a.maybeAdjustRoles(ctx, cli); err != nil {
-+ a.warn("adjust roles: %v", err)
-+ }
-+ cli.Close()
-+ }
-+ }
-+}
-+
-+// Possibly change our own role at startup.
-+func (a *App) maybePromoteOurselves(ctx context.Context, cli *client.Client, nodes []client.NodeInfo) error {
-+ roles := a.makeRolesChanges(nodes)
-+
-+ role := roles.Assume(a.id)
-+ if role == -1 {
-+ return nil
-+ }
-+
-+ // Promote ourselves.
-+ if err := cli.Assign(ctx, a.id, role); err != nil {
-+ return fmt.Errorf("assign %s role to ourselves: %v", role, err)
-+ }
-+
-+ // Possibly try to promote another node as well if we've reached the 3
-+ // node threshold. If we don't succeed in doing that, errors are
-+ // ignored since the leader will eventually notice that don't have
-+ // enough voters and will retry.
-+ if role == client.Voter && roles.count(client.Voter, true) == 1 {
-+ for node := range roles.State {
-+ if node.ID == a.id || node.Role == client.Voter {
-+ continue
-+ }
-+ if err := cli.Assign(ctx, node.ID, client.Voter); err == nil {
-+ break
-+ } else {
-+ a.warn("promote %s from %s to voter: %v", node.Address, node.Role, err)
-+ }
-+ }
-+ }
-+
-+ return nil
-+}
-+
-+// Check if any adjustment needs to be made to existing roles.
-+func (a *App) maybeAdjustRoles(ctx context.Context, cli *client.Client) error {
-+again:
-+ info, err := cli.Leader(ctx)
-+ if err != nil {
-+ return err
-+ }
-+ if info.ID != a.id {
-+ return nil
-+ }
-+
-+ nodes, err := cli.Cluster(ctx)
-+ if err != nil {
-+ return err
-+ }
-+
-+ roles := a.makeRolesChanges(nodes)
-+
-+ role, nodes := roles.Adjust(a.id)
-+ if role == -1 {
-+ return nil
-+ }
-+
-+ for i, node := range nodes {
-+ if err := cli.Assign(ctx, node.ID, role); err != nil {
-+ a.warn("change %s from %s to %s: %v", node.Address, node.Role, role, err)
-+ if i == len(nodes)-1 {
-+ // We could not change any node
-+ return fmt.Errorf("could not assign role %s to any node", role)
-+ }
-+ continue
-+ }
-+ break
-+ }
-+
-+ goto again
-+}
-+
-+// Probe all given nodes for connectivity and metadata, then return a
-+// RolesChanges object.
-+func (a *App) makeRolesChanges(nodes []client.NodeInfo) RolesChanges {
-+ state := map[client.NodeInfo]*client.NodeMetadata{}
-+
-+ for _, node := range nodes {
-+ state[node] = nil
-+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
-+ defer cancel()
-+
-+ cli, err := client.New(ctx, node.Address, a.clientOptions()...)
-+ if err == nil {
-+ metadata, err := cli.Describe(ctx)
-+ if err == nil {
-+ state[node] = metadata
-+ }
-+ cli.Close()
-+ }
-+ }
-+
-+ return RolesChanges{Config: a.roles, State: state}
-+}
-+
-+// Return the options to use for client.FindLeader() or client.New()
-+func (a *App) clientOptions() []client.Option {
-+ dial := client.DefaultDialFunc
-+ if a.tls != nil {
-+ dial = client.DialFuncWithTLS(dial, a.tls.Dial)
-+ }
-+ return []client.Option{client.WithDialFunc(dial), client.WithLogFunc(a.log)}
-+}
-+
-+func (a *App) debug(format string, args ...interface{}) {
-+ a.log(client.LogDebug, format, args...)
-+}
-+
-+func (a *App) info(format string, args ...interface{}) {
-+ a.log(client.LogInfo, format, args...)
-+}
-+
-+func (a *App) warn(format string, args ...interface{}) {
-+ a.log(client.LogWarn, format, args...)
-+}
-+
-+func (a *App) error(format string, args ...interface{}) {
-+ a.log(client.LogError, format, args...)
-+}
-+
-+var driverIndex = 0
-diff --git a/vendor/github.com/canonical/go-dqlite/app/app_test.go b/vendor/github.com/canonical/go-dqlite/app/app_test.go
-new file mode 100644
-index 00000000000..68f578716df
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/app/app_test.go
-@@ -0,0 +1,996 @@
-+package app_test
-+
-+import (
-+ "context"
-+ "crypto/tls"
-+ "crypto/x509"
-+ "database/sql"
-+ "encoding/binary"
-+ "fmt"
-+ "io/ioutil"
-+ "os"
-+ "path/filepath"
-+ "testing"
-+ "time"
-+
-+ "github.com/canonical/go-dqlite/app"
-+ "github.com/canonical/go-dqlite/client"
-+ "github.com/stretchr/testify/assert"
-+ "github.com/stretchr/testify/require"
-+)
-+
-+// Create a pristine bootstrap node with default value.
-+func TestNew_PristineDefault(t *testing.T) {
-+ _, cleanup := newApp(t)
-+ defer cleanup()
-+}
-+
-+// Create a pristine joining node.
-+func TestNew_PristineJoiner(t *testing.T) {
-+ addr1 := "127.0.0.1:9001"
-+ addr2 := "127.0.0.1:9002"
-+
-+ app1, cleanup := newApp(t, app.WithAddress(addr1))
-+ defer cleanup()
-+
-+ app2, cleanup := newApp(t, app.WithAddress(addr2), app.WithCluster([]string{addr1}))
-+ defer cleanup()
-+
-+ require.NoError(t, app2.Ready(context.Background()))
-+
-+ // The joining node to appear in the cluster list.
-+ cli, err := app1.Leader(context.Background())
-+ require.NoError(t, err)
-+ defer cli.Close()
-+
-+ cluster, err := cli.Cluster(context.Background())
-+ require.NoError(t, err)
-+ assert.Equal(t, addr1, cluster[0].Address)
-+ assert.Equal(t, addr2, cluster[1].Address)
-+
-+ // Initially the node joins as spare.
-+ assert.Equal(t, client.Voter, cluster[0].Role)
-+ assert.Equal(t, client.Spare, cluster[1].Role)
-+}
-+
-+// Restart a node that had previously joined the cluster successfully.
-+func TestNew_JoinerRestart(t *testing.T) {
-+ addr1 := "127.0.0.1:9001"
-+ addr2 := "127.0.0.1:9002"
-+
-+ app1, cleanup := newApp(t, app.WithAddress(addr1))
-+ defer cleanup()
-+
-+ require.NoError(t, app1.Ready(context.Background()))
-+
-+ dir2, cleanup := newDir(t)
-+ defer cleanup()
-+
-+ app2, cleanup := newAppWithDir(t, dir2, app.WithAddress(addr2), app.WithCluster([]string{addr1}))
-+ require.NoError(t, app2.Ready(context.Background()))
-+ cleanup()
-+
-+ app2, cleanup = newAppWithDir(t, dir2, app.WithAddress(addr2))
-+ defer cleanup()
-+
-+ require.NoError(t, app2.Ready(context.Background()))
-+}
-+
-+// The second joiner promotes itself and also the first joiner.
-+func TestNew_SecondJoiner(t *testing.T) {
-+ addr1 := "127.0.0.1:9001"
-+ addr2 := "127.0.0.1:9002"
-+ addr3 := "127.0.0.1:9003"
-+
-+ app1, cleanup := newApp(t, app.WithAddress(addr1))
-+ defer cleanup()
-+
-+ app2, cleanup := newApp(t, app.WithAddress(addr2), app.WithCluster([]string{addr1}))
-+ defer cleanup()
-+
-+ require.NoError(t, app2.Ready(context.Background()))
-+
-+ app3, cleanup := newApp(t, app.WithAddress(addr3), app.WithCluster([]string{addr1}))
-+ defer cleanup()
-+
-+ require.NoError(t, app3.Ready(context.Background()))
-+
-+ cli, err := app1.Leader(context.Background())
-+ require.NoError(t, err)
-+ defer cli.Close()
-+
-+ cluster, err := cli.Cluster(context.Background())
-+ require.NoError(t, err)
-+
-+ assert.Equal(t, addr1, cluster[0].Address)
-+ assert.Equal(t, addr2, cluster[1].Address)
-+ assert.Equal(t, addr3, cluster[2].Address)
-+
-+ assert.Equal(t, client.Voter, cluster[0].Role)
-+ assert.Equal(t, client.Voter, cluster[1].Role)
-+ assert.Equal(t, client.Voter, cluster[2].Role)
-+}
-+
-+// The third joiner gets the stand-by role.
-+func TestNew_ThirdJoiner(t *testing.T) {
-+ apps := []*app.App{}
-+
-+ for i := 0; i < 4; i++ {
-+ addr := fmt.Sprintf("127.0.0.1:900%d", i+1)
-+ options := []app.Option{app.WithAddress(addr)}
-+ if i > 0 {
-+ options = append(options, app.WithCluster([]string{"127.0.0.1:9001"}))
-+ }
-+
-+ app, cleanup := newApp(t, options...)
-+ defer cleanup()
-+
-+ require.NoError(t, app.Ready(context.Background()))
-+
-+ apps = append(apps, app)
-+
-+ }
-+
-+ cli, err := apps[0].Leader(context.Background())
-+ require.NoError(t, err)
-+ defer cli.Close()
-+
-+ cluster, err := cli.Cluster(context.Background())
-+ require.NoError(t, err)
-+
-+ assert.Equal(t, client.Voter, cluster[0].Role)
-+ assert.Equal(t, client.Voter, cluster[1].Role)
-+ assert.Equal(t, client.Voter, cluster[2].Role)
-+ assert.Equal(t, client.StandBy, cluster[3].Role)
-+}
-+
-+// The fourth joiner gets the stand-by role.
-+func TestNew_FourthJoiner(t *testing.T) {
-+ apps := []*app.App{}
-+
-+ for i := 0; i < 5; i++ {
-+ addr := fmt.Sprintf("127.0.0.1:900%d", i+1)
-+ options := []app.Option{app.WithAddress(addr)}
-+ if i > 0 {
-+ options = append(options, app.WithCluster([]string{"127.0.0.1:9001"}))
-+ }
-+
-+ app, cleanup := newApp(t, options...)
-+ defer cleanup()
-+
-+ require.NoError(t, app.Ready(context.Background()))
-+
-+ apps = append(apps, app)
-+
-+ }
-+
-+ cli, err := apps[0].Leader(context.Background())
-+ require.NoError(t, err)
-+ defer cli.Close()
-+
-+ cluster, err := cli.Cluster(context.Background())
-+ require.NoError(t, err)
-+
-+ assert.Equal(t, client.Voter, cluster[0].Role)
-+ assert.Equal(t, client.Voter, cluster[1].Role)
-+ assert.Equal(t, client.Voter, cluster[2].Role)
-+ assert.Equal(t, client.StandBy, cluster[3].Role)
-+ assert.Equal(t, client.StandBy, cluster[4].Role)
-+}
-+
-+// The fifth joiner gets the stand-by role.
-+func TestNew_FifthJoiner(t *testing.T) {
-+ apps := []*app.App{}
-+
-+ for i := 0; i < 6; i++ {
-+ addr := fmt.Sprintf("127.0.0.1:900%d", i+1)
-+ options := []app.Option{app.WithAddress(addr)}
-+ if i > 0 {
-+ options = append(options, app.WithCluster([]string{"127.0.0.1:9001"}))
-+ }
-+
-+ app, cleanup := newApp(t, options...)
-+ defer cleanup()
-+
-+ require.NoError(t, app.Ready(context.Background()))
-+
-+ apps = append(apps, app)
-+
-+ }
-+
-+ cli, err := apps[0].Leader(context.Background())
-+ require.NoError(t, err)
-+ defer cli.Close()
-+
-+ cluster, err := cli.Cluster(context.Background())
-+ require.NoError(t, err)
-+
-+ assert.Equal(t, client.Voter, cluster[0].Role)
-+ assert.Equal(t, client.Voter, cluster[1].Role)
-+ assert.Equal(t, client.Voter, cluster[2].Role)
-+ assert.Equal(t, client.StandBy, cluster[3].Role)
-+ assert.Equal(t, client.StandBy, cluster[4].Role)
-+ assert.Equal(t, client.StandBy, cluster[5].Role)
-+}
-+
-+// The sixth joiner gets the spare role.
-+func TestNew_SixthJoiner(t *testing.T) {
-+ apps := []*app.App{}
-+
-+ for i := 0; i < 7; i++ {
-+ addr := fmt.Sprintf("127.0.0.1:900%d", i+1)
-+ options := []app.Option{app.WithAddress(addr)}
-+ if i > 0 {
-+ options = append(options, app.WithCluster([]string{"127.0.0.1:9001"}))
-+ }
-+
-+ app, cleanup := newApp(t, options...)
-+ defer cleanup()
-+
-+ require.NoError(t, app.Ready(context.Background()))
-+
-+ apps = append(apps, app)
-+
-+ }
-+
-+ cli, err := apps[0].Leader(context.Background())
-+ require.NoError(t, err)
-+ defer cli.Close()
-+
-+ cluster, err := cli.Cluster(context.Background())
-+ require.NoError(t, err)
-+
-+ assert.Equal(t, client.Voter, cluster[0].Role)
-+ assert.Equal(t, client.Voter, cluster[1].Role)
-+ assert.Equal(t, client.Voter, cluster[2].Role)
-+ assert.Equal(t, client.StandBy, cluster[3].Role)
-+ assert.Equal(t, client.StandBy, cluster[4].Role)
-+ assert.Equal(t, client.StandBy, cluster[5].Role)
-+ assert.Equal(t, client.Spare, cluster[6].Role)
-+}
-+
-+// Transfer voting rights to another online node.
-+func TestHandover_Voter(t *testing.T) {
-+ n := 4
-+ apps := make([]*app.App, n)
-+
-+ for i := 0; i < n; i++ {
-+ addr := fmt.Sprintf("127.0.0.1:900%d", i+1)
-+ options := []app.Option{app.WithAddress(addr)}
-+ if i > 0 {
-+ options = append(options, app.WithCluster([]string{"127.0.0.1:9001"}))
-+ }
-+
-+ app, cleanup := newApp(t, options...)
-+ defer cleanup()
-+
-+ require.NoError(t, app.Ready(context.Background()))
-+
-+ apps[i] = app
-+ }
-+
-+ cli, err := apps[0].Leader(context.Background())
-+ require.NoError(t, err)
-+ defer cli.Close()
-+
-+ cluster, err := cli.Cluster(context.Background())
-+ require.NoError(t, err)
-+
-+ assert.Equal(t, client.Voter, cluster[0].Role)
-+ assert.Equal(t, client.Voter, cluster[1].Role)
-+ assert.Equal(t, client.Voter, cluster[2].Role)
-+ assert.Equal(t, client.StandBy, cluster[3].Role)
-+
-+ require.NoError(t, apps[2].Handover(context.Background()))
-+
-+ cluster, err = cli.Cluster(context.Background())
-+ require.NoError(t, err)
-+
-+ assert.Equal(t, client.Voter, cluster[0].Role)
-+ assert.Equal(t, client.Voter, cluster[1].Role)
-+ assert.Equal(t, client.Spare, cluster[2].Role)
-+ assert.Equal(t, client.Voter, cluster[3].Role)
-+}
-+
-+// In a two-node cluster only one of them is a voter. When Handover() is called
-+// on the voter, the role and leadership are transfered.
-+func TestHandover_TwoNodes(t *testing.T) {
-+ n := 2
-+ apps := make([]*app.App, n)
-+
-+ for i := 0; i < n; i++ {
-+ addr := fmt.Sprintf("127.0.0.1:900%d", i+1)
-+ options := []app.Option{app.WithAddress(addr)}
-+ if i > 0 {
-+ options = append(options, app.WithCluster([]string{"127.0.0.1:9001"}))
-+ }
-+
-+ app, cleanup := newApp(t, options...)
-+ defer cleanup()
-+
-+ require.NoError(t, app.Ready(context.Background()))
-+
-+ apps[i] = app
-+ }
-+
-+ err := apps[0].Handover(context.Background())
-+ require.NoError(t, err)
-+
-+ cli, err := apps[1].Leader(context.Background())
-+ require.NoError(t, err)
-+ defer cli.Close()
-+
-+ cluster, err := cli.Cluster(context.Background())
-+ require.NoError(t, err)
-+
-+ assert.Equal(t, client.Spare, cluster[0].Role)
-+ assert.Equal(t, client.Voter, cluster[1].Role)
-+}
-+
-+// Transfer voting rights to another online node. Failure domains are taken
-+// into account.
-+func TestHandover_VoterHonorFailureDomain(t *testing.T) {
-+ n := 6
-+ apps := make([]*app.App, n)
-+
-+ for i := 0; i < n; i++ {
-+ addr := fmt.Sprintf("127.0.0.1:900%d", i+1)
-+ options := []app.Option{
-+ app.WithAddress(addr),
-+ app.WithFailureDomain(uint64(i % 3)),
-+ }
-+ if i > 0 {
-+ options = append(options, app.WithCluster([]string{"127.0.0.1:9001"}))
-+ }
-+
-+ app, cleanup := newApp(t, options...)
-+ defer cleanup()
-+
-+ require.NoError(t, app.Ready(context.Background()))
-+
-+ apps[i] = app
-+ }
-+
-+ cli, err := apps[0].Leader(context.Background())
-+ require.NoError(t, err)
-+ defer cli.Close()
-+
-+ cluster, err := cli.Cluster(context.Background())
-+ require.NoError(t, err)
-+
-+ require.NoError(t, apps[2].Handover(context.Background()))
-+
-+ cluster, err = cli.Cluster(context.Background())
-+ require.NoError(t, err)
-+
-+ assert.Equal(t, client.Voter, cluster[0].Role)
-+ assert.Equal(t, client.Voter, cluster[1].Role)
-+ assert.Equal(t, client.Spare, cluster[2].Role)
-+ assert.Equal(t, client.StandBy, cluster[3].Role)
-+ assert.Equal(t, client.StandBy, cluster[4].Role)
-+ assert.Equal(t, client.Voter, cluster[5].Role)
-+}
-+
-+// Handover with a sinle node.
-+func TestHandover_SingleNode(t *testing.T) {
-+ dir, cleanup := newDir(t)
-+ defer cleanup()
-+
-+ app, err := app.New(dir, app.WithAddress("127.0.0.1:9001"))
-+ require.NoError(t, err)
-+
-+ require.NoError(t, app.Ready(context.Background()))
-+
-+ require.NoError(t, app.Handover(context.Background()))
-+ require.NoError(t, app.Close())
-+}
-+
-+// Exercise a sequential graceful shutdown of a 3-node cluster.
-+func TestHandover_GracefulShutdown(t *testing.T) {
-+ n := 3
-+ apps := make([]*app.App, n)
-+
-+ for i := 0; i < n; i++ {
-+ dir, cleanup := newDir(t)
-+ defer cleanup()
-+
-+ addr := fmt.Sprintf("127.0.0.1:900%d", i+1)
-+ options := []app.Option{
-+ app.WithAddress(addr),
-+ }
-+ if i > 0 {
-+ options = append(options, app.WithCluster([]string{"127.0.0.1:9001"}))
-+ }
-+
-+ app, err := app.New(dir, options...)
-+ require.NoError(t, err)
-+
-+ require.NoError(t, app.Ready(context.Background()))
-+
-+ apps[i] = app
-+ }
-+
-+ db, err := sql.Open(apps[0].Driver(), "test.db")
-+ require.NoError(t, err)
-+
-+ _, err = db.Exec("CREATE TABLE test (n INT)")
-+ require.NoError(t, err)
-+
-+ require.NoError(t, db.Close())
-+
-+ require.NoError(t, apps[0].Handover(context.Background()))
-+ require.NoError(t, apps[0].Close())
-+
-+ require.NoError(t, apps[1].Handover(context.Background()))
-+ require.NoError(t, apps[1].Close())
-+
-+ require.NoError(t, apps[2].Handover(context.Background()))
-+ require.NoError(t, apps[2].Close())
-+}
-+
-+// Transfer the stand-by role to another online node.
-+func TestHandover_StandBy(t *testing.T) {
-+ n := 7
-+ apps := make([]*app.App, n)
-+
-+ for i := 0; i < n; i++ {
-+ addr := fmt.Sprintf("127.0.0.1:900%d", i+1)
-+ options := []app.Option{app.WithAddress(addr)}
-+ if i > 0 {
-+ options = append(options, app.WithCluster([]string{"127.0.0.1:9001"}))
-+ }
-+
-+ app, cleanup := newApp(t, options...)
-+ defer cleanup()
-+
-+ require.NoError(t, app.Ready(context.Background()))
-+
-+ apps[i] = app
-+ }
-+
-+ cli, err := apps[0].Leader(context.Background())
-+ require.NoError(t, err)
-+ defer cli.Close()
-+
-+ cluster, err := cli.Cluster(context.Background())
-+ require.NoError(t, err)
-+
-+ assert.Equal(t, client.Voter, cluster[0].Role)
-+ assert.Equal(t, client.Voter, cluster[1].Role)
-+ assert.Equal(t, client.Voter, cluster[2].Role)
-+ assert.Equal(t, client.StandBy, cluster[3].Role)
-+ assert.Equal(t, client.StandBy, cluster[4].Role)
-+ assert.Equal(t, client.StandBy, cluster[5].Role)
-+ assert.Equal(t, client.Spare, cluster[6].Role)
-+
-+ require.NoError(t, apps[4].Handover(context.Background()))
-+
-+ cluster, err = cli.Cluster(context.Background())
-+ require.NoError(t, err)
-+
-+ assert.Equal(t, client.Voter, cluster[0].Role)
-+ assert.Equal(t, client.Voter, cluster[1].Role)
-+ assert.Equal(t, client.Voter, cluster[2].Role)
-+ assert.Equal(t, client.StandBy, cluster[3].Role)
-+ assert.Equal(t, client.Spare, cluster[4].Role)
-+ assert.Equal(t, client.StandBy, cluster[5].Role)
-+ assert.Equal(t, client.StandBy, cluster[6].Role)
-+}
-+
-+// Transfer leadership and voting rights to another node.
-+func TestHandover_TransferLeadership(t *testing.T) {
-+ n := 4
-+ apps := make([]*app.App, n)
-+
-+ for i := 0; i < n; i++ {
-+ addr := fmt.Sprintf("127.0.0.1:900%d", i+1)
-+ options := []app.Option{app.WithAddress(addr)}
-+ if i > 0 {
-+ options = append(options, app.WithCluster([]string{"127.0.0.1:9001"}))
-+ }
-+
-+ app, cleanup := newApp(t, options...)
-+ defer cleanup()
-+
-+ require.NoError(t, app.Ready(context.Background()))
-+
-+ apps[i] = app
-+ }
-+
-+ cli, err := apps[0].Leader(context.Background())
-+ require.NoError(t, err)
-+ defer cli.Close()
-+
-+ leader, err := cli.Leader(context.Background())
-+ require.NoError(t, err)
-+
-+ require.NotNil(t, leader)
-+ require.Equal(t, apps[0].ID(), leader.ID)
-+ require.NoError(t, apps[0].Handover(context.Background()))
-+
-+ cli, err = apps[0].Leader(context.Background())
-+ require.NoError(t, err)
-+ defer cli.Close()
-+
-+ leader, err = cli.Leader(context.Background())
-+ require.NoError(t, err)
-+
-+ assert.NotEqual(t, apps[0].ID(), leader.ID)
-+
-+ cluster, err := cli.Cluster(context.Background())
-+ require.NoError(t, err)
-+
-+ assert.Equal(t, client.Spare, cluster[0].Role)
-+ assert.Equal(t, client.Voter, cluster[1].Role)
-+ assert.Equal(t, client.Voter, cluster[2].Role)
-+ assert.Equal(t, client.Voter, cluster[3].Role)
-+}
-+
-+// If a voter goes offline, another node takes its place.
-+func TestRolesAdjustment_ReplaceVoter(t *testing.T) {
-+ n := 4
-+ apps := make([]*app.App, n)
-+ cleanups := make([]func(), n)
-+
-+ for i := 0; i < n; i++ {
-+ addr := fmt.Sprintf("127.0.0.1:900%d", i+1)
-+ options := []app.Option{
-+ app.WithAddress(addr),
-+ app.WithRolesAdjustmentFrequency(500 * time.Millisecond),
-+ }
-+ if i > 0 {
-+ options = append(options, app.WithCluster([]string{"127.0.0.1:9001"}))
-+ }
-+
-+ app, cleanup := newApp(t, options...)
-+
-+ require.NoError(t, app.Ready(context.Background()))
-+
-+ apps[i] = app
-+ cleanups[i] = cleanup
-+ }
-+
-+ defer cleanups[0]()
-+ defer cleanups[1]()
-+ defer cleanups[3]()
-+
-+ // A voter goes offline.
-+ cleanups[2]()
-+
-+ time.Sleep(2 * time.Second)
-+
-+ cli, err := apps[0].Leader(context.Background())
-+ require.NoError(t, err)
-+ defer cli.Close()
-+
-+ cluster, err := cli.Cluster(context.Background())
-+ require.NoError(t, err)
-+
-+ assert.Equal(t, client.Voter, cluster[0].Role)
-+ assert.Equal(t, client.Voter, cluster[1].Role)
-+ assert.Equal(t, client.Spare, cluster[2].Role)
-+ assert.Equal(t, client.Voter, cluster[3].Role)
-+}
-+
-+// If a voter goes offline, another node takes its place. If possible, pick a
-+// voter from a failure domain which differs from the one of the two other
-+// voters.
-+func TestRolesAdjustment_ReplaceVoterHonorFailureDomain(t *testing.T) {
-+ n := 6
-+ apps := make([]*app.App, n)
-+ cleanups := make([]func(), n)
-+
-+ for i := 0; i < n; i++ {
-+ addr := fmt.Sprintf("127.0.0.1:900%d", i+1)
-+ options := []app.Option{
-+ app.WithAddress(addr),
-+ app.WithRolesAdjustmentFrequency(500 * time.Millisecond),
-+ app.WithFailureDomain(uint64(i % 3)),
-+ }
-+ if i > 0 {
-+ options = append(options, app.WithCluster([]string{"127.0.0.1:9001"}))
-+ }
-+
-+ app, cleanup := newApp(t, options...)
-+
-+ require.NoError(t, app.Ready(context.Background()))
-+
-+ apps[i] = app
-+ cleanups[i] = cleanup
-+ }
-+
-+ defer cleanups[0]()
-+ defer cleanups[1]()
-+ defer cleanups[3]()
-+ defer cleanups[4]()
-+ defer cleanups[5]()
-+
-+ // A voter in failure domain 2 goes offline.
-+ cleanups[2]()
-+
-+ time.Sleep(2 * time.Second)
-+
-+ cli, err := apps[0].Leader(context.Background())
-+ require.NoError(t, err)
-+ defer cli.Close()
-+
-+ cluster, err := cli.Cluster(context.Background())
-+ require.NoError(t, err)
-+
-+ // The replacement was picked in the same failure domain.
-+ assert.Equal(t, client.Voter, cluster[0].Role)
-+ assert.Equal(t, client.Voter, cluster[1].Role)
-+ assert.Equal(t, client.Spare, cluster[2].Role)
-+ assert.Equal(t, client.StandBy, cluster[3].Role)
-+ assert.Equal(t, client.StandBy, cluster[4].Role)
-+ assert.Equal(t, client.Voter, cluster[5].Role)
-+}
-+
-+// If a voter goes offline, another node takes its place. Preference will be
-+// given to candidates with lower weights.
-+func TestRolesAdjustment_ReplaceVoterHonorWeight(t *testing.T) {
-+ n := 6
-+ apps := make([]*app.App, n)
-+ cleanups := make([]func(), n)
-+
-+ for i := 0; i < n; i++ {
-+ addr := fmt.Sprintf("127.0.0.1:900%d", i+1)
-+ options := []app.Option{
-+ app.WithAddress(addr),
-+ app.WithRolesAdjustmentFrequency(500 * time.Millisecond),
-+ }
-+ if i > 0 {
-+ options = append(options, app.WithCluster([]string{"127.0.0.1:9001"}))
-+ }
-+
-+ app, cleanup := newApp(t, options...)
-+
-+ require.NoError(t, app.Ready(context.Background()))
-+
-+ apps[i] = app
-+ cleanups[i] = cleanup
-+ }
-+
-+ defer cleanups[0]()
-+ defer cleanups[1]()
-+ defer cleanups[3]()
-+ defer cleanups[4]()
-+ defer cleanups[5]()
-+
-+ // A voter in failure domain 2 goes offline.
-+ cleanups[2]()
-+
-+ cli, err := apps[3].Client(context.Background())
-+ require.NoError(t, err)
-+ require.NoError(t, cli.Weight(context.Background(), uint64(15)))
-+ defer cli.Close()
-+
-+ cli, err = apps[4].Client(context.Background())
-+ require.NoError(t, err)
-+ require.NoError(t, cli.Weight(context.Background(), uint64(5)))
-+ defer cli.Close()
-+
-+ cli, err = apps[5].Client(context.Background())
-+ require.NoError(t, err)
-+ require.NoError(t, cli.Weight(context.Background(), uint64(10)))
-+ defer cli.Close()
-+
-+ time.Sleep(2 * time.Second)
-+
-+ cli, err = apps[0].Leader(context.Background())
-+ require.NoError(t, err)
-+ defer cli.Close()
-+
-+ cluster, err := cli.Cluster(context.Background())
-+ require.NoError(t, err)
-+
-+ // The stand-by with the lowest weight was picked.
-+ assert.Equal(t, client.Voter, cluster[0].Role)
-+ assert.Equal(t, client.Voter, cluster[1].Role)
-+ assert.Equal(t, client.Spare, cluster[2].Role)
-+ assert.Equal(t, client.StandBy, cluster[3].Role)
-+ assert.Equal(t, client.Voter, cluster[4].Role)
-+ assert.Equal(t, client.StandBy, cluster[5].Role)
-+}
-+
-+// If a voter goes offline, but no another node can its place, then nothing
-+// chagnes.
-+func TestRolesAdjustment_CantReplaceVoter(t *testing.T) {
-+ n := 4
-+ apps := make([]*app.App, n)
-+ cleanups := make([]func(), n)
-+
-+ for i := 0; i < n; i++ {
-+ addr := fmt.Sprintf("127.0.0.1:900%d", i+1)
-+ options := []app.Option{
-+ app.WithAddress(addr),
-+ app.WithRolesAdjustmentFrequency(500 * time.Millisecond),
-+ }
-+ if i > 0 {
-+ options = append(options, app.WithCluster([]string{"127.0.0.1:9001"}))
-+ }
-+
-+ app, cleanup := newApp(t, options...)
-+
-+ require.NoError(t, app.Ready(context.Background()))
-+
-+ apps[i] = app
-+ cleanups[i] = cleanup
-+ }
-+
-+ defer cleanups[0]()
-+ defer cleanups[1]()
-+
-+ // A voter and a spare go offline.
-+ cleanups[3]()
-+ cleanups[2]()
-+
-+ time.Sleep(2 * time.Second)
-+
-+ cli, err := apps[0].Leader(context.Background())
-+ require.NoError(t, err)
-+ defer cli.Close()
-+
-+ cluster, err := cli.Cluster(context.Background())
-+ require.NoError(t, err)
-+
-+ assert.Equal(t, client.Voter, cluster[0].Role)
-+ assert.Equal(t, client.Voter, cluster[1].Role)
-+ assert.Equal(t, client.Voter, cluster[2].Role)
-+ assert.Equal(t, client.StandBy, cluster[3].Role)
-+}
-+
-+// If a stand-by goes offline, another node takes its place.
-+func TestRolesAdjustment_ReplaceStandBy(t *testing.T) {
-+ n := 7
-+ apps := make([]*app.App, n)
-+ cleanups := make([]func(), n)
-+
-+ for i := 0; i < n; i++ {
-+ addr := fmt.Sprintf("127.0.0.1:900%d", i+1)
-+ options := []app.Option{
-+ app.WithAddress(addr),
-+ app.WithRolesAdjustmentFrequency(500 * time.Millisecond),
-+ }
-+ if i > 0 {
-+ options = append(options, app.WithCluster([]string{"127.0.0.1:9001"}))
-+ }
-+
-+ app, cleanup := newApp(t, options...)
-+
-+ require.NoError(t, app.Ready(context.Background()))
-+
-+ apps[i] = app
-+ cleanups[i] = cleanup
-+ }
-+
-+ defer cleanups[0]()
-+ defer cleanups[1]()
-+ defer cleanups[2]()
-+ defer cleanups[3]()
-+ defer cleanups[5]()
-+ defer cleanups[6]()
-+
-+ // A stand-by goes offline.
-+ cleanups[4]()
-+
-+ time.Sleep(2 * time.Second)
-+
-+ cli, err := apps[0].Leader(context.Background())
-+ require.NoError(t, err)
-+ defer cli.Close()
-+
-+ cluster, err := cli.Cluster(context.Background())
-+ require.NoError(t, err)
-+
-+ assert.Equal(t, client.Voter, cluster[0].Role)
-+ assert.Equal(t, client.Voter, cluster[1].Role)
-+ assert.Equal(t, client.Voter, cluster[2].Role)
-+ assert.Equal(t, client.StandBy, cluster[3].Role)
-+ assert.Equal(t, client.Spare, cluster[4].Role)
-+ assert.Equal(t, client.StandBy, cluster[5].Role)
-+ assert.Equal(t, client.StandBy, cluster[6].Role)
-+}
-+
-+// If a stand-by goes offline, another node takes its place. If possible, pick
-+// a stand-by from a failure domain which differs from the one of the two other
-+// stand-bys.
-+func TestRolesAdjustment_ReplaceStandByHonorFailureDomains(t *testing.T) {
-+ n := 9
-+ apps := make([]*app.App, n)
-+ cleanups := make([]func(), n)
-+
-+ for i := 0; i < n; i++ {
-+ addr := fmt.Sprintf("127.0.0.1:900%d", i+1)
-+ options := []app.Option{
-+ app.WithAddress(addr),
-+ app.WithRolesAdjustmentFrequency(500 * time.Millisecond),
-+ app.WithFailureDomain(uint64(i % 3)),
-+ }
-+ if i > 0 {
-+ options = append(options, app.WithCluster([]string{"127.0.0.1:9001"}))
-+ }
-+
-+ app, cleanup := newApp(t, options...)
-+
-+ require.NoError(t, app.Ready(context.Background()))
-+
-+ apps[i] = app
-+ cleanups[i] = cleanup
-+ }
-+
-+ defer cleanups[0]()
-+ defer cleanups[1]()
-+ defer cleanups[2]()
-+ defer cleanups[3]()
-+ defer cleanups[5]()
-+ defer cleanups[6]()
-+ defer cleanups[7]()
-+ defer cleanups[8]()
-+
-+ // A stand-by from failure domain 1 goes offline.
-+ cleanups[4]()
-+
-+ time.Sleep(2 * time.Second)
-+
-+ cli, err := apps[0].Leader(context.Background())
-+ require.NoError(t, err)
-+ defer cli.Close()
-+
-+ cluster, err := cli.Cluster(context.Background())
-+ require.NoError(t, err)
-+
-+ // The replacement was picked in the same failure domain.
-+ assert.Equal(t, client.Voter, cluster[0].Role)
-+ assert.Equal(t, client.Voter, cluster[1].Role)
-+ assert.Equal(t, client.Voter, cluster[2].Role)
-+ assert.Equal(t, client.StandBy, cluster[3].Role)
-+ assert.Equal(t, client.Spare, cluster[4].Role)
-+ assert.Equal(t, client.StandBy, cluster[5].Role)
-+ assert.Equal(t, client.Spare, cluster[6].Role)
-+ assert.Equal(t, client.StandBy, cluster[7].Role)
-+ assert.Equal(t, client.Spare, cluster[8].Role)
-+}
-+
-+// Open a database on a fresh one-node cluster.
-+func TestOpen(t *testing.T) {
-+ app, cleanup := newApp(t)
-+ defer cleanup()
-+
-+ db, err := app.Open(context.Background(), "test")
-+ require.NoError(t, err)
-+ defer db.Close()
-+
-+ _, err = db.ExecContext(context.Background(), "CREATE TABLE foo(n INT)")
-+ assert.NoError(t, err)
-+}
-+
-+// Test client connections dropping uncleanly.
-+func TestProxy_Error(t *testing.T) {
-+ cert, pool := loadCert(t)
-+ dial := client.DialFuncWithTLS(client.DefaultDialFunc, app.SimpleDialTLSConfig(cert, pool))
-+
-+ _, cleanup := newApp(t, app.WithAddress("127.0.0.1:9000"))
-+ defer cleanup()
-+
-+ // Simulate a client which writes the protocol header, then a Leader
-+ // request and finally drops before reading the response.
-+ conn, err := dial(context.Background(), "127.0.0.1:9000")
-+ require.NoError(t, err)
-+
-+ protocol := make([]byte, 8)
-+ binary.LittleEndian.PutUint64(protocol, uint64(1))
-+
-+ n, err := conn.Write(protocol)
-+ require.NoError(t, err)
-+ assert.Equal(t, n, 8)
-+
-+ header := make([]byte, 8)
-+ binary.LittleEndian.PutUint32(header[0:], 1)
-+ header[4] = 0
-+ header[5] = 0
-+ binary.LittleEndian.PutUint16(header[6:], 0)
-+
-+ n, err = conn.Write(header)
-+ require.NoError(t, err)
-+ assert.Equal(t, n, 8)
-+
-+ body := make([]byte, 8)
-+ n, err = conn.Write(body)
-+ require.NoError(t, err)
-+ assert.Equal(t, n, 8)
-+
-+ time.Sleep(100 * time.Millisecond)
-+ conn.Close()
-+ time.Sleep(250 * time.Millisecond)
-+}
-+
-+// If the given context is cancelled before initial tasks are completed, an
-+// error is returned.
-+func TestReady_Cancel(t *testing.T) {
-+ app, cleanup := newApp(t, app.WithAddress("127.0.0.1:9002"), app.WithCluster([]string{"127.0.0.1:9001"}))
-+ defer cleanup()
-+
-+ ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
-+ defer cancel()
-+
-+ err := app.Ready(ctx)
-+
-+ assert.Equal(t, ctx.Err(), err)
-+}
-+
-+func newApp(t *testing.T, options ...app.Option) (*app.App, func()) {
-+ t.Helper()
-+
-+ dir, dirCleanup := newDir(t)
-+
-+ app, appCleanup := newAppWithDir(t, dir, options...)
-+
-+ cleanup := func() {
-+ appCleanup()
-+ dirCleanup()
-+ }
-+
-+ return app, cleanup
-+}
-+
-+func newAppWithDir(t *testing.T, dir string, options ...app.Option) (*app.App, func()) {
-+ t.Helper()
-+
-+ appIndex++
-+
-+ index := appIndex
-+ log := func(l client.LogLevel, format string, a ...interface{}) {
-+ format = fmt.Sprintf("%s - %d: %s: %s", time.Now().Format("15:04:01.000"), index, l.String(), format)
-+ t.Logf(format, a...)
-+ }
-+
-+ cert, pool := loadCert(t)
-+ options = append(options, app.WithLogFunc(log), app.WithTLS(app.SimpleTLSConfig(cert, pool)))
-+
-+ app, err := app.New(dir, options...)
-+ require.NoError(t, err)
-+
-+ cleanup := func() {
-+ require.NoError(t, app.Close())
-+ }
-+
-+ return app, cleanup
-+}
-+
-+// Loads the test TLS certificates.
-+func loadCert(t *testing.T) (tls.Certificate, *x509.CertPool) {
-+ t.Helper()
-+
-+ crt := filepath.Join("testdata", "cluster.crt")
-+ key := filepath.Join("testdata", "cluster.key")
-+
-+ keypair, err := tls.LoadX509KeyPair(crt, key)
-+ require.NoError(t, err)
-+
-+ data, err := ioutil.ReadFile(crt)
-+ require.NoError(t, err)
-+
-+ pool := x509.NewCertPool()
-+ if !pool.AppendCertsFromPEM(data) {
-+ t.Fatal("bad certificate")
-+ }
-+
-+ return keypair, pool
-+}
-+
-+var appIndex int
-+
-+// Return a new temporary directory.
-+func newDir(t *testing.T) (string, func()) {
-+ t.Helper()
-+
-+ dir, err := ioutil.TempDir("", "dqlite-app-test-")
-+ assert.NoError(t, err)
-+
-+ cleanup := func() {
-+ os.RemoveAll(dir)
-+ }
-+
-+ return dir, cleanup
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/app/dial.go b/vendor/github.com/canonical/go-dqlite/app/dial.go
-new file mode 100644
-index 00000000000..fbe75e7248e
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/app/dial.go
-@@ -0,0 +1,41 @@
-+package app
-+
-+import (
-+ "context"
-+ "crypto/tls"
-+ "net"
-+
-+ "github.com/canonical/go-dqlite/client"
-+ "github.com/pkg/errors"
-+)
-+
-+// Like client.DialFuncWithTLS but also starts the proxy, since the raft
-+// connect function only supports Unix and TCP connections.
-+func makeNodeDialFunc(config *tls.Config) client.DialFunc {
-+ dial := func(ctx context.Context, addr string) (net.Conn, error) {
-+ clonedConfig := config.Clone()
-+ if len(clonedConfig.ServerName) == 0 {
-+
-+ remoteIP, _, err := net.SplitHostPort(addr)
-+ if err != nil {
-+ return nil, err
-+ }
-+ clonedConfig.ServerName = remoteIP
-+ }
-+ dialer := &net.Dialer{}
-+ conn, err := dialer.DialContext(ctx, "tcp", addr)
-+ if err != nil {
-+ return nil, err
-+ }
-+ goUnix, cUnix, err := socketpair()
-+ if err != nil {
-+ return nil, errors.Wrap(err, "create pair of Unix sockets")
-+ }
-+
-+ go proxy(context.Background(), conn, goUnix, clonedConfig)
-+
-+ return cUnix, nil
-+ }
-+
-+ return dial
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/app/example_test.go b/vendor/github.com/canonical/go-dqlite/app/example_test.go
-new file mode 100644
-index 00000000000..43ce237ba02
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/app/example_test.go
-@@ -0,0 +1,112 @@
-+package app_test
-+
-+import (
-+ "fmt"
-+ "io/ioutil"
-+ "os"
-+
-+ "github.com/canonical/go-dqlite/app"
-+)
-+
-+// To start the first node of a dqlite cluster for the first time, its network
-+// address should be specified using the app.WithAddress() option.
-+//
-+// When the node is restarted a second time, the app.WithAddress() option might
-+// be omitted, since the node address will be persisted in the info.yaml file.
-+//
-+// The very first node has always the same ID (dqlite.BootstrapID).
-+func Example() {
-+ dir, err := ioutil.TempDir("", "dqlite-app-example-")
-+ if err != nil {
-+ return
-+ }
-+ defer os.RemoveAll(dir)
-+
-+ node, err := app.New(dir, app.WithAddress("127.0.0.1:9001"))
-+ if err != nil {
-+ return
-+ }
-+
-+ fmt.Printf("0x%x %s\n", node.ID(), node.Address())
-+ // Output: 0x2dc171858c3155be 127.0.0.1:9001
-+
-+ if err := node.Close(); err != nil {
-+ return
-+ }
-+
-+ node, err = app.New(dir)
-+ if err != nil {
-+ return
-+ }
-+ defer node.Close()
-+
-+ fmt.Printf("0x%x %s\n", node.ID(), node.Address())
-+ // Output: 0x2dc171858c3155be 127.0.0.1:9001
-+ // 0x2dc171858c3155be 127.0.0.1:9001
-+}
-+
-+// After starting the very first node, a second node can be started by passing
-+// the address of the first node using the app.WithCluster() option.
-+//
-+// In general additional nodes can be started by specifying one or more
-+// addresses of existing nodes using the app.Cluster() option.
-+//
-+// When the node is restarted a second time, the app.WithCluster() option might
-+// be omitted, since the node has already joined the cluster.
-+//
-+// Each additional node will be automatically assigned a unique ID.
-+func ExampleWithCluster() {
-+ dir1, err := ioutil.TempDir("", "dqlite-app-example-")
-+ if err != nil {
-+ return
-+ }
-+ defer os.RemoveAll(dir1)
-+
-+ dir2, err := ioutil.TempDir("", "dqlite-app-example-")
-+ if err != nil {
-+ return
-+ }
-+ defer os.RemoveAll(dir2)
-+
-+ dir3, err := ioutil.TempDir("", "dqlite-app-example-")
-+ if err != nil {
-+ return
-+ }
-+ defer os.RemoveAll(dir3)
-+
-+ node1, err := app.New(dir1, app.WithAddress("127.0.0.1:9001"))
-+ if err != nil {
-+ return
-+ }
-+ defer node1.Close()
-+
-+ node2, err := app.New(dir2, app.WithAddress("127.0.0.1:9002"), app.WithCluster([]string{"127.0.0.1:9001"}))
-+ if err != nil {
-+ return
-+ }
-+ defer node2.Close()
-+
-+ node3, err := app.New(dir3, app.WithAddress("127.0.0.1:9003"), app.WithCluster([]string{"127.0.0.1:9001"}))
-+ if err != nil {
-+ return
-+ }
-+
-+ fmt.Println(node1.ID() != node2.ID(), node1.ID() != node3.ID(), node2.ID() != node3.ID())
-+ // Output: true true true
-+
-+ // Restart the third node, the only argument we need to pass to
-+ // app.New() is its dir.
-+ id3 := node3.ID()
-+ if err := node3.Close(); err != nil {
-+ return
-+ }
-+
-+ node3, err = app.New(dir3)
-+ if err != nil {
-+ return
-+ }
-+ defer node3.Close()
-+
-+ fmt.Println(node3.ID() == id3, node3.Address())
-+ // true 127.0.0.1:9003
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/app/files.go b/vendor/github.com/canonical/go-dqlite/app/files.go
-new file mode 100644
-index 00000000000..758882e8195
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/app/files.go
-@@ -0,0 +1,80 @@
-+package app
-+
-+import (
-+ "fmt"
-+ "io/ioutil"
-+ "os"
-+ "path/filepath"
-+
-+ "github.com/ghodss/yaml"
-+)
-+
-+const (
-+ // Store the node ID and address.
-+ infoFile = "info.yaml"
-+
-+ // The node store file.
-+ storeFile = "cluster.yaml"
-+
-+ // This is a "flag" file to signal when a brand new node needs to join
-+ // the cluster. In case the node doesn't successfully make it to join
-+ // the cluster first time it's started, it will re-try the next time.
-+ joinFile = "join"
-+)
-+
-+// Return true if the given file exists in the given directory.
-+func fileExists(dir, file string) (bool, error) {
-+ path := filepath.Join(dir, file)
-+
-+ if _, err := os.Stat(path); err != nil {
-+ if !os.IsNotExist(err) {
-+ return false, fmt.Errorf("check if %s exists: %w", file, err)
-+ }
-+ return false, nil
-+ }
-+
-+ return true, nil
-+}
-+
-+// Write a file in the given directory.
-+func fileWrite(dir, file string, data []byte) error {
-+ path := filepath.Join(dir, file)
-+
-+ if err := ioutil.WriteFile(path, data, 0600); err != nil {
-+ return fmt.Errorf("write %s: %w", file, err)
-+ }
-+
-+ return nil
-+}
-+
-+// Marshal the given object as YAML into the given file.
-+func fileMarshal(dir, file string, object interface{}) error {
-+ data, err := yaml.Marshal(object)
-+ if err != nil {
-+ return fmt.Errorf("marshall %s: %w", file, err)
-+ }
-+ if err := fileWrite(dir, file, data); err != nil {
-+ return err
-+ }
-+ return nil
-+}
-+
-+// Unmarshal the given YAML file into the given object.
-+func fileUnmarshal(dir, file string, object interface{}) error {
-+ path := filepath.Join(dir, file)
-+
-+ data, err := ioutil.ReadFile(path)
-+ if err != nil {
-+ return fmt.Errorf("read %s: %w", file, err)
-+ }
-+ if err := yaml.Unmarshal(data, object); err != nil {
-+ return fmt.Errorf("unmarshall %s: %w", file, err)
-+ }
-+
-+ return nil
-+}
-+
-+// Remove a file in the given directory.
-+func fileRemove(dir, file string) error {
-+ return os.Remove(filepath.Join(dir, file))
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/app/options.go b/vendor/github.com/canonical/go-dqlite/app/options.go
-new file mode 100644
-index 00000000000..29ecf3f8896
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/app/options.go
-@@ -0,0 +1,207 @@
-+package app
-+
-+import (
-+ "crypto/tls"
-+ "fmt"
-+ "log"
-+ "net"
-+ "time"
-+
-+ "github.com/canonical/go-dqlite/client"
-+)
-+
-+// Option can be used to tweak app parameters.
-+type Option func(*options)
-+
-+// WithAddress sets the network address of the application node.
-+//
-+// Other application nodes must be able to connect to this application node
-+// using the given address.
-+//
-+// If the application node is not the first one in the cluster, the address
-+// must match the value that was passed to the App.Add() method upon
-+// registration.
-+//
-+// If not given the first non-loopback IP address of any of the system network
-+// interfaces will be used, with port 9000.
-+//
-+// The address must be stable across application restarts.
-+func WithAddress(address string) Option {
-+ return func(options *options) {
-+ options.Address = address
-+ }
-+}
-+
-+// WithCluster must be used when starting a newly added application node for
-+// the first time.
-+//
-+// It should contain the addresses of one or more applications nodes which are
-+// already part of the cluster.
-+func WithCluster(cluster []string) Option {
-+ return func(options *options) {
-+ options.Cluster = cluster
-+ }
-+}
-+
-+// WithTLS enables TLS encryption of network traffic.
-+//
-+// The "listen" parameter must hold the TLS configuration to use when accepting
-+// incoming connections clients or application nodes.
-+//
-+// The "dial" parameter must hold the TLS configuration to use when
-+// establishing outgoing connections to other application nodes.
-+func WithTLS(listen *tls.Config, dial *tls.Config) Option {
-+ return func(options *options) {
-+ options.TLS = &tlsSetup{
-+ Listen: listen,
-+ Dial: dial,
-+ }
-+ }
-+}
-+
-+// WithVoters sets the number of nodes in the cluster that should have the
-+// Voter role.
-+//
-+// When a new node is added to the cluster or it is started again after a
-+// shutdown it will be assigned the Voter role in case the current number of
-+// voters is below n.
-+//
-+// Similarly when a node with the Voter role is shutdown gracefully by calling
-+// the Handover() method, it will try to transfer its Voter role to another
-+// non-Voter node, if one is available.
-+//
-+// All App instances in a cluster must be created with the same WithVoters
-+// setting.
-+//
-+// The given value must be an odd number greater than one.
-+//
-+// The default value is 3.
-+func WithVoters(n int) Option {
-+ return func(options *options) {
-+ options.Voters = n
-+ }
-+}
-+
-+// WithStandBys sets the number of nodes in the cluster that should have the
-+// StandBy role.
-+//
-+// When a new node is added to the cluster or it is started again after a
-+// shutdown it will be assigned the StandBy role in case there are already
-+// enough online voters, but the current number of stand-bys is below n.
-+//
-+// Similarly when a node with the StandBy role is shutdown gracefully by
-+// calling the Handover() method, it will try to transfer its StandBy role to
-+// another non-StandBy node, if one is available.
-+//
-+// All App instances in a cluster must be created with the same WithStandBys
-+// setting.
-+//
-+// The given value must be an odd number.
-+//
-+// The default value is 3.
-+func WithStandBys(n int) Option {
-+ return func(options *options) {
-+ options.StandBys = n
-+ }
-+}
-+
-+// WithRolesAdjustmentFrequency sets the frequency at which the current cluster
-+// leader will check if the roles of the various nodes in the cluster matches
-+// the desired setup and perform promotions/demotions to adjust the situation
-+// if needed.
-+//
-+// The default is 30 seconds.
-+func WithRolesAdjustmentFrequency(frequency time.Duration) Option {
-+ return func(options *options) {
-+ options.RolesAdjustmentFrequency = frequency
-+ }
-+}
-+
-+// WithLogFunc sets a custom log function.
-+func WithLogFunc(log client.LogFunc) Option {
-+ return func(options *options) {
-+ options.Log = log
-+ }
-+}
-+
-+// WithFailureDomain sets the node's failure domain.
-+//
-+// Failure domains are taken into account when deciding which nodes to promote
-+// to Voter or StandBy when needed.
-+func WithFailureDomain(code uint64) Option {
-+ return func(options *options) {
-+ options.FailureDomain = code
-+ }
-+}
-+
-+// WithNetworkLatency sets the average one-way network latency.
-+func WithNetworkLatency(latency time.Duration) Option {
-+ return func(options *options) {
-+ options.NetworkLatency = latency
-+ }
-+}
-+
-+type tlsSetup struct {
-+ Listen *tls.Config
-+ Dial *tls.Config
-+}
-+
-+type options struct {
-+ Address string
-+ Cluster []string
-+ Log client.LogFunc
-+ TLS *tlsSetup
-+ Voters int
-+ StandBys int
-+ RolesAdjustmentFrequency time.Duration
-+ FailureDomain uint64
-+ NetworkLatency time.Duration
-+}
-+
-+// Create a options object with sane defaults.
-+func defaultOptions() *options {
-+ return &options{
-+ Log: defaultLogFunc,
-+ Voters: 3,
-+ StandBys: 3,
-+ RolesAdjustmentFrequency: 30 * time.Second,
-+ }
-+}
-+
-+func isLoopback(iface *net.Interface) bool {
-+ return int(iface.Flags&net.FlagLoopback) > 0
-+}
-+
-+func defaultAddress() string {
-+ ifaces, err := net.Interfaces()
-+ if err != nil {
-+ return ""
-+ }
-+ for _, iface := range ifaces {
-+ if isLoopback(&iface) {
-+ continue
-+ }
-+ addrs, err := iface.Addrs()
-+ if err != nil {
-+ continue
-+ }
-+ if len(addrs) == 0 {
-+ continue
-+ }
-+ addr, ok := addrs[0].(*net.IPNet)
-+ if !ok {
-+ continue
-+ }
-+ return addr.IP.String() + ":9000"
-+ }
-+ return ""
-+}
-+
-+func defaultLogFunc(l client.LogLevel, format string, a ...interface{}) {
-+ // Log only error messages
-+ if l != client.LogError {
-+ return
-+ }
-+ msg := fmt.Sprintf("["+l.String()+"]"+" dqlite: "+format, a...)
-+ log.Printf(msg)
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/app/proxy.go b/vendor/github.com/canonical/go-dqlite/app/proxy.go
-new file mode 100644
-index 00000000000..38870551aaf
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/app/proxy.go
-@@ -0,0 +1,167 @@
-+package app
-+
-+import (
-+ "context"
-+ "crypto/tls"
-+ "fmt"
-+ "io"
-+ "net"
-+ "os"
-+ "syscall"
-+ "time"
-+)
-+
-+// Copies data between a remote TCP network connection (possibly with TLS) and
-+// a local unix socket.
-+//
-+// The function will return if one of the following events occurs:
-+//
-+// - the other end of the remote network socket closes the connection
-+// - the other end of the local unix socket closes the connection
-+// - the context is cancelled
-+// - an error occurs when writing or reading data
-+//
-+// In case of errors, details are returned.
-+func proxy(ctx context.Context, remote net.Conn, local net.Conn, config *tls.Config) error {
-+ tcp := remote.(*net.TCPConn)
-+
-+ if err := setKeepalive(tcp); err != nil {
-+ return err
-+ }
-+
-+ if config != nil {
-+ if config.ClientCAs != nil {
-+ remote = tls.Server(remote, config)
-+ } else {
-+ remote = tls.Client(remote, config)
-+ }
-+ }
-+
-+ remoteToLocal := make(chan error, 0)
-+ localToRemote := make(chan error, 0)
-+
-+ // Start copying data back and forth until either the client or the
-+ // server get closed or hit an error.
-+ go func() {
-+ _, err := io.Copy(local, remote)
-+ remoteToLocal <- err
-+ }()
-+
-+ go func() {
-+ _, err := io.Copy(remote, local)
-+ localToRemote <- err
-+ }()
-+
-+ errs := make([]error, 2)
-+
-+ select {
-+ case <-ctx.Done():
-+ // Force closing, ignore errors.
-+ remote.Close()
-+ local.Close()
-+ <-remoteToLocal
-+ <-localToRemote
-+ case err := <-remoteToLocal:
-+ if err != nil {
-+ errs[0] = fmt.Errorf("remote -> local: %v", err)
-+ }
-+ local.(*net.UnixConn).CloseRead()
-+ if err := <-localToRemote; err != nil {
-+ errs[1] = fmt.Errorf("local -> remote: %v", err)
-+ }
-+ remote.Close()
-+ local.Close()
-+ case err := <-localToRemote:
-+ if err != nil {
-+ errs[0] = fmt.Errorf("local -> remote: %v", err)
-+ }
-+ tcp.CloseRead()
-+ if err := <-remoteToLocal; err != nil {
-+ errs[1] = fmt.Errorf("remote -> local: %v", err)
-+ }
-+ local.Close()
-+
-+ }
-+
-+ if errs[0] != nil || errs[1] != nil {
-+ return proxyError{first: errs[0], second: errs[1]}
-+ }
-+
-+ return nil
-+}
-+
-+// Set TCP keepalive with 30 seconds idle time, 3 seconds retry interval with
-+// at most 3 retries.
-+//
-+// See https://thenotexpert.com/golang-tcp-keepalive/.
-+func setKeepalive(conn *net.TCPConn) error {
-+ conn.SetKeepAlive(true)
-+ conn.SetKeepAlivePeriod(time.Second * 30)
-+
-+ raw, err := conn.SyscallConn()
-+ if err != nil {
-+ return err
-+ }
-+
-+ raw.Control(
-+ func(ptr uintptr) {
-+ fd := int(ptr)
-+ // Number of probes.
-+ err = syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, syscall.TCP_KEEPCNT, 3)
-+ if err != nil {
-+ return
-+ }
-+ // Wait time after an unsuccessful probe.
-+ err = syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, syscall.TCP_KEEPINTVL, 3)
-+ if err != nil {
-+ return
-+ }
-+ })
-+ return err
-+}
-+
-+// Returns a pair of connected unix sockets.
-+func socketpair() (net.Conn, net.Conn, error) {
-+ fds, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_STREAM, 0)
-+ if err != nil {
-+ return nil, nil, err
-+ }
-+
-+ c1, err := fdToFileConn(fds[0])
-+ if err != nil {
-+ return nil, nil, err
-+ }
-+
-+ c2, err := fdToFileConn(fds[1])
-+ if err != nil {
-+ c1.Close()
-+ return nil, nil, err
-+ }
-+
-+ return c1, c2, err
-+}
-+
-+func fdToFileConn(fd int) (net.Conn, error) {
-+ f := os.NewFile(uintptr(fd), "")
-+ defer f.Close()
-+ return net.FileConn(f)
-+}
-+
-+type proxyError struct {
-+ first error
-+ second error
-+}
-+
-+func (e proxyError) Error() string {
-+ msg := ""
-+ if e.first != nil {
-+ msg += "first: " + e.first.Error()
-+ }
-+ if e.second != nil {
-+ if e.first != nil {
-+ msg += " "
-+ }
-+ msg += "second: " + e.second.Error()
-+ }
-+ return msg
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/app/roles.go b/vendor/github.com/canonical/go-dqlite/app/roles.go
-new file mode 100644
-index 00000000000..5b24bdf3c73
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/app/roles.go
-@@ -0,0 +1,305 @@
-+package app
-+
-+import (
-+ "sort"
-+
-+ "github.com/canonical/go-dqlite/client"
-+)
-+
-+const minVoters = 3
-+
-+// RolesConfig can be used to tweak the algorithm implemented by RolesChanges.
-+type RolesConfig struct {
-+ Voters int // Target number of voters, 3 by default.
-+ StandBys int // Target number of stand-bys, 3 by default.
-+}
-+
-+// RolesChanges implements an algorithm to take decisions about which node
-+// should have which role in a cluster.
-+//
-+// You normally don't need to use this data structure since it's already
-+// transparently wired into the high-level App object. However this is exposed
-+// for users who don't want to use the high-level App object but still want to
-+// implement the same roles management algorithm.
-+type RolesChanges struct {
-+ // Algorithm configuration.
-+ Config RolesConfig
-+
-+ // Current state of the cluster. Each node in the cluster must be
-+ // present as a key in the map, and its value should be its associated
-+ // failure domain and weight metadata or nil if the node is currently
-+ // offline.
-+ State map[client.NodeInfo]*client.NodeMetadata
-+}
-+
-+// Assume decides if a node should assume a different role than the one it
-+// currently has. It should normally be run at node startup, where the
-+// algorithm might decide that the node should assume the Voter or Stand-By
-+// role in case there's a shortage of them.
-+//
-+// Return -1 in case no role change is needed.
-+func (c *RolesChanges) Assume(id uint64) client.NodeRole {
-+ // If the cluster is still too small, do nothing.
-+ if c.size() < minVoters {
-+ return -1
-+ }
-+
-+ node := c.get(id)
-+
-+ // If we are not in the cluster, it means we were removed, just do nothing.
-+ if node == nil {
-+ return -1
-+ }
-+
-+ // If we already have the Voter or StandBy role, there's nothing to do.
-+ if node.Role == client.Voter || node.Role == client.StandBy {
-+ return -1
-+ }
-+
-+ onlineVoters := c.list(client.Voter, true)
-+ onlineStandbys := c.list(client.StandBy, true)
-+
-+ // If we have already the desired number of online voters and
-+ // stand-bys, there's nothing to do.
-+ if len(onlineVoters) >= c.Config.Voters && len(onlineStandbys) >= c.Config.StandBys {
-+ return -1
-+ }
-+
-+ // Figure if we need to become stand-by or voter.
-+ role := client.StandBy
-+ if len(onlineVoters) < c.Config.Voters {
-+ role = client.Voter
-+ }
-+
-+ return role
-+}
-+
-+// Handover decides if a node should transfer its current role to another
-+// node. This is typically run when the node is shutting down and is hence going to be offline soon.
-+//
-+// Return the role that should be handed over and list of candidates that
-+// should receive it, in order of preference.
-+func (c *RolesChanges) Handover(id uint64) (client.NodeRole, []client.NodeInfo) {
-+ node := c.get(id)
-+
-+ // If we are not in the cluster, it means we were removed, just do nothing.
-+ if node == nil {
-+ return -1, nil
-+ }
-+
-+ // If we aren't a voter or a stand-by, there's nothing to do.
-+ if node.Role != client.Voter && node.Role != client.StandBy {
-+ return -1, nil
-+ }
-+
-+ // Make a list of all online nodes with the same role and get their
-+ // failure domains.
-+ peers := c.list(node.Role, true)
-+ for i := range peers {
-+ if peers[i].ID == node.ID {
-+ peers = append(peers[:i], peers[i+1:]...)
-+ break
-+ }
-+ }
-+ domains := c.failureDomains(peers)
-+
-+ // Online spare nodes are always candidates.
-+ candidates := c.list(client.Spare, true)
-+
-+ // Stand-by nodes are candidates if we need to transfer voting
-+ // rights, and they are preferred over spares.
-+ if node.Role == client.Voter {
-+ candidates = append(c.list(client.StandBy, true), candidates...)
-+ }
-+
-+ if len(candidates) == 0 {
-+ // No online node available to be promoted.
-+ return -1, nil
-+ }
-+
-+ c.sortCandidates(candidates, domains)
-+
-+ return node.Role, candidates
-+}
-+
-+// Adjust decides if there should be changes in the current roles.
-+//
-+// Return the role that should be assigned and a list of candidates that should
-+// assume it, in order of preference.
-+func (c *RolesChanges) Adjust(leader uint64) (client.NodeRole, []client.NodeInfo) {
-+ if c.size() == 1 {
-+ return -1, nil
-+ }
-+
-+ // If the cluster is too small, make sure we have just one voter (us).
-+ if c.size() < minVoters {
-+ for node := range c.State {
-+ if node.ID == leader || node.Role != client.Voter {
-+ continue
-+ }
-+ return client.Spare, []client.NodeInfo{node}
-+ }
-+ return -1, nil
-+ }
-+
-+ onlineVoters := c.list(client.Voter, true)
-+ onlineStandbys := c.list(client.StandBy, true)
-+ offlineVoters := c.list(client.Voter, false)
-+ offlineStandbys := c.list(client.StandBy, false)
-+
-+ // If we have exactly the desired number of voters and stand-bys, and they are all
-+ // online, we're good.
-+ if len(offlineVoters) == 0 && len(onlineVoters) == c.Config.Voters && len(offlineStandbys) == 0 && len(onlineStandbys) == c.Config.StandBys {
-+ return -1, nil
-+ }
-+
-+ // If we have less online voters than desired, let's try to promote
-+ // some other node.
-+ if n := len(onlineVoters); n < c.Config.Voters {
-+ candidates := c.list(client.StandBy, true)
-+ candidates = append(candidates, c.list(client.Spare, true)...)
-+
-+ if len(candidates) == 0 {
-+ return -1, nil
-+ }
-+
-+ domains := c.failureDomains(onlineVoters)
-+ c.sortCandidates(candidates, domains)
-+
-+ return client.Voter, candidates
-+ }
-+
-+ // If we have more online voters than desired, let's demote one of
-+ // them.
-+ if n := len(onlineVoters); n > c.Config.Voters {
-+ nodes := []client.NodeInfo{}
-+ for _, node := range onlineVoters {
-+ // Don't demote the leader.
-+ if node.ID == leader {
-+ continue
-+ }
-+ nodes = append(nodes, node)
-+ }
-+
-+ return client.Spare, nodes
-+ }
-+
-+ // If we have offline voters, let's demote one of them.
-+ if n := len(offlineVoters); n > 0 {
-+ return client.Spare, offlineVoters
-+ }
-+
-+ // If we have less online stand-bys than desired, let's try to promote
-+ // some other node.
-+ if n := len(onlineStandbys); n < c.Config.StandBys {
-+ candidates := c.list(client.Spare, true)
-+
-+ if len(candidates) == 0 {
-+ return -1, nil
-+ }
-+
-+ domains := c.failureDomains(onlineStandbys)
-+ c.sortCandidates(candidates, domains)
-+
-+ return client.StandBy, candidates
-+ }
-+
-+ // If we have more online stand-bys than desired, let's demote one of
-+ // them.
-+ if n := len(onlineStandbys); n > c.Config.StandBys {
-+ nodes := []client.NodeInfo{}
-+ for _, node := range onlineStandbys {
-+ // Don't demote the leader.
-+ if node.ID == leader {
-+ continue
-+ }
-+ nodes = append(nodes, node)
-+ }
-+
-+ return client.Spare, nodes
-+ }
-+
-+ // If we have offline stand-bys, let's demote one of them.
-+ if n := len(offlineStandbys); n > 0 {
-+ return client.Spare, offlineStandbys
-+ }
-+
-+ return -1, nil
-+}
-+
-+// Return the number of nodes il the cluster.
-+func (c *RolesChanges) size() int {
-+ return len(c.State)
-+}
-+
-+// Return information about the node with the given ID, or nil if no node
-+// matches.
-+func (c *RolesChanges) get(id uint64) *client.NodeInfo {
-+ for node := range c.State {
-+ if node.ID == id {
-+ return &node
-+ }
-+ }
-+ return nil
-+}
-+
-+// Return the online or offline nodes with the given role.
-+func (c *RolesChanges) list(role client.NodeRole, online bool) []client.NodeInfo {
-+ nodes := []client.NodeInfo{}
-+ for node, metadata := range c.State {
-+ if node.Role == role && metadata != nil == online {
-+ nodes = append(nodes, node)
-+ }
-+ }
-+ return nodes
-+}
-+
-+// Return the number of online or offline nodes with the given role.
-+func (c *RolesChanges) count(role client.NodeRole, online bool) int {
-+ return len(c.list(role, online))
-+}
-+
-+// Return a map of the failure domains associated with the
-+// given nodes.
-+func (c *RolesChanges) failureDomains(nodes []client.NodeInfo) map[uint64]bool {
-+ domains := map[uint64]bool{}
-+ for _, node := range nodes {
-+ metadata := c.State[node]
-+ if metadata == nil {
-+ continue
-+ }
-+ domains[metadata.FailureDomain] = true
-+ }
-+ return domains
-+}
-+
-+// Sort the given candidates according to their failure domain and
-+// weight. Candidates belonging to a failure domain different from the given
-+// domains take precedence.
-+func (c *RolesChanges) sortCandidates(candidates []client.NodeInfo, domains map[uint64]bool) {
-+ less := func(i, j int) bool {
-+ metadata1 := c.metadata(candidates[i])
-+ metadata2 := c.metadata(candidates[j])
-+
-+ // If i's failure domain is not in the given list, but j's is,
-+ // then i takes precedence.
-+ if !domains[metadata1.FailureDomain] && domains[metadata2.FailureDomain] {
-+ return true
-+ }
-+
-+ // If j's failure domain is not in the given list, but i's is,
-+ // then j takes precedence.
-+ if !domains[metadata2.FailureDomain] && domains[metadata1.FailureDomain] {
-+ return false
-+ }
-+
-+ return metadata1.Weight < metadata2.Weight
-+ }
-+
-+ sort.Slice(candidates, less)
-+}
-+
-+// Return the metadata of the given node, if any.
-+func (c *RolesChanges) metadata(node client.NodeInfo) *client.NodeMetadata {
-+ return c.State[node]
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/app/testdata/cluster.crt b/vendor/github.com/canonical/go-dqlite/app/testdata/cluster.crt
-new file mode 100644
-index 00000000000..e080215f93f
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/app/testdata/cluster.crt
-@@ -0,0 +1,30 @@
-+-----BEGIN CERTIFICATE-----
-+MIIFKjCCAxKgAwIBAgIUNdX/dAzmhTWG9oMmYrui8+uS2Z8wDQYJKoZIhvcNAQEL
-+BQAwFjEUMBIGA1UEAwwLZHFsaXRlLXRlc3QwHhcNMjAwNTExMTUwMjA2WhcNMzAw
-+NTA5MTUwMjA2WjAWMRQwEgYDVQQDDAtkcWxpdGUtdGVzdDCCAiIwDQYJKoZIhvcN
-+AQEBBQADggIPADCCAgoCggIBAM8xxnuDhYMdSQfypdLpGHP2k8ZIF0UzrdL4TJrB
-+LxzabdfGh/fhFP4ilHqYyK8Twpj27AH6EYMr0xEjgcCaxa28qir0irRUU4YGyujr
-+A0GT/m9xh9r46sra5gbJKJj7HGieCiSQG1RcpbSuayWOi6cYp416YqPhqWIwfsLx
-+79L9VfiYtuo86xV5bVGlDyWG8Yq8vss1w+2KKbyUgQovy1on4DrFgZmDBS09tXWS
-+ImQjWj7c9yvLl1EfwarGKuG/XRAOY8XdhPhS+cYeaPcCX9pZ5WigALbL3Y7A8UzF
-+ng4njcHKOlPpPOb/AOGpsO87ZWKQz7/aQCDlZ8/Zcxrgt7lwqmaebJSOtJEuyIj3
-+udRCaUwhy6vVsbuQRyca2kXdKd542fea4DOgUvrkXRPHbxZO+TvLtdn40WL7CJGM
-+junuoOlPutcAW2PjqRqb+cfwZhf31ZIfZY08muJ3OzEKN0i1/bQyxLVZNPn5JAjR
-+ZZocCghTni2SH+fAhFUxtUC4IcChg2GpAxZbafQjXCvbMVRuh7hCsy8ZB3Eo8Ppu
-+wQ/hP01hzYab9k4Rg4UVpBR1Yo4qlf47w2EhzydH0BoMHMQ4rEms/Cnwh+5Q6kEx
-+ohEUtOXEFGIAjtr8lnUSZCuXVs66xnBGWnVdxDSAC0IfS5GYHS3d9dIug47KAQtH
-+guxZAgMBAAGjcDBuMB0GA1UdDgQWBBT3t89E5SxmVjyVsyoDbuIR2ly7mzAfBgNV
-+HSMEGDAWgBT3t89E5SxmVjyVsyoDbuIR2ly7mzAPBgNVHRMBAf8EBTADAQH/MBsG
-+A1UdEQQUMBKHBH8AAAGCCmxvY2FsLnRlc3QwDQYJKoZIhvcNAQELBQADggIBAB9A
-+zYg53ZkVDvLwsCrR+8E0VnTQQuM/i9uDH50Vhq7znQ22OQ2USxMHLz+Yz65PCbD9
-+SPD4VPKDkhqH4G0ujT2NLG8+vb1Ckhv+/3ETPpO2xDVt/agl576ill5r/UI1U1LI
-+NuVAK7JN8FE7pUQXHjFNfGXBI4wT3fLF/FX+NlMEB6Dw98Ik6oSMtRJdo7qfOeNA
-+wiQ7Pak5MBQ9T5CcNbu/qpgZXu48Zo1j0ZA2ONlidQ7cnOBJUtQyHsDpw41+MkDY
-+g4eC9W/jCI2KxOKR6dLfO4XWXXbHkJV5jyA4w0ekz36UnJnM08qsfjYVgc1es83z
-+X/x0iuDAHG54otU1L/joxvOaiqgEz5dAjbPmznpr5LCSSB/2TxX/Dl42g80FDBSM
-+DqgXeRbbcHcoGIFDFvC1oq0qW7zCvteHVnOV4lUpbQ82JWXsRfkWroGcJxxXPePA
-+9B3jZFZO3R9OyiqcPqsAMFAKrNRGLK+JMy7ufi2Cz1a4g8ojhNn8ML3MaAyPzatE
-+D28avmHW2+S/l8oJWD54uIMCKPvP+0UiySg9M1OK3VCcmZeltVs2sUtibBxtZ8gA
-+j7Dp22XZDWL5mR/hgyywCEym93lEuyztaQV6NnSnNc7VPLsLre/FtSJkXzo1Z7bu
-+N7pGl3/F6mgrHReplv1/l+CkeIral9Pnn9yAUfRI
-+-----END CERTIFICATE-----
-diff --git a/vendor/github.com/canonical/go-dqlite/app/testdata/cluster.key b/vendor/github.com/canonical/go-dqlite/app/testdata/cluster.key
-new file mode 100644
-index 00000000000..7ae4d0e245a
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/app/testdata/cluster.key
-@@ -0,0 +1,52 @@
-+-----BEGIN PRIVATE KEY-----
-+MIIJRAIBADANBgkqhkiG9w0BAQEFAASCCS4wggkqAgEAAoICAQDPMcZ7g4WDHUkH
-+8qXS6Rhz9pPGSBdFM63S+EyawS8c2m3Xxof34RT+IpR6mMivE8KY9uwB+hGDK9MR
-+I4HAmsWtvKoq9Iq0VFOGBsro6wNBk/5vcYfa+OrK2uYGySiY+xxongokkBtUXKW0
-+rmsljounGKeNemKj4aliMH7C8e/S/VX4mLbqPOsVeW1RpQ8lhvGKvL7LNcPtiim8
-+lIEKL8taJ+A6xYGZgwUtPbV1kiJkI1o+3Pcry5dRH8Gqxirhv10QDmPF3YT4UvnG
-+Hmj3Al/aWeVooAC2y92OwPFMxZ4OJ43ByjpT6Tzm/wDhqbDvO2VikM+/2kAg5WfP
-+2XMa4Le5cKpmnmyUjrSRLsiI97nUQmlMIcur1bG7kEcnGtpF3SneeNn3muAzoFL6
-+5F0Tx28WTvk7y7XZ+NFi+wiRjI7p7qDpT7rXAFtj46kam/nH8GYX99WSH2WNPJri
-+dzsxCjdItf20MsS1WTT5+SQI0WWaHAoIU54tkh/nwIRVMbVAuCHAoYNhqQMWW2n0
-+I1wr2zFUboe4QrMvGQdxKPD6bsEP4T9NYc2Gm/ZOEYOFFaQUdWKOKpX+O8NhIc8n
-+R9AaDBzEOKxJrPwp8IfuUOpBMaIRFLTlxBRiAI7a/JZ1EmQrl1bOusZwRlp1XcQ0
-+gAtCH0uRmB0t3fXSLoOOygELR4LsWQIDAQABAoICAQC9YsIKJC3lqt8WZLUuE+JG
-+HRz2IO8kUhiQvmVHD04BEadVlCK0cQi/Qtx1MzI81dMJ/qm7JNjdDTHgeJQheVc3
-+dzwUB3LBYlL2Lz9Jp4dSafQn6Z5EiUIUKgIYMcbqDp3pAoxC33YbppPQk51ctu7v
-+zZlZuIRxR9OL7MfxaTPNp8LteKm5cfVvq45BEp7FrpMaPWwoUvzEI4YC2bsZlJZL
-+0icjkWTk7N3OfJlg3jKRVb8nttI3NeEaRezALGMF9eUayAxq0BC66x1j+mf3YDQ9
-+Ye5Ps/DzJWVfFv15LylJxt3MRQ0LpiYE/xHPM0yHX8NMVRGQS+5SI17AngKYjs9f
-+DJFho/M3GuXLFHmGHVKc3RVgzO2pRUJt8wHAWNltv4A547gsDSxjAxpcu563gsez
-+mxA+mNAf9h78FYF4OPFbXNBCEi9RDgYqUUHcSb8DSYqZdksb+Ge3s7GE4Euk4lv6
-+mYQcd5I2oYfDNwaBTeJ6CPhdvUHaNhSHwoNxx4PIUIkSpyABeMY9ENMZNlbMFi7N
-+YULUlv0Lizd8ujt118N4HfyBA120wAld9d7e2UXC2ErgA34JrP1MAZgrhtWcWk2X
-+HnIgkrqImN22xOK7U0tpLxkrIf+czvb/iSblPPcwXp9OicAUxC9mHS3YDkLxNdFw
-+ZV5nDJ+l5c70mIjORizoAQKCAQEA6BWjDE5JP2bh9DXfS3P8vzdhXJS5hEFfFtNj
-+R4rnjOr0VqKRRjnQibSi0zouf/Ce3o6DXCcoKKxY1A/RVUcUzFHq8XpDjwvP7Cmc
-+5WHPLpgNmOUndQIJ2asUy3kyByPZmsUR+vKcB9Lpt7aCM/9x3481eQa0uZJQdbNR
-+k8DPh5CXE3QWziVesX/gbLHboRsEpVW/7aN2F7KxUKMi2qI27SVadqFqcQI5Gbq1
-+YwG4T4EJ32y7kC9gpRV6hOzkJIXakvXVPX61u6Ka7U3TNx5sDLUv4AH3opqErfI6
-+LaH7kYD+LxWUmJwrfdj92ibbdgfApUoOwEplNNqMu+YTMoG/qwKCAQEA5IuKdDed
-+TP3utM9EYPQWwvrzpj8xjbJUloNoooQgZRvvLTdVE7mZlhUurIGVRds5wImES2RV
-+EYRHMWATqJtwJn+Ln2ihZxeK108o4Ji8jK+ILdNY327LeXYgMW5IHVQhedv1Yb2p
-+gczYvUiistpc1j9Oy6/8tZ5IsrNAj8b5CNWpWVSWyIIjDWadhPSyOQPVu/9qRfF9
-+WJDi6h1/+Z6BkAwkJmQ2TA41yqt1M5JgZUZ5BL39kDKqEIlYY7QEeBgRqgyc5LUh
-+zIbmYvR/QICmshvE9EytUDHQShnpgnLzfDIQ6s7QLFulceKB4NqOn65Rzu6xQb5a
-+5pABnOK8Ht8QCwKCAQA4B7E7cVMBuYbnfHISN0hXqbkZrtYy9c1NJ2+agBBy1u5y
-+VAEaoS6VcES3Fh7PyHmyZVmVT+bEGwe++qqhGe4NfeFotwnEaSejblBwmHJ6Xz85
-+IGmsN/fE0ybMENBhJOiOEvHKUfQVd3fxYGCCyTLmtB5Gv7W173r5T3dsYmo34lJ2
-+4dn3ntIDrEm0NCQ/+tPeTpttdb827W8ddqUdf3gfPJtgdIvMCKtcaNUNuAaVOyP1
-+Cr0ljCqa8FqJ6N8TajVgsXTvyb16NUGN9PH/JpsTXlLWk+MDIKbTj9030xl3AVtc
-+ZqtfRKTDVzXCCwuErU53GTACgHN6lSDQ5DTSRrzNAoIBAQCsHFHWF5PESW8chwJN
-+qUQC7g3S4sAkn2mIN8nyqWHkAtdpy8OsKRd6qNUPYD7T7rZdQrJ4PES7/kvG6T2k
-+ufoJY//3ukniYx5R6vEzwpIruHzrVcAV81AUVlsEHrL8M0FKjdULGFOwxl1qnmq4
-+Gi3TwrVKO2wqMds5iDhUjlk4x/7SoRs0QJy5GNADMmuM+FBbVYvoxGXK7sGT4Udg
-+6ndTzXDj4I/rUsA7skd+4PrrCdlVMubjUuQzs9r2qri64BVdZEY7DhU3+1dAqjOy
-+la/zbWozOBNE016yjzUE4iBvJR0MV1b6MMI1M4jvlmfV5OcC2Upv4jR+cXBlf3KA
-+JSflAoIBAQCZ9gjaChFZfkoVmUL0fJXJR5XcmoPzGxilmM77uhm+fUZMGbPYfIJF
-+H9U+YermPVjhkFfPC0bTDFzNbK0w5/f08MOcFSPcMu74XPvT0vlBXetapXCYL6J2
-+qXbEt53YGy9S8YA3uBQpF7bf1wuMRLR+x7EIUGifrtZNH0YnjcgUDMBJmrfrMoz7
-+CD1eRgSdA28jn8OFWS1Ouz4vGABImBzMHmiDFIRtdi3TvHyhKbknrdGBRiRn014/
-+krJtLm36DCfy2ruepfyAit2oKITbbwitlOIF90h3aR//CLPFMA5YuGSBQ9UTTRav
-+p4Liq8PVLoNnx6poJi7sG7SGyDkHoZSX
-+-----END PRIVATE KEY-----
-diff --git a/vendor/github.com/canonical/go-dqlite/app/tls.go b/vendor/github.com/canonical/go-dqlite/app/tls.go
-new file mode 100644
-index 00000000000..9d3ec30d610
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/app/tls.go
-@@ -0,0 +1,114 @@
-+package app
-+
-+import (
-+ "crypto/tls"
-+ "crypto/x509"
-+ "fmt"
-+
-+ "github.com/canonical/go-dqlite/internal/protocol"
-+)
-+
-+// SimpleTLSConfig returns a pair of TLS configuration objects with sane
-+// defaults, one to be used as server-side configuration when listening to
-+// incoming connections and one to be used as client-side configuration when
-+// establishing outgoing connections.
-+//
-+// The returned configs can be used as "listen" and "dial" parameters for the
-+// WithTLS option.
-+//
-+// In order to generate a suitable TLS certificate you can use the openssl
-+// command, for example:
-+//
-+// DNS=$(hostname)
-+// IP=$(hostname -I | cut -f 1 -d ' ')
-+// CN=example.com
-+// openssl req -x509 -newkey rsa:4096 -sha256 -days 3650 \
-+// -nodes -keyout cluster.key -out cluster.crt -subj "/CN=$CN" \
-+// -addext "subjectAltName=DNS:$DNS,IP:$IP"
-+//
-+// then load the resulting key pair and pool with:
-+//
-+// cert, _ := tls.LoadX509KeyPair("cluster.crt", "cluster.key")
-+// data, _ := ioutil.ReadFile("cluster.crt")
-+// pool := x509.NewCertPool()
-+// pool.AppendCertsFromPEM(data)
-+//
-+// and finally use the WithTLS option together with the SimpleTLSConfig helper:
-+//
-+// app, _ := app.New("/my/dir", app.WithTLS(app.SimpleTLSConfig(cert, pool)))
-+//
-+// See SimpleListenTLSConfig and SimpleDialTLSConfig for details.
-+
-+func SimpleTLSConfig(cert tls.Certificate, pool *x509.CertPool) (*tls.Config, *tls.Config) {
-+ listen := SimpleListenTLSConfig(cert, pool)
-+ dial := SimpleDialTLSConfig(cert, pool)
-+ return listen, dial
-+}
-+
-+// SimpleListenTLSConfig returns a server-side TLS configuration with sane
-+// defaults (e.g. TLS version, ciphers and mutual authentication).
-+//
-+// The cert parameter must be a public/private key pair, typically loaded from
-+// disk using tls.LoadX509KeyPair().
-+//
-+// The pool parameter can be used to specify a custom signing CA (e.g. for
-+// self-signed certificates).
-+//
-+// When server and client both use the same certificate, the same key pair and
-+// pool should be passed to SimpleDialTLSConfig() in order to generate the
-+// client-side config.
-+//
-+// The returned config can be used as "listen" parameter for the WithTLS
-+// option.
-+func SimpleListenTLSConfig(cert tls.Certificate, pool *x509.CertPool) *tls.Config {
-+ // See https://github.com/denji/golang-tls
-+ config := &tls.Config{
-+ MinVersion: tls.VersionTLS12,
-+ CipherSuites: protocol.TLSCipherSuites,
-+ PreferServerCipherSuites: true,
-+ CurvePreferences: []tls.CurveID{tls.CurveP521, tls.CurveP384, tls.CurveP256},
-+ Certificates: []tls.Certificate{cert},
-+ RootCAs: pool,
-+ ClientCAs: pool,
-+ ClientAuth: tls.RequireAndVerifyClientCert,
-+ }
-+ config.BuildNameToCertificate()
-+
-+ return config
-+}
-+
-+// SimpleDialTLSConfig returns a client-side TLS configuration with sane
-+// defaults (e.g. TLS version, ciphers and mutual authentication).
-+//
-+// The cert parameter must be a public/private key pair, typically loaded from
-+// disk using tls.LoadX509KeyPair().
-+//
-+// The pool parameter can be used to specify a custom signing CA (e.g. for
-+// self-signed certificates).
-+//
-+// When server and client both use the same certificate, the same key pair and
-+// pool should be passed to SimpleListenTLSConfig() in order to generate the
-+// server-side config.
-+//
-+// The returned config can be used as "client" parameter for the WithTLS App
-+// option, or as "config" parameter for the client.DialFuncWithTLS() helper.
-+func SimpleDialTLSConfig(cert tls.Certificate, pool *x509.CertPool) *tls.Config {
-+ config := &tls.Config{
-+ MinVersion: tls.VersionTLS12,
-+ CipherSuites: protocol.TLSCipherSuites,
-+ PreferServerCipherSuites: true,
-+ RootCAs: pool,
-+ Certificates: []tls.Certificate{cert},
-+ }
-+
-+ x509cert, err := x509.ParseCertificate(cert.Certificate[0])
-+ if err != nil {
-+ panic(fmt.Errorf("parse certificate: %v", err))
-+ }
-+ if len(x509cert.DNSNames) == 0 {
-+ panic("certificate has no DNS extension")
-+ }
-+ config.ServerName = x509cert.DNSNames[0]
-+
-+ return config
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/client/client.go b/vendor/github.com/canonical/go-dqlite/client/client.go
-new file mode 100644
-index 00000000000..c8809be737d
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/client/client.go
-@@ -0,0 +1,319 @@
-+package client
-+
-+import (
-+ "context"
-+
-+ "github.com/canonical/go-dqlite/internal/protocol"
-+ "github.com/pkg/errors"
-+)
-+
-+// DialFunc is a function that can be used to establish a network connection.
-+type DialFunc = protocol.DialFunc
-+
-+// Client speaks the dqlite wire protocol.
-+type Client struct {
-+ protocol *protocol.Protocol
-+}
-+
-+// Option that can be used to tweak client parameters.
-+type Option func(*options)
-+
-+type options struct {
-+ DialFunc DialFunc
-+ LogFunc LogFunc
-+}
-+
-+// WithDialFunc sets a custom dial function for creating the client network
-+// connection.
-+func WithDialFunc(dial DialFunc) Option {
-+ return func(options *options) {
-+ options.DialFunc = dial
-+ }
-+}
-+
-+// WithLogFunc sets a custom log function.
-+// connection.
-+func WithLogFunc(log LogFunc) Option {
-+ return func(options *options) {
-+ options.LogFunc = log
-+ }
-+}
-+
-+// New creates a new client connected to the dqlite node with the given
-+// address.
-+func New(ctx context.Context, address string, options ...Option) (*Client, error) {
-+ o := defaultOptions()
-+
-+ for _, option := range options {
-+ option(o)
-+ }
-+ // Establish the connection.
-+ conn, err := o.DialFunc(ctx, address)
-+ if err != nil {
-+ return nil, errors.Wrap(err, "failed to establish network connection")
-+ }
-+
-+ protocol, err := protocol.Handshake(ctx, conn, protocol.VersionOne)
-+ if err != nil {
-+ conn.Close()
-+ return nil, err
-+ }
-+
-+ client := &Client{protocol: protocol}
-+
-+ return client, nil
-+}
-+
-+// Leader returns information about the current leader, if any.
-+func (c *Client) Leader(ctx context.Context) (*NodeInfo, error) {
-+ request := protocol.Message{}
-+ request.Init(16)
-+ response := protocol.Message{}
-+ response.Init(512)
-+
-+ protocol.EncodeLeader(&request)
-+
-+ if err := c.protocol.Call(ctx, &request, &response); err != nil {
-+ return nil, errors.Wrap(err, "failed to send Leader request")
-+ }
-+
-+ id, address, err := protocol.DecodeNode(&response)
-+ if err != nil {
-+ return nil, errors.Wrap(err, "failed to parse Node response")
-+ }
-+
-+ info := &NodeInfo{ID: id, Address: address}
-+
-+ return info, nil
-+}
-+
-+// Cluster returns information about all nodes in the cluster.
-+func (c *Client) Cluster(ctx context.Context) ([]NodeInfo, error) {
-+ request := protocol.Message{}
-+ request.Init(16)
-+ response := protocol.Message{}
-+ response.Init(512)
-+
-+ protocol.EncodeCluster(&request, protocol.ClusterFormatV1)
-+
-+ if err := c.protocol.Call(ctx, &request, &response); err != nil {
-+ return nil, errors.Wrap(err, "failed to send Cluster request")
-+ }
-+
-+ servers, err := protocol.DecodeNodes(&response)
-+ if err != nil {
-+ return nil, errors.Wrap(err, "failed to parse Node response")
-+ }
-+
-+ return servers, nil
-+}
-+
-+// File holds the content of a single database file.
-+type File struct {
-+ Name string
-+ Data []byte
-+}
-+
-+// Dump the content of the database with the given name. Two files will be
-+// returned, the first is the main database file (which has the same name as
-+// the database), the second is the WAL file (which has the same name as the
-+// database plus the suffix "-wal").
-+func (c *Client) Dump(ctx context.Context, dbname string) ([]File, error) {
-+ request := protocol.Message{}
-+ request.Init(16)
-+ response := protocol.Message{}
-+ response.Init(512)
-+
-+ protocol.EncodeDump(&request, dbname)
-+
-+ if err := c.protocol.Call(ctx, &request, &response); err != nil {
-+ return nil, errors.Wrap(err, "failed to send dump request")
-+ }
-+
-+ files, err := protocol.DecodeFiles(&response)
-+ if err != nil {
-+ return nil, errors.Wrap(err, "failed to parse files response")
-+ }
-+ defer files.Close()
-+
-+ dump := make([]File, 0)
-+
-+ for {
-+ name, data := files.Next()
-+ if name == "" {
-+ break
-+ }
-+ dump = append(dump, File{Name: name, Data: data})
-+ }
-+
-+ return dump, nil
-+}
-+
-+// Add a node to a cluster.
-+//
-+// The new node will have the role specified in node.Role. Note that if the
-+// desired role is Voter, the node being added must be online, since it will be
-+// granted voting rights only once it catches up with the leader's log.
-+func (c *Client) Add(ctx context.Context, node NodeInfo) error {
-+ request := protocol.Message{}
-+ response := protocol.Message{}
-+
-+ request.Init(4096)
-+ response.Init(4096)
-+
-+ protocol.EncodeAdd(&request, node.ID, node.Address)
-+
-+ if err := c.protocol.Call(ctx, &request, &response); err != nil {
-+ return err
-+ }
-+
-+ if err := protocol.DecodeEmpty(&response); err != nil {
-+ return err
-+ }
-+
-+ // If the desired role is spare, there's nothing to do, since all newly
-+ // added nodes have the spare role.
-+ if node.Role == Spare {
-+ return nil
-+ }
-+
-+ return c.Assign(ctx, node.ID, node.Role)
-+}
-+
-+// Assign a role to a node.
-+//
-+// Possible roles are:
-+//
-+// - Voter: the node will replicate data and participate in quorum.
-+// - StandBy: the node will replicate data but won't participate in quorum.
-+// - Spare: the node won't replicate data and won't participate in quorum.
-+//
-+// If the target node does not exist or has already the desired role, an error
-+// is returned.
-+func (c *Client) Assign(ctx context.Context, id uint64, role NodeRole) error {
-+ request := protocol.Message{}
-+ response := protocol.Message{}
-+
-+ request.Init(4096)
-+ response.Init(4096)
-+
-+ protocol.EncodeAssign(&request, id, uint64(role))
-+
-+ if err := c.protocol.Call(ctx, &request, &response); err != nil {
-+ return err
-+ }
-+
-+ if err := protocol.DecodeEmpty(&response); err != nil {
-+ return err
-+ }
-+
-+ return nil
-+}
-+
-+// Transfer leadership from the current leader to another node.
-+//
-+// This must be invoked one client connected to the current leader.
-+func (c *Client) Transfer(ctx context.Context, id uint64) error {
-+ request := protocol.Message{}
-+ response := protocol.Message{}
-+
-+ request.Init(4096)
-+ response.Init(4096)
-+
-+ protocol.EncodeTransfer(&request, id)
-+
-+ if err := c.protocol.Call(ctx, &request, &response); err != nil {
-+ return err
-+ }
-+
-+ if err := protocol.DecodeEmpty(&response); err != nil {
-+ return err
-+ }
-+
-+ return nil
-+}
-+
-+// Remove a node from the cluster.
-+func (c *Client) Remove(ctx context.Context, id uint64) error {
-+ request := protocol.Message{}
-+ request.Init(4096)
-+ response := protocol.Message{}
-+ response.Init(4096)
-+
-+ protocol.EncodeRemove(&request, id)
-+
-+ if err := c.protocol.Call(ctx, &request, &response); err != nil {
-+ return err
-+ }
-+
-+ if err := protocol.DecodeEmpty(&response); err != nil {
-+ return err
-+ }
-+
-+ return nil
-+}
-+
-+// NodeMetadata user-defined node-level metadata.
-+type NodeMetadata struct {
-+ FailureDomain uint64
-+ Weight uint64
-+}
-+
-+// Describe returns metadata about the node we're connected with.
-+func (c *Client) Describe(ctx context.Context) (*NodeMetadata, error) {
-+ request := protocol.Message{}
-+ request.Init(4096)
-+ response := protocol.Message{}
-+ response.Init(4096)
-+
-+ protocol.EncodeDescribe(&request, protocol.RequestDescribeFormatV0)
-+
-+ if err := c.protocol.Call(ctx, &request, &response); err != nil {
-+ return nil, err
-+ }
-+
-+ domain, weight, err := protocol.DecodeMetadata(&response)
-+ if err != nil {
-+ return nil, err
-+ }
-+
-+ metadata := &NodeMetadata{
-+ FailureDomain: domain,
-+ Weight: weight,
-+ }
-+
-+ return metadata, nil
-+}
-+
-+// Weight updates the weight associated to the node we're connected with.
-+func (c *Client) Weight(ctx context.Context, weight uint64) error {
-+ request := protocol.Message{}
-+ request.Init(4096)
-+ response := protocol.Message{}
-+ response.Init(4096)
-+
-+ protocol.EncodeWeight(&request, weight)
-+
-+ if err := c.protocol.Call(ctx, &request, &response); err != nil {
-+ return err
-+ }
-+
-+ if err := protocol.DecodeEmpty(&response); err != nil {
-+ return err
-+ }
-+
-+ return nil
-+}
-+
-+// Close the client.
-+func (c *Client) Close() error {
-+ return c.protocol.Close()
-+}
-+
-+// Create a client options object with sane defaults.
-+func defaultOptions() *options {
-+ return &options{
-+ DialFunc: DefaultDialFunc,
-+ LogFunc: DefaultLogFunc,
-+ }
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/client/client_export_test.go b/vendor/github.com/canonical/go-dqlite/client/client_export_test.go
-new file mode 100644
-index 00000000000..5fa73b4806c
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/client/client_export_test.go
-@@ -0,0 +1,9 @@
-+package client
-+
-+import (
-+ "github.com/canonical/go-dqlite/internal/protocol"
-+)
-+
-+func (c *Client) Protocol() *protocol.Protocol {
-+ return c.protocol
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/client/client_test.go b/vendor/github.com/canonical/go-dqlite/client/client_test.go
-new file mode 100644
-index 00000000000..6c83ca2e097
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/client/client_test.go
-@@ -0,0 +1,227 @@
-+package client_test
-+
-+import (
-+ "context"
-+ "fmt"
-+ "io/ioutil"
-+ "os"
-+ "testing"
-+ "time"
-+
-+ dqlite "github.com/canonical/go-dqlite"
-+ "github.com/canonical/go-dqlite/client"
-+ "github.com/canonical/go-dqlite/internal/protocol"
-+ "github.com/stretchr/testify/assert"
-+ "github.com/stretchr/testify/require"
-+)
-+
-+func TestClient_Leader(t *testing.T) {
-+ node, cleanup := newNode(t)
-+ defer cleanup()
-+
-+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
-+ defer cancel()
-+
-+ client, err := client.New(ctx, node.BindAddress())
-+ require.NoError(t, err)
-+ defer client.Close()
-+
-+ leader, err := client.Leader(context.Background())
-+ require.NoError(t, err)
-+
-+ assert.Equal(t, leader.ID, uint64(1))
-+ assert.Equal(t, leader.Address, "@1001")
-+}
-+
-+func TestClient_Dump(t *testing.T) {
-+ node, cleanup := newNode(t)
-+ defer cleanup()
-+
-+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
-+ defer cancel()
-+
-+ client, err := client.New(ctx, node.BindAddress())
-+ require.NoError(t, err)
-+ defer client.Close()
-+
-+ // Open a database and create a test table.
-+ request := protocol.Message{}
-+ request.Init(4096)
-+
-+ response := protocol.Message{}
-+ response.Init(4096)
-+
-+ protocol.EncodeOpen(&request, "test.db", 0, "volatile")
-+
-+ p := client.Protocol()
-+ err = p.Call(ctx, &request, &response)
-+ require.NoError(t, err)
-+
-+ db, err := protocol.DecodeDb(&response)
-+ require.NoError(t, err)
-+
-+ protocol.EncodeExecSQL(&request, uint64(db), "CREATE TABLE foo (n INT)", nil)
-+
-+ err = p.Call(ctx, &request, &response)
-+ require.NoError(t, err)
-+
-+ files, err := client.Dump(ctx, "test.db")
-+ require.NoError(t, err)
-+
-+ require.Len(t, files, 2)
-+ assert.Equal(t, "test.db", files[0].Name)
-+ assert.Equal(t, 4096, len(files[0].Data))
-+
-+ assert.Equal(t, "test.db-wal", files[1].Name)
-+ assert.Equal(t, 8272, len(files[1].Data))
-+}
-+
-+func TestClient_Cluster(t *testing.T) {
-+ node, cleanup := newNode(t)
-+ defer cleanup()
-+
-+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
-+ defer cancel()
-+
-+ cli, err := client.New(ctx, node.BindAddress())
-+ require.NoError(t, err)
-+ defer cli.Close()
-+
-+ servers, err := cli.Cluster(context.Background())
-+ require.NoError(t, err)
-+
-+ assert.Len(t, servers, 1)
-+ assert.Equal(t, servers[0].ID, uint64(1))
-+ assert.Equal(t, servers[0].Address, "@1001")
-+ assert.Equal(t, servers[0].Role, client.Voter)
-+}
-+
-+func TestClient_Transfer(t *testing.T) {
-+ node1, cleanup := newNode(t)
-+ defer cleanup()
-+
-+ ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
-+ defer cancel()
-+
-+ cli, err := client.New(ctx, node1.BindAddress())
-+ require.NoError(t, err)
-+ defer cli.Close()
-+
-+ node2, cleanup := addNode(t, cli, 2)
-+ defer cleanup()
-+
-+ err = cli.Assign(context.Background(), 2, client.Voter)
-+ require.NoError(t, err)
-+
-+ err = cli.Transfer(context.Background(), 2)
-+ require.NoError(t, err)
-+
-+ leader, err := cli.Leader(context.Background())
-+ require.NoError(t, err)
-+ assert.Equal(t, leader.ID, uint64(2))
-+
-+ cli, err = client.New(ctx, node2.BindAddress())
-+ require.NoError(t, err)
-+ defer cli.Close()
-+
-+ leader, err = cli.Leader(context.Background())
-+ require.NoError(t, err)
-+ assert.Equal(t, leader.ID, uint64(2))
-+
-+}
-+
-+func TestClient_Describe(t *testing.T) {
-+ node, cleanup := newNode(t)
-+ defer cleanup()
-+
-+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
-+ defer cancel()
-+
-+ cli, err := client.New(ctx, node.BindAddress())
-+ require.NoError(t, err)
-+ defer cli.Close()
-+
-+ metadata, err := cli.Describe(context.Background())
-+ require.NoError(t, err)
-+
-+ assert.Equal(t, uint64(0), metadata.FailureDomain)
-+ assert.Equal(t, uint64(0), metadata.Weight)
-+
-+ require.NoError(t, cli.Weight(context.Background(), 123))
-+
-+ metadata, err = cli.Describe(context.Background())
-+ require.NoError(t, err)
-+
-+ assert.Equal(t, uint64(0), metadata.FailureDomain)
-+ assert.Equal(t, uint64(123), metadata.Weight)
-+}
-+
-+func newNode(t *testing.T) (*dqlite.Node, func()) {
-+ t.Helper()
-+ dir, dirCleanup := newDir(t)
-+
-+ id := uint64(1)
-+ address := fmt.Sprintf("@%d", id+1000)
-+ node, err := dqlite.New(uint64(1), address, dir, dqlite.WithBindAddress(address))
-+ require.NoError(t, err)
-+
-+ err = node.Start()
-+ require.NoError(t, err)
-+
-+ cleanup := func() {
-+ require.NoError(t, node.Close())
-+ dirCleanup()
-+ }
-+
-+ return node, cleanup
-+}
-+
-+func addNode(t *testing.T, cli *client.Client, id uint64) (*dqlite.Node, func()) {
-+ t.Helper()
-+ dir, dirCleanup := newDir(t)
-+
-+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
-+ defer cancel()
-+
-+ address := fmt.Sprintf("@%d", id+1000)
-+ node, err := dqlite.New(id, address, dir, dqlite.WithBindAddress(address))
-+ require.NoError(t, err)
-+
-+ err = node.Start()
-+ require.NoError(t, err)
-+
-+ info := client.NodeInfo{
-+ ID: id,
-+ Address: address,
-+ Role: client.Spare,
-+ }
-+
-+ err = cli.Add(ctx, info)
-+ require.NoError(t, err)
-+
-+ cleanup := func() {
-+ require.NoError(t, node.Close())
-+ dirCleanup()
-+ }
-+
-+ return node, cleanup
-+}
-+
-+// Return a new temporary directory.
-+func newDir(t *testing.T) (string, func()) {
-+ t.Helper()
-+
-+ dir, err := ioutil.TempDir("", "dqlite-replication-test-")
-+ assert.NoError(t, err)
-+
-+ cleanup := func() {
-+ _, err := os.Stat(dir)
-+ if err != nil {
-+ assert.True(t, os.IsNotExist(err))
-+ } else {
-+ assert.NoError(t, os.RemoveAll(dir))
-+ }
-+ }
-+
-+ return dir, cleanup
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/client/constants.go b/vendor/github.com/canonical/go-dqlite/client/constants.go
-new file mode 100644
-index 00000000000..2aacfba967b
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/client/constants.go
-@@ -0,0 +1,12 @@
-+package client
-+
-+import (
-+ "github.com/canonical/go-dqlite/internal/protocol"
-+)
-+
-+// Node roles
-+const (
-+ Voter = protocol.Voter
-+ StandBy = protocol.StandBy
-+ Spare = protocol.Spare
-+)
-diff --git a/vendor/github.com/canonical/go-dqlite/client/dial.go b/vendor/github.com/canonical/go-dqlite/client/dial.go
-new file mode 100644
-index 00000000000..5466679196f
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/client/dial.go
-@@ -0,0 +1,37 @@
-+package client
-+
-+import (
-+ "context"
-+ "crypto/tls"
-+ "net"
-+
-+ "github.com/canonical/go-dqlite/internal/protocol"
-+)
-+
-+// DefaultDialFunc is the default dial function, which can handle plain TCP and
-+// Unix socket endpoints. You can customize it with WithDialFunc()
-+func DefaultDialFunc(ctx context.Context, address string) (net.Conn, error) {
-+ return protocol.Dial(ctx, address)
-+}
-+
-+// DialFuncWithTLS returns a dial function that uses TLS encryption.
-+//
-+// The given dial function will be used to establish the network connection,
-+// and the given TLS config will be used for encryption.
-+func DialFuncWithTLS(dial DialFunc, config *tls.Config) DialFunc {
-+ return func(ctx context.Context, addr string) (net.Conn, error) {
-+ clonedConfig := config.Clone()
-+ if len(clonedConfig.ServerName) == 0 {
-+ remoteIP, _, err := net.SplitHostPort(addr)
-+ if err != nil {
-+ return nil, err
-+ }
-+ clonedConfig.ServerName = remoteIP
-+ }
-+ conn, err := dial(ctx, addr)
-+ if err != nil {
-+ return nil, err
-+ }
-+ return tls.Client(conn, clonedConfig), nil
-+ }
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/client/leader.go b/vendor/github.com/canonical/go-dqlite/client/leader.go
-new file mode 100644
-index 00000000000..5de72ce9432
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/client/leader.go
-@@ -0,0 +1,34 @@
-+package client
-+
-+import (
-+ "context"
-+
-+ "github.com/canonical/go-dqlite/internal/protocol"
-+)
-+
-+// FindLeader returns a Client connected to the current cluster leader.
-+//
-+// The function will iterate through to all nodes in the given store, and for
-+// each of them check if it's the current leader. If no leader is found, the
-+// function will keep retrying (with a capped exponential backoff) until the
-+// given context is canceled.
-+func FindLeader(ctx context.Context, store NodeStore, options ...Option) (*Client, error) {
-+ o := defaultOptions()
-+
-+ for _, option := range options {
-+ option(o)
-+ }
-+
-+ config := protocol.Config{
-+ Dial: o.DialFunc,
-+ }
-+ connector := protocol.NewConnector(0, store, config, o.LogFunc)
-+ protocol, err := connector.Connect(ctx)
-+ if err != nil {
-+ return nil, err
-+ }
-+
-+ client := &Client{protocol: protocol}
-+
-+ return client, nil
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/client/leader_test.go b/vendor/github.com/canonical/go-dqlite/client/leader_test.go
-new file mode 100644
-index 00000000000..f1739728b0d
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/client/leader_test.go
-@@ -0,0 +1,46 @@
-+package client_test
-+
-+import (
-+ "context"
-+ "fmt"
-+ "testing"
-+ "time"
-+
-+ dqlite "github.com/canonical/go-dqlite"
-+ "github.com/canonical/go-dqlite/client"
-+ "github.com/stretchr/testify/require"
-+)
-+
-+func TestMembership(t *testing.T) {
-+ n := 3
-+ nodes := make([]*dqlite.Node, n)
-+ infos := make([]client.NodeInfo, n)
-+
-+ for i := range nodes {
-+ id := uint64(i + 1)
-+ address := fmt.Sprintf("@test-%d", id)
-+ dir, cleanup := newDir(t)
-+ defer cleanup()
-+ node, err := dqlite.New(id, address, dir, dqlite.WithBindAddress(address))
-+ require.NoError(t, err)
-+ nodes[i] = node
-+ infos[i].ID = id
-+ infos[i].Address = address
-+ err = node.Start()
-+ require.NoError(t, err)
-+ defer node.Close()
-+ }
-+
-+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
-+ defer cancel()
-+
-+ store := client.NewInmemNodeStore()
-+ store.Set(context.Background(), []client.NodeInfo{infos[0]})
-+
-+ client, err := client.FindLeader(ctx, store)
-+ require.NoError(t, err)
-+ defer client.Close()
-+
-+ err = client.Add(ctx, infos[1])
-+ require.NoError(t, err)
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/client/log.go b/vendor/github.com/canonical/go-dqlite/client/log.go
-new file mode 100644
-index 00000000000..e5acda2a721
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/client/log.go
-@@ -0,0 +1,23 @@
-+package client
-+
-+import (
-+ "github.com/canonical/go-dqlite/internal/logging"
-+)
-+
-+// LogFunc is a function that can be used for logging.
-+type LogFunc = logging.Func
-+
-+// LogLevel defines the logging level.
-+type LogLevel = logging.Level
-+
-+// Available logging levels.
-+const (
-+ LogNone = logging.None
-+ LogDebug = logging.Debug
-+ LogInfo = logging.Info
-+ LogWarn = logging.Warn
-+ LogError = logging.Error
-+)
-+
-+// DefaultLogFunc doesn't emit any message.
-+func DefaultLogFunc(l LogLevel, format string, a ...interface{}) {}
-diff --git a/vendor/github.com/canonical/go-dqlite/client/store.go b/vendor/github.com/canonical/go-dqlite/client/store.go
-new file mode 100644
-index 00000000000..7d6a81ab44d
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/client/store.go
-@@ -0,0 +1,238 @@
-+package client
-+
-+import (
-+ "context"
-+ "database/sql"
-+ "fmt"
-+ "io/ioutil"
-+ "os"
-+ "strings"
-+ "sync"
-+
-+ "github.com/ghodss/yaml"
-+ "github.com/pkg/errors"
-+
-+ "github.com/canonical/go-dqlite/internal/protocol"
-+ _ "github.com/mattn/go-sqlite3" // Go SQLite bindings
-+)
-+
-+// NodeStore is used by a dqlite client to get an initial list of candidate
-+// dqlite nodes that it can dial in order to find a leader dqlite node to use.
-+type NodeStore = protocol.NodeStore
-+
-+// NodeRole identifies the role of a node.
-+type NodeRole = protocol.NodeRole
-+
-+// NodeInfo holds information about a single server.
-+type NodeInfo = protocol.NodeInfo
-+
-+// InmemNodeStore keeps the list of target dqlite nodes in memory.
-+type InmemNodeStore = protocol.InmemNodeStore
-+
-+// NewInmemNodeStore creates NodeStore which stores its data in-memory.
-+var NewInmemNodeStore = protocol.NewInmemNodeStore
-+
-+// DatabaseNodeStore persists a list addresses of dqlite nodes in a SQL table.
-+type DatabaseNodeStore struct {
-+ db *sql.DB // Database handle to use.
-+ schema string // Name of the schema holding the servers table.
-+ table string // Name of the servers table.
-+ column string // Column name in the servers table holding the server address.
-+ where string // Optional WHERE filter
-+}
-+
-+// DefaultNodeStore creates a new NodeStore using the given filename.
-+//
-+// If the filename ends with ".yaml" then the YamlNodeStore implementation will
-+// be used. Otherwise the SQLite-based one will be picked, with default names
-+// for the schema, table and column parameters.
-+//
-+// It also creates the table if it doesn't exist yet.
-+func DefaultNodeStore(filename string) (NodeStore, error) {
-+ if strings.HasSuffix(filename, ".yaml") {
-+ return NewYamlNodeStore(filename)
-+ }
-+
-+ // Open the database.
-+ db, err := sql.Open("sqlite3", filename)
-+ if err != nil {
-+ return nil, errors.Wrap(err, "failed to open database")
-+ }
-+
-+ // Since we're setting SQLite single-thread mode, we need to have one
-+ // connection at most.
-+ db.SetMaxOpenConns(1)
-+
-+ // Create the servers table if it does not exist yet.
-+ _, err = db.Exec("CREATE TABLE IF NOT EXISTS servers (address TEXT, UNIQUE(address))")
-+ if err != nil {
-+ return nil, errors.Wrap(err, "failed to create servers table")
-+ }
-+
-+ store := NewNodeStore(db, "main", "servers", "address")
-+
-+ return store, nil
-+}
-+
-+// Option that can be used to tweak node store parameters.
-+type NodeStoreOption func(*nodeStoreOptions)
-+
-+type nodeStoreOptions struct {
-+ Where string
-+}
-+
-+// WithNodeStoreWhereClause configures the node store to append the given
-+// hard-coded where clause to the SELECT query used to fetch nodes. Only the
-+// clause itself must be given, without the "WHERE" prefix.
-+func WithNodeStoreWhereClause(where string) NodeStoreOption {
-+ return func(options *nodeStoreOptions) {
-+ options.Where = where
-+ }
-+}
-+
-+// NewNodeStore creates a new NodeStore.
-+func NewNodeStore(db *sql.DB, schema, table, column string, options ...NodeStoreOption) *DatabaseNodeStore {
-+ o := &nodeStoreOptions{}
-+ for _, option := range options {
-+ option(o)
-+ }
-+
-+ return &DatabaseNodeStore{
-+ db: db,
-+ schema: schema,
-+ table: table,
-+ column: column,
-+ where: o.Where,
-+ }
-+}
-+
-+// Get the current servers.
-+func (d *DatabaseNodeStore) Get(ctx context.Context) ([]NodeInfo, error) {
-+ tx, err := d.db.Begin()
-+ if err != nil {
-+ return nil, errors.Wrap(err, "failed to begin transaction")
-+ }
-+ defer tx.Rollback()
-+
-+ query := fmt.Sprintf("SELECT %s FROM %s.%s", d.column, d.schema, d.table)
-+ if d.where != "" {
-+ query += " WHERE " + d.where
-+ }
-+ rows, err := tx.QueryContext(ctx, query)
-+ if err != nil {
-+ return nil, errors.Wrap(err, "failed to query servers table")
-+ }
-+ defer rows.Close()
-+
-+ servers := make([]NodeInfo, 0)
-+ for rows.Next() {
-+ var address string
-+ err := rows.Scan(&address)
-+ if err != nil {
-+ return nil, errors.Wrap(err, "failed to fetch server address")
-+ }
-+ servers = append(servers, NodeInfo{ID: 1, Address: address})
-+ }
-+ if err := rows.Err(); err != nil {
-+ return nil, errors.Wrap(err, "result set failure")
-+ }
-+
-+ return servers, nil
-+}
-+
-+// Set the servers addresses.
-+func (d *DatabaseNodeStore) Set(ctx context.Context, servers []NodeInfo) error {
-+ tx, err := d.db.Begin()
-+ if err != nil {
-+ return errors.Wrap(err, "failed to begin transaction")
-+ }
-+
-+ query := fmt.Sprintf("DELETE FROM %s.%s", d.schema, d.table)
-+ if _, err := tx.ExecContext(ctx, query); err != nil {
-+ tx.Rollback()
-+ return errors.Wrap(err, "failed to delete existing servers rows")
-+ }
-+
-+ query = fmt.Sprintf("INSERT INTO %s.%s(%s) VALUES (?)", d.schema, d.table, d.column)
-+ stmt, err := tx.PrepareContext(ctx, query)
-+ if err != nil {
-+ tx.Rollback()
-+ return errors.Wrap(err, "failed to prepare insert statement")
-+ }
-+ defer stmt.Close()
-+
-+ for _, server := range servers {
-+ if _, err := stmt.ExecContext(ctx, server.Address); err != nil {
-+ tx.Rollback()
-+ return errors.Wrapf(err, "failed to insert server %s", server.Address)
-+ }
-+ }
-+
-+ if err := tx.Commit(); err != nil {
-+ return errors.Wrap(err, "failed to commit transaction")
-+ }
-+
-+ return nil
-+}
-+
-+// Persists a list addresses of dqlite nodes in a YAML file.
-+type YamlNodeStore struct {
-+ path string
-+ servers []NodeInfo
-+ mu sync.RWMutex
-+}
-+
-+// NewYamlNodeStore creates a new YamlNodeStore backed by the given YAML file.
-+func NewYamlNodeStore(path string) (*YamlNodeStore, error) {
-+ servers := []NodeInfo{}
-+
-+ _, err := os.Stat(path)
-+ if err != nil {
-+ if !os.IsNotExist(err) {
-+ return nil, err
-+ }
-+ } else {
-+ data, err := ioutil.ReadFile(path)
-+ if err != nil {
-+ return nil, err
-+ }
-+
-+ if err := yaml.Unmarshal(data, &servers); err != nil {
-+ return nil, err
-+ }
-+ }
-+
-+ store := &YamlNodeStore{
-+ path: path,
-+ servers: servers,
-+ }
-+
-+ return store, nil
-+}
-+
-+// Get the current servers.
-+func (s *YamlNodeStore) Get(ctx context.Context) ([]NodeInfo, error) {
-+ s.mu.RLock()
-+ defer s.mu.RUnlock()
-+
-+ return s.servers, nil
-+}
-+
-+// Set the servers addresses.
-+func (s *YamlNodeStore) Set(ctx context.Context, servers []NodeInfo) error {
-+ s.mu.Lock()
-+ defer s.mu.Unlock()
-+
-+ data, err := yaml.Marshal(servers)
-+ if err != nil {
-+ return err
-+ }
-+
-+ if err := ioutil.WriteFile(s.path, data, 0600); err != nil {
-+ return err
-+ }
-+
-+ s.servers = servers
-+
-+ return nil
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/client/store_test.go b/vendor/github.com/canonical/go-dqlite/client/store_test.go
-new file mode 100644
-index 00000000000..ee55b91e6aa
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/client/store_test.go
-@@ -0,0 +1,55 @@
-+package client_test
-+
-+import (
-+ "context"
-+ "testing"
-+
-+ "github.com/canonical/go-dqlite/client"
-+ "github.com/stretchr/testify/assert"
-+ "github.com/stretchr/testify/require"
-+)
-+
-+// Exercise setting and getting servers in a DatabaseNodeStore created with
-+// DefaultNodeStore.
-+func TestDefaultNodeStore(t *testing.T) {
-+ // Create a new default store.
-+ store, err := client.DefaultNodeStore(":memory:")
-+ require.NoError(t, err)
-+
-+ // Set and get some targets.
-+ err = store.Set(context.Background(), []client.NodeInfo{
-+ {Address: "1.2.3.4:666"}, {Address: "5.6.7.8:666"}},
-+ )
-+ require.NoError(t, err)
-+
-+ servers, err := store.Get(context.Background())
-+ assert.Equal(t, []client.NodeInfo{
-+ {ID: uint64(1), Address: "1.2.3.4:666"},
-+ {ID: uint64(1), Address: "5.6.7.8:666"}},
-+ servers)
-+
-+ // Set and get some new targets.
-+ err = store.Set(context.Background(), []client.NodeInfo{
-+ {Address: "1.2.3.4:666"}, {Address: "9.9.9.9:666"},
-+ })
-+ require.NoError(t, err)
-+
-+ servers, err = store.Get(context.Background())
-+ assert.Equal(t, []client.NodeInfo{
-+ {ID: uint64(1), Address: "1.2.3.4:666"},
-+ {ID: uint64(1), Address: "9.9.9.9:666"}},
-+ servers)
-+
-+ // Setting duplicate targets returns an error and the change is not
-+ // persisted.
-+ err = store.Set(context.Background(), []client.NodeInfo{
-+ {Address: "1.2.3.4:666"}, {Address: "1.2.3.4:666"},
-+ })
-+ assert.EqualError(t, err, "failed to insert server 1.2.3.4:666: UNIQUE constraint failed: servers.address")
-+
-+ servers, err = store.Get(context.Background())
-+ assert.Equal(t, []client.NodeInfo{
-+ {ID: uint64(1), Address: "1.2.3.4:666"},
-+ {ID: uint64(1), Address: "9.9.9.9:666"}},
-+ servers)
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/cmd/dqlite-demo/dqlite-demo.go b/vendor/github.com/canonical/go-dqlite/cmd/dqlite-demo/dqlite-demo.go
-new file mode 100644
-index 00000000000..49f81973788
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/cmd/dqlite-demo/dqlite-demo.go
-@@ -0,0 +1,131 @@
-+package main
-+
-+import (
-+ "context"
-+ "fmt"
-+ "io/ioutil"
-+ "log"
-+ "net"
-+ "net/http"
-+ "os"
-+ "os/signal"
-+ "path/filepath"
-+ "strings"
-+
-+ "github.com/canonical/go-dqlite/app"
-+ "github.com/canonical/go-dqlite/client"
-+ "github.com/pkg/errors"
-+ "github.com/spf13/cobra"
-+ "golang.org/x/sys/unix"
-+)
-+
-+func main() {
-+ var api string
-+ var db string
-+ var join *[]string
-+ var dir string
-+ var verbose bool
-+
-+ cmd := &cobra.Command{
-+ Use: "dqlite-demo",
-+ Short: "Demo application using dqlite",
-+ Long: `This demo shows how to integrate a Go application with dqlite.
-+
-+Complete documentation is available at https://github.com/canonical/go-dqlite`,
-+ RunE: func(cmd *cobra.Command, args []string) error {
-+ dir := filepath.Join(dir, db)
-+ if err := os.MkdirAll(dir, 0755); err != nil {
-+ return errors.Wrapf(err, "can't create %s", dir)
-+ }
-+ logFunc := func(l client.LogLevel, format string, a ...interface{}) {
-+ if !verbose {
-+ return
-+ }
-+ log.Printf(fmt.Sprintf("%s: %s: %s\n", api, l.String(), format), a...)
-+ }
-+ app, err := app.New(dir, app.WithAddress(db), app.WithCluster(*join), app.WithLogFunc(logFunc))
-+ if err != nil {
-+ return err
-+ }
-+
-+ if err := app.Ready(context.Background()); err != nil {
-+ return err
-+ }
-+
-+ db, err := app.Open(context.Background(), "demo")
-+ if err != nil {
-+ return err
-+ }
-+
-+ if _, err := db.Exec(schema); err != nil {
-+ return err
-+ }
-+
-+ http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
-+ key := strings.TrimLeft(r.URL.Path, "/")
-+ result := ""
-+ switch r.Method {
-+ case "GET":
-+ row := db.QueryRow(query, key)
-+ if err := row.Scan(&result); err != nil {
-+ result = fmt.Sprintf("Error: %s", err.Error())
-+ }
-+ break
-+ case "PUT":
-+ result = "done"
-+ value, _ := ioutil.ReadAll(r.Body)
-+ if _, err := db.Exec(update, key, value); err != nil {
-+ result = fmt.Sprintf("Error: %s", err.Error())
-+ }
-+ default:
-+ result = fmt.Sprintf("Error: unsupported method %q", r.Method)
-+
-+ }
-+ fmt.Fprintf(w, "%s\n", result)
-+ })
-+
-+ listener, err := net.Listen("tcp", api)
-+ if err != nil {
-+ return err
-+ }
-+
-+ go http.Serve(listener, nil)
-+
-+ ch := make(chan os.Signal)
-+ signal.Notify(ch, unix.SIGPWR)
-+ signal.Notify(ch, unix.SIGINT)
-+ signal.Notify(ch, unix.SIGQUIT)
-+ signal.Notify(ch, unix.SIGTERM)
-+
-+ <-ch
-+
-+ listener.Close()
-+ db.Close()
-+
-+ app.Handover(context.Background())
-+ app.Close()
-+
-+ return nil
-+ },
-+ }
-+
-+ flags := cmd.Flags()
-+ flags.StringVarP(&api, "api", "a", "", "address used to expose the demo API")
-+ flags.StringVarP(&db, "db", "d", "", "address used for internal database replication")
-+ join = flags.StringSliceP("join", "j", nil, "database addresses of existing nodes")
-+ flags.StringVarP(&dir, "dir", "D", "/tmp/dqlite-demo", "data directory")
-+ flags.BoolVarP(&verbose, "verbose", "v", false, "verbose logging")
-+
-+ cmd.MarkFlagRequired("api")
-+ cmd.MarkFlagRequired("db")
-+
-+ if err := cmd.Execute(); err != nil {
-+ os.Exit(1)
-+ }
-+}
-+
-+const (
-+ schema = "CREATE TABLE IF NOT EXISTS model (key TEXT, value TEXT, UNIQUE(key))"
-+ query = "SELECT value FROM model WHERE key = ?"
-+ update = "INSERT OR REPLACE INTO model(key, value) VALUES(?, ?)"
-+)
-diff --git a/vendor/github.com/canonical/go-dqlite/cmd/dqlite/dqlite.go b/vendor/github.com/canonical/go-dqlite/cmd/dqlite/dqlite.go
-new file mode 100644
-index 00000000000..c95e88e2bb6
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/cmd/dqlite/dqlite.go
-@@ -0,0 +1,135 @@
-+package main
-+
-+import (
-+ "context"
-+ "crypto/tls"
-+ "crypto/x509"
-+ "fmt"
-+ "io"
-+ "io/ioutil"
-+ "os"
-+ "strings"
-+
-+ "github.com/canonical/go-dqlite/app"
-+ "github.com/canonical/go-dqlite/client"
-+ "github.com/canonical/go-dqlite/internal/shell"
-+ "github.com/peterh/liner"
-+ "github.com/spf13/cobra"
-+)
-+
-+func main() {
-+ var crt string
-+ var key string
-+ var servers *[]string
-+ var format string
-+
-+ cmd := &cobra.Command{
-+ Use: "dqlite -s [command]",
-+ Short: "Standard dqlite shell",
-+ Args: cobra.RangeArgs(1, 2),
-+ RunE: func(cmd *cobra.Command, args []string) error {
-+ if len(*servers) == 0 {
-+ return fmt.Errorf("no servers provided")
-+ }
-+ var store client.NodeStore
-+ var err error
-+
-+ first := (*servers)[0]
-+ if strings.HasPrefix(first, "file://") {
-+ if len(*servers) > 1 {
-+ return fmt.Errorf("can't mix server store and explicit list")
-+ }
-+ path := first[len("file://"):]
-+ store, err = client.DefaultNodeStore(path)
-+ if err != nil {
-+ return fmt.Errorf("open servers store: %w", err)
-+ }
-+ } else {
-+ infos := make([]client.NodeInfo, len(*servers))
-+ for i, address := range *servers {
-+ infos[i].Address = address
-+ }
-+ store = client.NewInmemNodeStore()
-+ store.Set(context.Background(), infos)
-+ }
-+
-+ if (crt != "" && key == "") || (key != "" && crt == "") {
-+ return fmt.Errorf("both TLS certificate and key must be given")
-+ }
-+
-+ dial := client.DefaultDialFunc
-+
-+ if crt != "" {
-+ cert, err := tls.LoadX509KeyPair(crt, key)
-+ if err != nil {
-+ return err
-+ }
-+
-+ data, err := ioutil.ReadFile(crt)
-+ if err != nil {
-+ return err
-+ }
-+
-+ pool := x509.NewCertPool()
-+ if !pool.AppendCertsFromPEM(data) {
-+ return fmt.Errorf("bad certificate")
-+ }
-+
-+ config := app.SimpleDialTLSConfig(cert, pool)
-+ dial = client.DialFuncWithTLS(dial, config)
-+
-+ }
-+
-+ sh, err := shell.New(args[0], store, shell.WithDialFunc(dial), shell.WithFormat(format))
-+ if err != nil {
-+ return err
-+ }
-+
-+ if len(args) > 1 {
-+ for _, input := range strings.Split(args[1], ";") {
-+ result, err := sh.Process(context.Background(), input)
-+ if err != nil {
-+ return err
-+ } else if result != "" {
-+ fmt.Println(result)
-+ }
-+ }
-+ return nil
-+ }
-+
-+ line := liner.NewLiner()
-+ defer line.Close()
-+
-+ for {
-+ input, err := line.Prompt("dqlite> ")
-+ if err != nil {
-+ if err == io.EOF {
-+ break
-+ }
-+ return err
-+ }
-+
-+ result, err := sh.Process(context.Background(), input)
-+ if err != nil {
-+ fmt.Println("Error: ", err)
-+ } else if result != "" {
-+ fmt.Println(result)
-+ }
-+ }
-+
-+ return nil
-+ },
-+ }
-+
-+ flags := cmd.Flags()
-+ servers = flags.StringSliceP("servers", "s", nil, "comma-separated list of db servers, or file://")
-+ flags.StringVarP(&crt, "cert", "c", "", "public TLS cert")
-+ flags.StringVarP(&key, "key", "k", "", "private TLS key")
-+ flags.StringVarP(&format, "format", "f", "tabular", "output format (tabular, json)")
-+
-+ cmd.MarkFlagRequired("servers")
-+
-+ if err := cmd.Execute(); err != nil {
-+ os.Exit(1)
-+ }
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/config.go b/vendor/github.com/canonical/go-dqlite/config.go
-new file mode 100644
-index 00000000000..64361605798
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/config.go
-@@ -0,0 +1,48 @@
-+package dqlite
-+
-+import (
-+ "fmt"
-+ "os"
-+
-+ "github.com/canonical/go-dqlite/internal/bindings"
-+ "github.com/canonical/go-dqlite/internal/protocol"
-+ "github.com/pkg/errors"
-+)
-+
-+// ConfigMultiThread sets the threading mode of SQLite to Multi-thread.
-+//
-+// By default go-dqlite configures SQLite to Single-thread mode, because the
-+// dqlite engine itself is single-threaded, and enabling Multi-thread or
-+// Serialized modes would incur in a performance penality.
-+//
-+// If your Go process also uses SQLite directly (e.g. using the
-+// github.com/mattn/go-sqlite3 bindings) you might need to switch to
-+// Multi-thread mode in order to be thread-safe.
-+//
-+// IMPORTANT: It's possible to successfully change SQLite's threading mode only
-+// if no SQLite APIs have been invoked yet (e.g. no database has been opened
-+// yet). Therefore you'll typically want to call ConfigMultiThread() very early
-+// in your process setup. Alternatively you can set the GO_DQLITE_MULTITHREAD
-+// environment variable to 1 at process startup, in order to prevent go-dqlite
-+// from setting Single-thread mode at all.
-+func ConfigMultiThread() error {
-+ if err := bindings.ConfigMultiThread(); err != nil {
-+ if err, ok := err.(protocol.Error); ok && err.Code == 21 /* SQLITE_MISUSE */ {
-+ return fmt.Errorf("SQLite is already initialized")
-+ }
-+ return errors.Wrap(err, "unknown error")
-+ }
-+ return nil
-+}
-+
-+func init() {
-+ // Don't enable single thread mode by default if GO_DQLITE_MULTITHREAD
-+ // is set.
-+ if os.Getenv("GO_DQLITE_MULTITHREAD") == "1" {
-+ return
-+ }
-+ err := bindings.ConfigSingleThread()
-+ if err != nil {
-+ panic(errors.Wrap(err, "set single thread mode"))
-+ }
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/driver/driver.go b/vendor/github.com/canonical/go-dqlite/driver/driver.go
-new file mode 100644
-index 00000000000..b5b54efeb09
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/driver/driver.go
-@@ -0,0 +1,782 @@
-+// Copyright 2017 Canonical Ltd.
-+//
-+// Licensed under the Apache License, Version 2.0 (the "License");
-+// you may not use this file except in compliance with the License.
-+// You may obtain a copy of the License at
-+//
-+// http://www.apache.org/licenses/LICENSE-2.0
-+//
-+// Unless required by applicable law or agreed to in writing, software
-+// distributed under the License is distributed on an "AS IS" BASIS,
-+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-+// See the License for the specific language governing permissions and
-+// limitations under the License.
-+
-+package driver
-+
-+import (
-+ "context"
-+ "database/sql/driver"
-+ "io"
-+ "net"
-+ "reflect"
-+ "syscall"
-+ "time"
-+
-+ "github.com/pkg/errors"
-+
-+ "github.com/canonical/go-dqlite/client"
-+ "github.com/canonical/go-dqlite/internal/protocol"
-+)
-+
-+// Driver perform queries against a dqlite server.
-+type Driver struct {
-+ log client.LogFunc // Log function to use
-+ store client.NodeStore // Holds addresses of dqlite servers
-+ context context.Context // Global cancellation context
-+ connectionTimeout time.Duration // Max time to wait for a new connection
-+ contextTimeout time.Duration // Default client context timeout.
-+ clientConfig protocol.Config // Configuration for dqlite client instances
-+ tracing client.LogLevel // Whether to trace statements
-+}
-+
-+// Error is returned in case of database errors.
-+type Error = protocol.Error
-+
-+// Error codes. Values here mostly overlap with native SQLite codes.
-+const (
-+ ErrBusy = 5
-+ errIoErr = 10
-+ errIoErrNotLeader = errIoErr | 40<<8
-+ errIoErrLeadershipLost = errIoErr | (41 << 8)
-+
-+ // Legacy error codes before version-3.32.1+replication4. Kept here
-+ // for backward compatibility, but should eventually be dropped.
-+ errIoErrNotLeaderLegacy = errIoErr | 32<<8
-+ errIoErrLeadershipLostLegacy = errIoErr | (33 << 8)
-+)
-+
-+// Option can be used to tweak driver parameters.
-+type Option func(*options)
-+
-+// NodeStore is a convenience alias of client.NodeStore.
-+type NodeStore = client.NodeStore
-+
-+// NodeInfo is a convenience alias of client.NodeInfo.
-+type NodeInfo = client.NodeInfo
-+
-+// DefaultNodeStore is a convenience alias of client.DefaultNodeStore.
-+var DefaultNodeStore = client.DefaultNodeStore
-+
-+// WithLogFunc sets a custom logging function.
-+func WithLogFunc(log client.LogFunc) Option {
-+ return func(options *options) {
-+ options.Log = log
-+ }
-+}
-+
-+// DialFunc is a function that can be used to establish a network connection
-+// with a dqlite node.
-+type DialFunc = protocol.DialFunc
-+
-+// WithDialFunc sets a custom dial function.
-+func WithDialFunc(dial DialFunc) Option {
-+ return func(options *options) {
-+ options.Dial = protocol.DialFunc(dial)
-+ }
-+}
-+
-+// WithConnectionTimeout sets the connection timeout.
-+//
-+// If not used, the default is 5 seconds.
-+//
-+// DEPRECATED: Connection cancellation is supported via the driver.Connector
-+// interface, which is used internally by the stdlib sql package.
-+func WithConnectionTimeout(timeout time.Duration) Option {
-+ return func(options *options) {
-+ options.ConnectionTimeout = timeout
-+ }
-+}
-+
-+// WithConnectionBackoffFactor sets the exponential backoff factor for retrying
-+// failed connection attempts.
-+//
-+// If not used, the default is 100 milliseconds.
-+func WithConnectionBackoffFactor(factor time.Duration) Option {
-+ return func(options *options) {
-+ options.ConnectionBackoffFactor = factor
-+ }
-+}
-+
-+// WithConnectionBackoffCap sets the maximum connection retry backoff value,
-+// (regardless of the backoff factor) for retrying failed connection attempts.
-+//
-+// If not used, the default is 1 second.
-+func WithConnectionBackoffCap(cap time.Duration) Option {
-+ return func(options *options) {
-+ options.ConnectionBackoffCap = cap
-+ }
-+}
-+
-+// WithAttemptTimeout sets the timeout for each individual connection attempt.
-+//
-+// The Connector.Connect() and Driver.Open() methods try to find the current
-+// leader among the servers in the store that was passed to New(). Each time
-+// they attempt to probe an individual server for leadership this timeout will
-+// apply, so a server which accepts the connection but it's then unresponsive
-+// won't block the line.
-+//
-+// If not used, the default is 15 seconds.
-+func WithAttemptTimeout(timeout time.Duration) Option {
-+ return func(options *options) {
-+ options.AttemptTimeout = timeout
-+ }
-+}
-+
-+// WithRetryLimit sets the maximum number of connection retries.
-+//
-+// If not used, the default is 0 (unlimited retries)
-+func WithRetryLimit(limit uint) Option {
-+ return func(options *options) {
-+ options.RetryLimit = limit
-+ }
-+}
-+
-+// WithContext sets a global cancellation context.
-+//
-+// DEPRECATED: This API is no a no-op. Users should explicitly pass a context
-+// if they wish to cancel their requests.
-+func WithContext(context context.Context) Option {
-+ return func(options *options) {
-+ options.Context = context
-+ }
-+}
-+
-+// WithContextTimeout sets the default client context timeout for DB.Begin()
-+// when no context deadline is provided.
-+//
-+// DEPRECATED: Users should use db APIs that support contexts if they wish to
-+// cancel their requests.
-+func WithContextTimeout(timeout time.Duration) Option {
-+ return func(options *options) {
-+ options.ContextTimeout = timeout
-+ }
-+}
-+
-+// WithTracing will emit a log message at the given level every time a
-+// statement gets executed.
-+func WithTracing(level client.LogLevel) Option {
-+ return func(options *options) {
-+ options.Tracing = level
-+ }
-+}
-+
-+// NewDriver creates a new dqlite driver, which also implements the
-+// driver.Driver interface.
-+func New(store client.NodeStore, options ...Option) (*Driver, error) {
-+ o := defaultOptions()
-+
-+ for _, option := range options {
-+ option(o)
-+ }
-+
-+ driver := &Driver{
-+ log: o.Log,
-+ store: store,
-+ context: o.Context,
-+ connectionTimeout: o.ConnectionTimeout,
-+ contextTimeout: o.ContextTimeout,
-+ tracing: o.Tracing,
-+ clientConfig: protocol.Config{
-+ Dial: o.Dial,
-+ AttemptTimeout: o.AttemptTimeout,
-+ BackoffFactor: o.ConnectionBackoffFactor,
-+ BackoffCap: o.ConnectionBackoffCap,
-+ RetryLimit: o.RetryLimit,
-+ },
-+ }
-+
-+ return driver, nil
-+}
-+
-+// Hold configuration options for a dqlite driver.
-+type options struct {
-+ Log client.LogFunc
-+ Dial protocol.DialFunc
-+ AttemptTimeout time.Duration
-+ ConnectionTimeout time.Duration
-+ ContextTimeout time.Duration
-+ ConnectionBackoffFactor time.Duration
-+ ConnectionBackoffCap time.Duration
-+ RetryLimit uint
-+ Context context.Context
-+ Tracing client.LogLevel
-+}
-+
-+// Create a options object with sane defaults.
-+func defaultOptions() *options {
-+ return &options{
-+ Log: client.DefaultLogFunc,
-+ Dial: client.DefaultDialFunc,
-+ Tracing: client.LogNone,
-+ }
-+}
-+
-+// A Connector represents a driver in a fixed configuration and can create any
-+// number of equivalent Conns for use by multiple goroutines.
-+type Connector struct {
-+ uri string
-+ driver *Driver
-+}
-+
-+// Connect returns a connection to the database.
-+func (c *Connector) Connect(ctx context.Context) (driver.Conn, error) {
-+ if c.driver.context != nil {
-+ ctx = c.driver.context
-+ }
-+
-+ if c.driver.connectionTimeout != 0 {
-+ var cancel func()
-+ ctx, cancel = context.WithTimeout(ctx, c.driver.connectionTimeout)
-+ defer cancel()
-+ }
-+
-+ // TODO: generate a client ID.
-+ connector := protocol.NewConnector(0, c.driver.store, c.driver.clientConfig, c.driver.log)
-+
-+ conn := &Conn{
-+ log: c.driver.log,
-+ contextTimeout: c.driver.contextTimeout,
-+ tracing: c.driver.tracing,
-+ }
-+
-+ var err error
-+ conn.protocol, err = connector.Connect(ctx)
-+ if err != nil {
-+ return nil, errors.Wrap(err, "failed to create dqlite connection")
-+ }
-+
-+ conn.request.Init(4096)
-+ conn.response.Init(4096)
-+
-+ protocol.EncodeOpen(&conn.request, c.uri, 0, "volatile")
-+
-+ if err := conn.protocol.Call(ctx, &conn.request, &conn.response); err != nil {
-+ conn.protocol.Close()
-+ return nil, errors.Wrap(err, "failed to open database")
-+ }
-+
-+ conn.id, err = protocol.DecodeDb(&conn.response)
-+ if err != nil {
-+ conn.protocol.Close()
-+ return nil, errors.Wrap(err, "failed to open database")
-+ }
-+
-+ return conn, nil
-+}
-+
-+// Driver returns the underlying Driver of the Connector,
-+func (c *Connector) Driver() driver.Driver {
-+ return c.driver
-+}
-+
-+// OpenConnector must parse the name in the same format that Driver.Open
-+// parses the name parameter.
-+func (d *Driver) OpenConnector(name string) (driver.Connector, error) {
-+ connector := &Connector{
-+ uri: name,
-+ driver: d,
-+ }
-+ return connector, nil
-+}
-+
-+// Open establishes a new connection to a SQLite database on the dqlite server.
-+//
-+// The given name must be a pure file name without any directory segment,
-+// dqlite will connect to a database with that name in its data directory.
-+//
-+// Query parameters are always valid except for "mode=memory".
-+//
-+// If this node is not the leader, or the leader is unknown an ErrNotLeader
-+// error is returned.
-+func (d *Driver) Open(uri string) (driver.Conn, error) {
-+ connector, err := d.OpenConnector(uri)
-+ if err != nil {
-+ return nil, err
-+ }
-+
-+ return connector.Connect(context.Background())
-+}
-+
-+// SetContextTimeout sets the default client timeout when no context deadline
-+// is provided.
-+//
-+// DEPRECATED: This API is no a no-op. Users should explicitly pass a context
-+// if they wish to cancel their requests, or use the WithContextTimeout option.
-+func (d *Driver) SetContextTimeout(timeout time.Duration) {}
-+
-+// ErrNoAvailableLeader is returned as root cause of Open() if there's no
-+// leader available in the cluster.
-+var ErrNoAvailableLeader = protocol.ErrNoAvailableLeader
-+
-+// Conn implements the sql.Conn interface.
-+type Conn struct {
-+ log client.LogFunc
-+ protocol *protocol.Protocol
-+ request protocol.Message
-+ response protocol.Message
-+ id uint32 // Database ID.
-+ contextTimeout time.Duration
-+ tracing client.LogLevel
-+}
-+
-+// PrepareContext returns a prepared statement, bound to this connection.
-+// context is for the preparation of the statement, it must not store the
-+// context within the statement itself.
-+func (c *Conn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
-+ stmt := &Stmt{
-+ protocol: c.protocol,
-+ request: &c.request,
-+ response: &c.response,
-+ log: c.log,
-+ tracing: c.tracing,
-+ }
-+
-+ protocol.EncodePrepare(&c.request, uint64(c.id), query)
-+
-+ if err := c.protocol.Call(ctx, &c.request, &c.response); err != nil {
-+ return nil, driverError(c.log, err)
-+ }
-+
-+ var err error
-+ stmt.db, stmt.id, stmt.params, err = protocol.DecodeStmt(&c.response)
-+ if err != nil {
-+ return nil, driverError(c.log, err)
-+ }
-+
-+ if c.tracing != client.LogNone {
-+ stmt.sql = query
-+ }
-+
-+ return stmt, nil
-+}
-+
-+// Prepare returns a prepared statement, bound to this connection.
-+func (c *Conn) Prepare(query string) (driver.Stmt, error) {
-+ return c.PrepareContext(context.Background(), query)
-+}
-+
-+// ExecContext is an optional interface that may be implemented by a Conn.
-+func (c *Conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
-+ protocol.EncodeExecSQL(&c.request, uint64(c.id), query, args)
-+
-+ if err := c.protocol.Call(ctx, &c.request, &c.response); err != nil {
-+ return nil, driverError(c.log, err)
-+ }
-+
-+ result, err := protocol.DecodeResult(&c.response)
-+ if err != nil {
-+ return nil, driverError(c.log, err)
-+ }
-+
-+ if c.tracing != client.LogNone {
-+ c.log(c.tracing, "exec: %s", query)
-+ }
-+
-+ return &Result{result: result}, nil
-+}
-+
-+// Query is an optional interface that may be implemented by a Conn.
-+func (c *Conn) Query(query string, args []driver.Value) (driver.Rows, error) {
-+ return c.QueryContext(context.Background(), query, valuesToNamedValues(args))
-+}
-+
-+// QueryContext is an optional interface that may be implemented by a Conn.
-+func (c *Conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
-+ protocol.EncodeQuerySQL(&c.request, uint64(c.id), query, args)
-+
-+ if err := c.protocol.Call(ctx, &c.request, &c.response); err != nil {
-+ return nil, driverError(c.log, err)
-+ }
-+
-+ rows, err := protocol.DecodeRows(&c.response)
-+ if err != nil {
-+ return nil, driverError(c.log, err)
-+ }
-+
-+ if c.tracing != client.LogNone {
-+ c.log(c.tracing, "query: %s", query)
-+ }
-+
-+ return &Rows{
-+ ctx: ctx,
-+ request: &c.request,
-+ response: &c.response,
-+ protocol: c.protocol,
-+ rows: rows,
-+ log: c.log,
-+ }, nil
-+}
-+
-+// Exec is an optional interface that may be implemented by a Conn.
-+func (c *Conn) Exec(query string, args []driver.Value) (driver.Result, error) {
-+ return c.ExecContext(context.Background(), query, valuesToNamedValues(args))
-+}
-+
-+// Close invalidates and potentially stops any current prepared statements and
-+// transactions, marking this connection as no longer in use.
-+//
-+// Because the sql package maintains a free pool of connections and only calls
-+// Close when there's a surplus of idle connections, it shouldn't be necessary
-+// for drivers to do their own connection caching.
-+func (c *Conn) Close() error {
-+ return c.protocol.Close()
-+}
-+
-+// BeginTx starts and returns a new transaction. If the context is canceled by
-+// the user the sql package will call Tx.Rollback before discarding and closing
-+// the connection.
-+//
-+// This must check opts.Isolation to determine if there is a set isolation
-+// level. If the driver does not support a non-default level and one is set or
-+// if there is a non-default isolation level that is not supported, an error
-+// must be returned.
-+//
-+// This must also check opts.ReadOnly to determine if the read-only value is
-+// true to either set the read-only transaction property if supported or return
-+// an error if it is not supported.
-+func (c *Conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
-+ if _, err := c.ExecContext(ctx, "BEGIN", nil); err != nil {
-+ return nil, err
-+ }
-+
-+ tx := &Tx{
-+ conn: c,
-+ log: c.log,
-+ }
-+
-+ return tx, nil
-+}
-+
-+// Begin starts and returns a new transaction.
-+//
-+// Deprecated: Drivers should implement ConnBeginTx instead (or additionally).
-+func (c *Conn) Begin() (driver.Tx, error) {
-+ ctx := context.Background()
-+
-+ if c.contextTimeout > 0 {
-+ var cancel func()
-+ ctx, cancel = context.WithTimeout(context.Background(), c.contextTimeout)
-+ defer cancel()
-+ }
-+
-+ return c.BeginTx(ctx, driver.TxOptions{})
-+}
-+
-+// Tx is a transaction.
-+type Tx struct {
-+ conn *Conn
-+ log client.LogFunc
-+}
-+
-+// Commit the transaction.
-+func (tx *Tx) Commit() error {
-+ ctx := context.Background()
-+
-+ if _, err := tx.conn.ExecContext(ctx, "COMMIT", nil); err != nil {
-+ return driverError(tx.log, err)
-+ }
-+
-+ return nil
-+}
-+
-+// Rollback the transaction.
-+func (tx *Tx) Rollback() error {
-+ ctx := context.Background()
-+
-+ if _, err := tx.conn.ExecContext(ctx, "ROLLBACK", nil); err != nil {
-+ return driverError(tx.log, err)
-+ }
-+
-+ return nil
-+}
-+
-+// Stmt is a prepared statement. It is bound to a Conn and not
-+// used by multiple goroutines concurrently.
-+type Stmt struct {
-+ protocol *protocol.Protocol
-+ request *protocol.Message
-+ response *protocol.Message
-+ db uint32
-+ id uint32
-+ params uint64
-+ log client.LogFunc
-+ sql string // Prepared SQL, only set when tracing
-+ tracing client.LogLevel
-+}
-+
-+// Close closes the statement.
-+func (s *Stmt) Close() error {
-+ protocol.EncodeFinalize(s.request, s.db, s.id)
-+
-+ ctx := context.Background()
-+
-+ if err := s.protocol.Call(ctx, s.request, s.response); err != nil {
-+ return driverError(s.log, err)
-+ }
-+
-+ if err := protocol.DecodeEmpty(s.response); err != nil {
-+ return driverError(s.log, err)
-+ }
-+
-+ return nil
-+}
-+
-+// NumInput returns the number of placeholder parameters.
-+func (s *Stmt) NumInput() int {
-+ return int(s.params)
-+}
-+
-+// ExecContext executes a query that doesn't return rows, such
-+// as an INSERT or UPDATE.
-+//
-+// ExecContext must honor the context timeout and return when it is canceled.
-+func (s *Stmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
-+ protocol.EncodeExec(s.request, s.db, s.id, args)
-+
-+ if err := s.protocol.Call(ctx, s.request, s.response); err != nil {
-+ return nil, driverError(s.log, err)
-+ }
-+
-+ result, err := protocol.DecodeResult(s.response)
-+ if err != nil {
-+ return nil, driverError(s.log, err)
-+ }
-+
-+ if s.tracing != client.LogNone {
-+ s.log(s.tracing, "exec prepared: %s", s.sql)
-+ }
-+
-+ return &Result{result: result}, nil
-+}
-+
-+// Exec executes a query that doesn't return rows, such
-+func (s *Stmt) Exec(args []driver.Value) (driver.Result, error) {
-+ return s.ExecContext(context.Background(), valuesToNamedValues(args))
-+}
-+
-+// QueryContext executes a query that may return rows, such as a
-+// SELECT.
-+//
-+// QueryContext must honor the context timeout and return when it is canceled.
-+func (s *Stmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
-+ protocol.EncodeQuery(s.request, s.db, s.id, args)
-+
-+ if err := s.protocol.Call(ctx, s.request, s.response); err != nil {
-+ return nil, driverError(s.log, err)
-+ }
-+
-+ rows, err := protocol.DecodeRows(s.response)
-+ if err != nil {
-+ return nil, driverError(s.log, err)
-+ }
-+
-+ if s.tracing != client.LogNone {
-+ s.log(s.tracing, "query prepared: %s", s.sql)
-+ }
-+
-+ return &Rows{ctx: ctx, request: s.request, response: s.response, protocol: s.protocol, rows: rows}, nil
-+}
-+
-+// Query executes a query that may return rows, such as a
-+func (s *Stmt) Query(args []driver.Value) (driver.Rows, error) {
-+ return s.QueryContext(context.Background(), valuesToNamedValues(args))
-+}
-+
-+// Result is the result of a query execution.
-+type Result struct {
-+ result protocol.Result
-+}
-+
-+// LastInsertId returns the database's auto-generated ID
-+// after, for example, an INSERT into a table with primary
-+// key.
-+func (r *Result) LastInsertId() (int64, error) {
-+ return int64(r.result.LastInsertID), nil
-+}
-+
-+// RowsAffected returns the number of rows affected by the
-+// query.
-+func (r *Result) RowsAffected() (int64, error) {
-+ return int64(r.result.RowsAffected), nil
-+}
-+
-+// Rows is an iterator over an executed query's results.
-+type Rows struct {
-+ ctx context.Context
-+ protocol *protocol.Protocol
-+ request *protocol.Message
-+ response *protocol.Message
-+ rows protocol.Rows
-+ consumed bool
-+ types []string
-+ log client.LogFunc
-+}
-+
-+// Columns returns the names of the columns. The number of
-+// columns of the result is inferred from the length of the
-+// slice. If a particular column name isn't known, an empty
-+// string should be returned for that entry.
-+func (r *Rows) Columns() []string {
-+ return r.rows.Columns
-+}
-+
-+// Close closes the rows iterator.
-+func (r *Rows) Close() error {
-+ err := r.rows.Close()
-+
-+ // If we consumed the whole result set, there's nothing to do as
-+ // there's no pending response from the server.
-+ if r.consumed {
-+ return nil
-+ }
-+
-+ // If there is was a single-response result set, we're done.
-+ if err == io.EOF {
-+ return nil
-+ }
-+
-+ // Let's issue an interrupt request and wait until we get an empty
-+ // response, signalling that the query was interrupted.
-+ if err := r.protocol.Interrupt(r.ctx, r.request, r.response); err != nil {
-+ return driverError(r.log, err)
-+ }
-+
-+ return nil
-+}
-+
-+// Next is called to populate the next row of data into
-+// the provided slice. The provided slice will be the same
-+// size as the Columns() are wide.
-+//
-+// Next should return io.EOF when there are no more rows.
-+func (r *Rows) Next(dest []driver.Value) error {
-+ err := r.rows.Next(dest)
-+
-+ if err == protocol.ErrRowsPart {
-+ r.rows.Close()
-+ if err := r.protocol.More(r.ctx, r.response); err != nil {
-+ return driverError(r.log, err)
-+ }
-+ rows, err := protocol.DecodeRows(r.response)
-+ if err != nil {
-+ return driverError(r.log, err)
-+ }
-+ r.rows = rows
-+ return r.rows.Next(dest)
-+ }
-+
-+ if err == io.EOF {
-+ r.consumed = true
-+ }
-+
-+ return err
-+}
-+
-+// ColumnTypeScanType implements RowsColumnTypeScanType.
-+func (r *Rows) ColumnTypeScanType(i int) reflect.Type {
-+ // column := sql.NewColumn(r.rows, i)
-+
-+ // typ, err := r.protocol.ColumnTypeScanType(context.Background(), column)
-+ // if err != nil {
-+ // return nil
-+ // }
-+
-+ // return typ.DriverType()
-+ return nil
-+}
-+
-+// ColumnTypeDatabaseTypeName implements RowsColumnTypeDatabaseTypeName.
-+// warning: not thread safe
-+func (r *Rows) ColumnTypeDatabaseTypeName(i int) string {
-+ if r.types == nil {
-+ var err error
-+ r.types, err = r.rows.ColumnTypes()
-+ // an error might not matter if we get our types
-+ if err != nil && i >= len(r.types) {
-+ // a panic here doesn't really help,
-+ // as an empty column type is not the end of the world
-+ // but we should still inform the user of the failure
-+ const msg = "row (%p) error returning column #%d type: %v\n"
-+ r.log(client.LogWarn, msg, r, i, err)
-+ return ""
-+ }
-+ }
-+ return r.types[i]
-+}
-+
-+// Convert a driver.Value slice into a driver.NamedValue slice.
-+func valuesToNamedValues(args []driver.Value) []driver.NamedValue {
-+ namedValues := make([]driver.NamedValue, len(args))
-+ for i, value := range args {
-+ namedValues[i] = driver.NamedValue{
-+ Ordinal: i + 1,
-+ Value: value,
-+ }
-+ }
-+ return namedValues
-+}
-+
-+type unwrappable interface {
-+ Unwrap() error
-+}
-+
-+func driverError(log client.LogFunc, err error) error {
-+ switch err := errors.Cause(err).(type) {
-+ case syscall.Errno:
-+ log(client.LogDebug, "network connection lost: %v", err)
-+ return driver.ErrBadConn
-+ case *net.OpError:
-+ log(client.LogDebug, "network connection lost: %v", err)
-+ return driver.ErrBadConn
-+ case protocol.ErrRequest:
-+ switch err.Code {
-+ case errIoErrNotLeaderLegacy:
-+ fallthrough
-+ case errIoErrLeadershipLostLegacy:
-+ fallthrough
-+ case errIoErrNotLeader:
-+ fallthrough
-+ case errIoErrLeadershipLost:
-+ log(client.LogDebug, "leadership lost (%d - %s)", err.Code, err.Description)
-+ return driver.ErrBadConn
-+ default:
-+ // FIXME: the server side sometimes return SQLITE_OK
-+ // even in case of errors. This issue is still being
-+ // investigated, but for now let's just mark this
-+ // connection as bad so the client will retry.
-+ if err.Code == 0 {
-+ log(client.LogWarn, "unexpected error code (%d - %s)", err.Code, err.Description)
-+ return driver.ErrBadConn
-+ }
-+ return Error{
-+ Code: int(err.Code),
-+ Message: err.Description,
-+ }
-+ }
-+ default:
-+ // When using a TLS connection, the underlying error might get
-+ // wrapped by the stdlib itself with the new errors wrapping
-+ // conventions available since go 1.13. In that case we check
-+ // the underlying error with Unwrap() instead of Cause().
-+ if root, ok := err.(unwrappable); ok {
-+ err = root.Unwrap()
-+ }
-+ switch err.(type) {
-+ case *net.OpError:
-+ log(client.LogDebug, "network connection lost: %v", err)
-+ return driver.ErrBadConn
-+ }
-+ }
-+ return err
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/driver/driver_test.go b/vendor/github.com/canonical/go-dqlite/driver/driver_test.go
-new file mode 100644
-index 00000000000..500f4136a80
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/driver/driver_test.go
-@@ -0,0 +1,825 @@
-+// Copyright 2017 Canonical Ltd.
-+//
-+// Licensed under the Apache License, Version 2.0 (the "License");
-+// you may not use this file except in compliance with the License.
-+// You may obtain a copy of the License at
-+//
-+// http://www.apache.org/licenses/LICENSE-2.0
-+//
-+// Unless required by applicable law or agreed to in writing, software
-+// distributed under the License is distributed on an "AS IS" BASIS,
-+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-+// See the License for the specific language governing permissions and
-+// limitations under the License.
-+
-+package driver_test
-+
-+import (
-+ "context"
-+ "database/sql/driver"
-+ "io"
-+ "io/ioutil"
-+ "os"
-+ "testing"
-+
-+ dqlite "github.com/canonical/go-dqlite"
-+ "github.com/canonical/go-dqlite/client"
-+ dqlitedriver "github.com/canonical/go-dqlite/driver"
-+ "github.com/canonical/go-dqlite/internal/logging"
-+ "github.com/stretchr/testify/assert"
-+ "github.com/stretchr/testify/require"
-+)
-+
-+func TestDriver_Open(t *testing.T) {
-+ driver, cleanup := newDriver(t)
-+ defer cleanup()
-+
-+ conn, err := driver.Open("test.db")
-+ require.NoError(t, err)
-+
-+ assert.NoError(t, conn.Close())
-+}
-+
-+func TestDriver_Prepare(t *testing.T) {
-+ driver, cleanup := newDriver(t)
-+ defer cleanup()
-+
-+ conn, err := driver.Open("test.db")
-+ require.NoError(t, err)
-+
-+ stmt, err := conn.Prepare("CREATE TABLE test (n INT)")
-+ require.NoError(t, err)
-+
-+ assert.Equal(t, 0, stmt.NumInput())
-+
-+ assert.NoError(t, conn.Close())
-+}
-+
-+func TestConn_Exec(t *testing.T) {
-+ drv, cleanup := newDriver(t)
-+ defer cleanup()
-+
-+ conn, err := drv.Open("test.db")
-+ require.NoError(t, err)
-+
-+ _, err = conn.Begin()
-+ require.NoError(t, err)
-+
-+ execer := conn.(driver.Execer)
-+
-+ _, err = execer.Exec("CREATE TABLE test (n INT)", nil)
-+ require.NoError(t, err)
-+
-+ result, err := execer.Exec("INSERT INTO test(n) VALUES(1)", nil)
-+ require.NoError(t, err)
-+
-+ lastInsertID, err := result.LastInsertId()
-+ require.NoError(t, err)
-+
-+ assert.Equal(t, lastInsertID, int64(1))
-+
-+ rowsAffected, err := result.RowsAffected()
-+ require.NoError(t, err)
-+
-+ assert.Equal(t, rowsAffected, int64(1))
-+
-+ assert.NoError(t, conn.Close())
-+}
-+
-+func TestConn_Query(t *testing.T) {
-+ drv, cleanup := newDriver(t)
-+ defer cleanup()
-+
-+ conn, err := drv.Open("test.db")
-+ require.NoError(t, err)
-+
-+ _, err = conn.Begin()
-+ require.NoError(t, err)
-+
-+ execer := conn.(driver.Execer)
-+
-+ _, err = execer.Exec("CREATE TABLE test (n INT)", nil)
-+ require.NoError(t, err)
-+
-+ _, err = execer.Exec("INSERT INTO test(n) VALUES(1)", nil)
-+ require.NoError(t, err)
-+
-+ queryer := conn.(driver.Queryer)
-+
-+ _, err = queryer.Query("SELECT n FROM test", nil)
-+ require.NoError(t, err)
-+
-+ assert.NoError(t, conn.Close())
-+}
-+
-+func TestConn_QueryRow(t *testing.T) {
-+ drv, cleanup := newDriver(t)
-+ defer cleanup()
-+
-+ conn, err := drv.Open("test.db")
-+ require.NoError(t, err)
-+
-+ _, err = conn.Begin()
-+ require.NoError(t, err)
-+
-+ execer := conn.(driver.Execer)
-+
-+ _, err = execer.Exec("CREATE TABLE test (n INT)", nil)
-+ require.NoError(t, err)
-+
-+ _, err = execer.Exec("INSERT INTO test(n) VALUES(1)", nil)
-+ require.NoError(t, err)
-+
-+ _, err = execer.Exec("INSERT INTO test(n) VALUES(1)", nil)
-+ require.NoError(t, err)
-+
-+ queryer := conn.(driver.Queryer)
-+
-+ rows, err := queryer.Query("SELECT n FROM test", nil)
-+ require.NoError(t, err)
-+
-+ values := make([]driver.Value, 1)
-+ require.NoError(t, rows.Next(values))
-+
-+ require.NoError(t, rows.Close())
-+
-+ assert.NoError(t, conn.Close())
-+}
-+
-+func TestConn_QueryBlob(t *testing.T) {
-+ drv, cleanup := newDriver(t)
-+ defer cleanup()
-+
-+ conn, err := drv.Open("test.db")
-+ require.NoError(t, err)
-+
-+ _, err = conn.Begin()
-+ require.NoError(t, err)
-+
-+ execer := conn.(driver.Execer)
-+
-+ _, err = execer.Exec("CREATE TABLE test (data BLOB)", nil)
-+ require.NoError(t, err)
-+
-+ values := []driver.Value{
-+ []byte{'a', 'b', 'c'},
-+ }
-+ _, err = execer.Exec("INSERT INTO test(data) VALUES(?)", values)
-+ require.NoError(t, err)
-+
-+ queryer := conn.(driver.Queryer)
-+
-+ rows, err := queryer.Query("SELECT data FROM test", nil)
-+ require.NoError(t, err)
-+
-+ assert.Equal(t, rows.Columns(), []string{"data"})
-+
-+ values = make([]driver.Value, 1)
-+ require.NoError(t, rows.Next(values))
-+
-+ assert.Equal(t, []byte{'a', 'b', 'c'}, values[0])
-+
-+ assert.NoError(t, conn.Close())
-+}
-+
-+func TestStmt_Exec(t *testing.T) {
-+ drv, cleanup := newDriver(t)
-+ defer cleanup()
-+
-+ conn, err := drv.Open("test.db")
-+ require.NoError(t, err)
-+
-+ stmt, err := conn.Prepare("CREATE TABLE test (n INT)")
-+ require.NoError(t, err)
-+
-+ _, err = conn.Begin()
-+ require.NoError(t, err)
-+
-+ _, err = stmt.Exec(nil)
-+ require.NoError(t, err)
-+
-+ require.NoError(t, stmt.Close())
-+
-+ values := []driver.Value{
-+ int64(1),
-+ }
-+
-+ stmt, err = conn.Prepare("INSERT INTO test(n) VALUES(?)")
-+ require.NoError(t, err)
-+
-+ result, err := stmt.Exec(values)
-+ require.NoError(t, err)
-+
-+ lastInsertID, err := result.LastInsertId()
-+ require.NoError(t, err)
-+
-+ assert.Equal(t, lastInsertID, int64(1))
-+
-+ rowsAffected, err := result.RowsAffected()
-+ require.NoError(t, err)
-+
-+ assert.Equal(t, rowsAffected, int64(1))
-+
-+ require.NoError(t, stmt.Close())
-+
-+ assert.NoError(t, conn.Close())
-+}
-+
-+func TestStmt_Query(t *testing.T) {
-+ drv, cleanup := newDriver(t)
-+ defer cleanup()
-+
-+ conn, err := drv.Open("test.db")
-+ require.NoError(t, err)
-+
-+ stmt, err := conn.Prepare("CREATE TABLE test (n INT)")
-+ require.NoError(t, err)
-+
-+ _, err = conn.Begin()
-+ require.NoError(t, err)
-+
-+ _, err = stmt.Exec(nil)
-+ require.NoError(t, err)
-+
-+ require.NoError(t, stmt.Close())
-+
-+ stmt, err = conn.Prepare("INSERT INTO test(n) VALUES(-123)")
-+ require.NoError(t, err)
-+
-+ _, err = stmt.Exec(nil)
-+ require.NoError(t, err)
-+
-+ require.NoError(t, stmt.Close())
-+
-+ stmt, err = conn.Prepare("SELECT n FROM test")
-+ require.NoError(t, err)
-+
-+ rows, err := stmt.Query(nil)
-+ require.NoError(t, err)
-+
-+ assert.Equal(t, rows.Columns(), []string{"n"})
-+
-+ values := make([]driver.Value, 1)
-+ require.NoError(t, rows.Next(values))
-+
-+ assert.Equal(t, int64(-123), values[0])
-+
-+ require.Equal(t, io.EOF, rows.Next(values))
-+
-+ require.NoError(t, stmt.Close())
-+
-+ assert.NoError(t, conn.Close())
-+}
-+
-+func TestConn_QueryParams(t *testing.T) {
-+ drv, cleanup := newDriver(t)
-+ defer cleanup()
-+
-+ conn, err := drv.Open("test.db")
-+ require.NoError(t, err)
-+
-+ _, err = conn.Begin()
-+ require.NoError(t, err)
-+
-+ execer := conn.(driver.Execer)
-+
-+ _, err = execer.Exec("CREATE TABLE test (n INT, t TEXT)", nil)
-+ require.NoError(t, err)
-+
-+ _, err = execer.Exec(`
-+INSERT INTO test (n,t) VALUES (1,'a');
-+INSERT INTO test (n,t) VALUES (2,'a');
-+INSERT INTO test (n,t) VALUES (2,'b');
-+INSERT INTO test (n,t) VALUES (3,'b');
-+`,
-+ nil)
-+ require.NoError(t, err)
-+
-+ values := []driver.Value{
-+ int64(1),
-+ "a",
-+ }
-+
-+ queryer := conn.(driver.Queryer)
-+
-+ rows, err := queryer.Query("SELECT n, t FROM test WHERE n > ? AND t = ?", values)
-+ require.NoError(t, err)
-+
-+ assert.Equal(t, rows.Columns()[0], "n")
-+
-+ values = make([]driver.Value, 2)
-+ require.NoError(t, rows.Next(values))
-+
-+ assert.Equal(t, int64(2), values[0])
-+ assert.Equal(t, "a", values[1])
-+
-+ require.Equal(t, io.EOF, rows.Next(values))
-+
-+ assert.NoError(t, conn.Close())
-+}
-+
-+func Test_ColumnTypesEmpty(t *testing.T) {
-+ t.Skip("this currently fails if the result set is empty, is dqlite skipping the header if empty set?")
-+ drv, cleanup := newDriver(t)
-+ defer cleanup()
-+
-+ conn, err := drv.Open("test.db")
-+ require.NoError(t, err)
-+
-+ stmt, err := conn.Prepare("CREATE TABLE test (n INT)")
-+ require.NoError(t, err)
-+
-+ _, err = conn.Begin()
-+ require.NoError(t, err)
-+
-+ _, err = stmt.Exec(nil)
-+ require.NoError(t, err)
-+
-+ require.NoError(t, stmt.Close())
-+
-+ stmt, err = conn.Prepare("SELECT n FROM test")
-+ require.NoError(t, err)
-+
-+ rows, err := stmt.Query(nil)
-+ require.NoError(t, err)
-+
-+ require.NoError(t, err)
-+ rowTypes, ok := rows.(driver.RowsColumnTypeDatabaseTypeName)
-+ require.True(t, ok)
-+
-+ typeName := rowTypes.ColumnTypeDatabaseTypeName(0)
-+ assert.Equal(t, "INTEGER", typeName)
-+
-+ require.NoError(t, stmt.Close())
-+
-+ assert.NoError(t, conn.Close())
-+}
-+
-+func Test_ColumnTypesExists(t *testing.T) {
-+ drv, cleanup := newDriver(t)
-+ defer cleanup()
-+
-+ conn, err := drv.Open("test.db")
-+ require.NoError(t, err)
-+
-+ stmt, err := conn.Prepare("CREATE TABLE test (n INT)")
-+ require.NoError(t, err)
-+
-+ _, err = conn.Begin()
-+ require.NoError(t, err)
-+
-+ _, err = stmt.Exec(nil)
-+ require.NoError(t, err)
-+
-+ require.NoError(t, stmt.Close())
-+
-+ stmt, err = conn.Prepare("INSERT INTO test(n) VALUES(-123)")
-+ require.NoError(t, err)
-+
-+ _, err = stmt.Exec(nil)
-+ require.NoError(t, err)
-+
-+ stmt, err = conn.Prepare("SELECT n FROM test")
-+ require.NoError(t, err)
-+
-+ rows, err := stmt.Query(nil)
-+ require.NoError(t, err)
-+
-+ require.NoError(t, err)
-+ rowTypes, ok := rows.(driver.RowsColumnTypeDatabaseTypeName)
-+ require.True(t, ok)
-+
-+ typeName := rowTypes.ColumnTypeDatabaseTypeName(0)
-+ assert.Equal(t, "INTEGER", typeName)
-+
-+ require.NoError(t, stmt.Close())
-+ assert.NoError(t, conn.Close())
-+}
-+
-+// ensure column types data is available
-+// even after the last row of the query
-+func Test_ColumnTypesEnd(t *testing.T) {
-+ drv, cleanup := newDriver(t)
-+ defer cleanup()
-+
-+ conn, err := drv.Open("test.db")
-+ require.NoError(t, err)
-+
-+ stmt, err := conn.Prepare("CREATE TABLE test (n INT)")
-+ require.NoError(t, err)
-+
-+ _, err = conn.Begin()
-+ require.NoError(t, err)
-+
-+ _, err = stmt.Exec(nil)
-+ require.NoError(t, err)
-+
-+ require.NoError(t, stmt.Close())
-+
-+ stmt, err = conn.Prepare("INSERT INTO test(n) VALUES(-123)")
-+ require.NoError(t, err)
-+
-+ _, err = stmt.Exec(nil)
-+ require.NoError(t, err)
-+
-+ stmt, err = conn.Prepare("SELECT n FROM test")
-+ require.NoError(t, err)
-+
-+ rows, err := stmt.Query(nil)
-+ require.NoError(t, err)
-+
-+ require.NoError(t, err)
-+ rowTypes, ok := rows.(driver.RowsColumnTypeDatabaseTypeName)
-+ require.True(t, ok)
-+
-+ typeName := rowTypes.ColumnTypeDatabaseTypeName(0)
-+ assert.Equal(t, "INTEGER", typeName)
-+
-+ values := make([]driver.Value, 1)
-+ require.NoError(t, rows.Next(values))
-+
-+ assert.Equal(t, int64(-123), values[0])
-+
-+ require.Equal(t, io.EOF, rows.Next(values))
-+
-+ // despite EOF we should have types cached
-+ typeName = rowTypes.ColumnTypeDatabaseTypeName(0)
-+ assert.Equal(t, "INTEGER", typeName)
-+
-+ require.NoError(t, stmt.Close())
-+ assert.NoError(t, conn.Close())
-+}
-+
-+func newDriver(t *testing.T) (*dqlitedriver.Driver, func()) {
-+ t.Helper()
-+
-+ _, cleanup := newNode(t)
-+
-+ store := newStore(t, "@1")
-+
-+ log := logging.Test(t)
-+
-+ driver, err := dqlitedriver.New(store, dqlitedriver.WithLogFunc(log))
-+ require.NoError(t, err)
-+
-+ return driver, cleanup
-+}
-+
-+// Create a new in-memory server store populated with the given addresses.
-+func newStore(t *testing.T, address string) client.NodeStore {
-+ t.Helper()
-+
-+ store, err := client.DefaultNodeStore(":memory:")
-+ require.NoError(t, err)
-+
-+ server := client.NodeInfo{Address: address}
-+ require.NoError(t, store.Set(context.Background(), []client.NodeInfo{server}))
-+
-+ return store
-+}
-+
-+func newNode(t *testing.T) (*dqlite.Node, func()) {
-+ t.Helper()
-+ dir, dirCleanup := newDir(t)
-+
-+ server, err := dqlite.New(uint64(1), "@1", dir, dqlite.WithBindAddress("@1"))
-+ require.NoError(t, err)
-+
-+ err = server.Start()
-+ require.NoError(t, err)
-+
-+ cleanup := func() {
-+ require.NoError(t, server.Close())
-+ dirCleanup()
-+ }
-+
-+ return server, cleanup
-+}
-+
-+// Return a new temporary directory.
-+func newDir(t *testing.T) (string, func()) {
-+ t.Helper()
-+
-+ dir, err := ioutil.TempDir("", "dqlite-replication-test-")
-+ assert.NoError(t, err)
-+
-+ cleanup := func() {
-+ _, err := os.Stat(dir)
-+ if err != nil {
-+ assert.True(t, os.IsNotExist(err))
-+ } else {
-+ assert.NoError(t, os.RemoveAll(dir))
-+ }
-+ }
-+
-+ return dir, cleanup
-+}
-+
-+/*
-+import (
-+ "bytes"
-+ "fmt"
-+ "io/ioutil"
-+ "log"
-+ "os"
-+ "path/filepath"
-+ "testing"
-+
-+ "github.com/canonical/go-dqlite"
-+ "github.com/CanonicalLtd/go-sqlite3"
-+ "github.com/CanonicalLtd/raft-test"
-+ "github.com/hashicorp/raft"
-+ "github.com/mpvl/subtest"
-+ "github.com/stretchr/testify/assert"
-+ "github.com/stretchr/testify/require"
-+)
-+
-+// Using invalid paths in Config.Dir results in an error.
-+func TestNewDriver_DirErrors(t *testing.T) {
-+ cases := []struct {
-+ title string
-+ dir string // Dir to pass to the new driver.
-+ error string // Expected message
-+ }{
-+ {
-+ `no path given at all`,
-+ "",
-+ "no data dir provided in config",
-+ },
-+ {
-+ `non-existing path that can't be created`,
-+ "/cant/create/anything/here/",
-+ "failed to create data dir",
-+ },
-+ {
-+ `path that can't be accessed`,
-+ "/proc/1/root/",
-+ "failed to access data dir",
-+ },
-+ {
-+ `path that is not a directory`,
-+ "/etc/fstab",
-+ "data dir '/etc/fstab' is not a directory",
-+ },
-+ }
-+ for _, c := range cases {
-+ subtest.Run(t, c.title, func(t *testing.T) {
-+ registry := dqlite.NewRegistry(c.dir)
-+ driver, err := dqlite.NewDriver(registry, nil, dqlite.DriverConfig{})
-+ assert.Nil(t, driver)
-+ require.Error(t, err)
-+ assert.Contains(t, err.Error(), c.error)
-+ })
-+ }
-+}
-+
-+func TestNewDriver_CreateDir(t *testing.T) {
-+ dir, cleanup := newDir(t)
-+ defer cleanup()
-+
-+ dir = filepath.Join(dir, "does", "not", "exist")
-+ registry := dqlite.NewRegistry(dir)
-+ _, err := dqlite.NewDriver(registry, &raft.Raft{}, dqlite.DriverConfig{})
-+ assert.NoError(t, err)
-+}
-+
-+func DISABLE_TestDriver_SQLiteLogging(t *testing.T) {
-+ output := bytes.NewBuffer(nil)
-+ logger := log.New(output, "", 0)
-+ config := dqlite.DriverConfig{Logger: logger}
-+
-+ driver, cleanup := newDriverWithConfig(t, config)
-+ defer cleanup()
-+
-+ conn, err := driver.Open("test.db")
-+ require.NoError(t, err)
-+
-+ _, err = conn.Prepare("CREATE FOO")
-+ require.Error(t, err)
-+ assert.Contains(t, output.String(), `[ERR] near "FOO": syntax error (1)`)
-+}
-+
-+func TestDriver_OpenClose(t *testing.T) {
-+ driver, cleanup := newDriver(t)
-+ defer cleanup()
-+
-+ conn, err := driver.Open("test.db")
-+ require.NoError(t, err)
-+ assert.NoError(t, conn.Close())
-+}
-+
-+func TestDriver_OpenInvalidURI(t *testing.T) {
-+ driver, cleanup := newDriver(t)
-+ defer cleanup()
-+
-+ conn, err := driver.Open("/foo/test.db")
-+ assert.Nil(t, conn)
-+ assert.EqualError(t, err, "invalid URI /foo/test.db: directory segments are invalid")
-+}
-+
-+func TestDriver_OpenError(t *testing.T) {
-+ dir, cleanup := newDir(t)
-+ defer cleanup()
-+
-+ registry := dqlite.NewRegistry(dir)
-+ fsm := dqlite.NewFSM(registry)
-+ raft, cleanup := rafttest.Node(t, fsm)
-+ defer cleanup()
-+ config := dqlite.DriverConfig{}
-+
-+ driver, err := dqlite.NewDriver(registry, raft, config)
-+ require.NoError(t, err)
-+ require.NoError(t, os.RemoveAll(dir))
-+
-+ conn, err := driver.Open("test.db")
-+ assert.Nil(t, conn)
-+
-+ expected := fmt.Sprintf("open error for %s: unable to open database file", filepath.Join(dir, "test.db"))
-+ assert.EqualError(t, err, expected)
-+}
-+
-+// If the driver is not the current leader, all APIs return an error.
-+func TestDriver_NotLeader_Errors(t *testing.T) {
-+ cases := []struct {
-+ title string
-+ f func(*testing.T, *dqlite.Conn) error
-+ }{
-+ {
-+ `open`,
-+ func(t *testing.T, conn *dqlite.Conn) error {
-+ _, err := conn.Prepare("CREATE TABLE foo (n INT)")
-+ return err
-+ },
-+ },
-+ {
-+ `exec`,
-+ func(t *testing.T, conn *dqlite.Conn) error {
-+ _, err := conn.Exec("CREATE TABLE foo (n INT)", nil)
-+ return err
-+ },
-+ },
-+ {
-+ `begin`,
-+ func(t *testing.T, conn *dqlite.Conn) error {
-+ _, err := conn.Begin()
-+ return err
-+ },
-+ },
-+ }
-+
-+ for _, c := range cases {
-+ t.Run(c.title, func(t *testing.T) {
-+ dir, cleanup := newDir(t)
-+ defer cleanup()
-+
-+ registry1 := dqlite.NewRegistry(dir)
-+ registry2 := dqlite.NewRegistry(dir)
-+ fsm1 := dqlite.NewFSM(registry1)
-+ fsm2 := dqlite.NewFSM(registry2)
-+ rafts, control := rafttest.Cluster(t, []raft.FSM{fsm1, fsm2}, rafttest.Latency(1000.0))
-+ defer control.Close()
-+
-+ config := dqlite.DriverConfig{}
-+
-+ driver, err := dqlite.NewDriver(registry1, rafts["0"], config)
-+ require.NoError(t, err)
-+
-+ conn, err := driver.Open("test.db")
-+ require.NoError(t, err)
-+
-+ err = c.f(t, conn.(*dqlite.Conn))
-+ require.Error(t, err)
-+ erri, ok := err.(sqlite3.Error)
-+ require.True(t, ok)
-+ assert.Equal(t, sqlite3.ErrIoErrNotLeader, erri.ExtendedCode)
-+ })
-+ }
-+}
-+
-+// Return the address of the current raft leader.
-+func TestDriver_Leader(t *testing.T) {
-+ driver, cleanup := newDriver(t)
-+ defer cleanup()
-+
-+ assert.Equal(t, "0", driver.Leader())
-+}
-+
-+// Return the addresses of all current raft servers.
-+func TestDriver_Nodes(t *testing.T) {
-+ driver, cleanup := newDriver(t)
-+ defer cleanup()
-+
-+ servers, err := driver.Nodes()
-+ require.NoError(t, err)
-+ assert.Equal(t, []string{"0"}, servers)
-+}
-+
-+func TestStmt_Exec(t *testing.T) {
-+ driver, cleanup := newDriver(t)
-+ defer cleanup()
-+
-+ conn, err := driver.Open("test.db")
-+ require.NoError(t, err)
-+ defer conn.Close()
-+
-+ stmt, err := conn.Prepare("CREATE TABLE foo (n INT)")
-+ require.NoError(t, err)
-+ _, err = stmt.Exec(nil)
-+ assert.NoError(t, err)
-+}
-+
-+func TestStmt_Query(t *testing.T) {
-+ driver, cleanup := newDriver(t)
-+ defer cleanup()
-+
-+ conn, err := driver.Open("test.db")
-+ require.NoError(t, err)
-+ defer conn.Close()
-+
-+ stmt, err := conn.Prepare("SELECT name FROM sqlite_master")
-+ require.NoError(t, err)
-+ assert.Equal(t, 0, stmt.NumInput())
-+ rows, err := stmt.Query(nil)
-+ assert.NoError(t, err)
-+ defer rows.Close()
-+
-+}
-+
-+func TestTx_Commit(t *testing.T) {
-+ driver, cleanup := newDriver(t)
-+ defer cleanup()
-+
-+ conn, err := driver.Open("test.db")
-+ require.NoError(t, err)
-+ defer conn.Close()
-+
-+ tx, err := conn.Begin()
-+ require.NoError(t, err)
-+
-+ _, err = conn.(*dqlite.Conn).Exec("CREATE TABLE test (n INT)", nil)
-+ require.NoError(t, err)
-+
-+ assert.NoError(t, tx.Commit())
-+
-+ // The transaction ID has been saved in the committed buffer.
-+ token := tx.(*dqlite.Tx).Token()
-+ assert.Equal(t, uint64(5), token)
-+ assert.NoError(t, driver.Recover(token))
-+}
-+
-+func TestTx_Rollback(t *testing.T) {
-+ driver, cleanup := newDriver(t)
-+ defer cleanup()
-+
-+ conn, err := driver.Open("test.db")
-+ require.NoError(t, err)
-+ defer conn.Close()
-+
-+ tx, err := conn.Begin()
-+ require.NoError(t, err)
-+ assert.NoError(t, tx.Rollback())
-+}
-+
-+// Create a new test dqlite.Driver.
-+func newDriver(t *testing.T) (*dqlite.Driver, func()) {
-+ config := dqlite.DriverConfig{Logger: newTestingLogger(t, 0)}
-+ return newDriverWithConfig(t, config)
-+}
-+
-+// Create a new test dqlite.Driver with custom configuration.
-+func newDriverWithConfig(t *testing.T, config dqlite.DriverConfig) (*dqlite.Driver, func()) {
-+ dir, dirCleanup := newDir(t)
-+
-+ registry := dqlite.NewRegistry(dir)
-+ fsm := dqlite.NewFSM(registry)
-+ raft, raftCleanup := rafttest.Node(t, fsm)
-+
-+ driver, err := dqlite.NewDriver(registry, raft, config)
-+ require.NoError(t, err)
-+
-+ cleanup := func() {
-+ raftCleanup()
-+ dirCleanup()
-+ }
-+
-+ return driver, cleanup
-+}
-+
-+// Create a new test directory and return it, along with a function that can be
-+// used to remove it.
-+func newDir(t *testing.T) (string, func()) {
-+ dir, err := ioutil.TempDir("", "dqlite-driver-test-")
-+ if err != nil {
-+ t.Fatalf("failed to create temp dir: %v", err)
-+ }
-+ cleanup := func() {
-+ _, err := os.Stat(dir)
-+ if err != nil {
-+ assert.True(t, os.IsNotExist(err))
-+ } else {
-+ assert.NoError(t, os.RemoveAll(dir))
-+ }
-+ }
-+ return dir, cleanup
-+}
-+*/
-diff --git a/vendor/github.com/canonical/go-dqlite/driver/integration_test.go b/vendor/github.com/canonical/go-dqlite/driver/integration_test.go
-new file mode 100644
-index 00000000000..ecc07dafd1d
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/driver/integration_test.go
-@@ -0,0 +1,470 @@
-+package driver_test
-+
-+import (
-+ "context"
-+ "database/sql"
-+ "fmt"
-+ "os"
-+ "testing"
-+ "time"
-+
-+ dqlite "github.com/canonical/go-dqlite"
-+ "github.com/canonical/go-dqlite/client"
-+ "github.com/canonical/go-dqlite/driver"
-+ "github.com/canonical/go-dqlite/internal/logging"
-+ "github.com/mattn/go-sqlite3"
-+ "github.com/stretchr/testify/assert"
-+ "github.com/stretchr/testify/require"
-+)
-+
-+func TestIntegration_DatabaseSQL(t *testing.T) {
-+ db, _, cleanup := newDB(t, 3)
-+ defer cleanup()
-+
-+ tx, err := db.Begin()
-+ require.NoError(t, err)
-+
-+ _, err = tx.Exec(`
-+CREATE TABLE test (n INT, s TEXT);
-+CREATE TABLE test2 (n INT, t DATETIME DEFAULT CURRENT_TIMESTAMP)
-+`)
-+ require.NoError(t, err)
-+
-+ stmt, err := tx.Prepare("INSERT INTO test(n, s) VALUES(?, ?)")
-+ require.NoError(t, err)
-+
-+ _, err = stmt.Exec(int64(123), "hello")
-+ require.NoError(t, err)
-+
-+ require.NoError(t, stmt.Close())
-+
-+ _, err = tx.Exec("INSERT INTO test2(n) VALUES(?)", int64(456))
-+ require.NoError(t, err)
-+
-+ require.NoError(t, tx.Commit())
-+
-+ tx, err = db.Begin()
-+ require.NoError(t, err)
-+
-+ rows, err := tx.Query("SELECT n, s FROM test")
-+ require.NoError(t, err)
-+
-+ for rows.Next() {
-+ var n int64
-+ var s string
-+
-+ require.NoError(t, rows.Scan(&n, &s))
-+
-+ assert.Equal(t, int64(123), n)
-+ assert.Equal(t, "hello", s)
-+ }
-+
-+ require.NoError(t, rows.Err())
-+ require.NoError(t, rows.Close())
-+
-+ rows, err = tx.Query("SELECT n, t FROM test2")
-+ require.NoError(t, err)
-+
-+ for rows.Next() {
-+ var n int64
-+ var s time.Time
-+
-+ require.NoError(t, rows.Scan(&n, &s))
-+
-+ assert.Equal(t, int64(456), n)
-+ }
-+
-+ require.NoError(t, rows.Err())
-+ require.NoError(t, rows.Close())
-+
-+ require.NoError(t, tx.Rollback())
-+}
-+
-+func TestIntegration_ConstraintError(t *testing.T) {
-+ db, _, cleanup := newDB(t, 3)
-+ defer cleanup()
-+
-+ _, err := db.Exec("CREATE TABLE test (n INT, UNIQUE (n))")
-+ require.NoError(t, err)
-+
-+ _, err = db.Exec("INSERT INTO test (n) VALUES (1)")
-+ require.NoError(t, err)
-+
-+ _, err = db.Exec("INSERT INTO test (n) VALUES (1)")
-+ if err, ok := err.(driver.Error); ok {
-+ assert.Equal(t, int(sqlite3.ErrConstraintUnique), err.Code)
-+ assert.Equal(t, "UNIQUE constraint failed: test.n", err.Message)
-+ } else {
-+ t.Fatalf("expected diver error, got %+v", err)
-+ }
-+}
-+
-+func TestIntegration_ExecBindError(t *testing.T) {
-+ db, _, cleanup := newDB(t, 1)
-+ defer cleanup()
-+ defer db.Close()
-+
-+ ctx, cancel := context.WithTimeout(context.Background(), 9*time.Millisecond)
-+ defer cancel()
-+
-+ _, err := db.ExecContext(ctx, "CREATE TABLE test (n INT)")
-+ require.NoError(t, err)
-+
-+ _, err = db.ExecContext(ctx, "INSERT INTO test(n) VALUES(1)", 1)
-+ assert.EqualError(t, err, "column index out of range")
-+}
-+
-+func TestIntegration_QueryBindError(t *testing.T) {
-+ db, _, cleanup := newDB(t, 1)
-+ defer cleanup()
-+ defer db.Close()
-+
-+ ctx, cancel := context.WithTimeout(context.Background(), 9*time.Millisecond)
-+ defer cancel()
-+
-+ _, err := db.QueryContext(ctx, "SELECT 1", 1)
-+ assert.EqualError(t, err, "column index out of range")
-+}
-+
-+func TestIntegration_ConfigMultiThread(t *testing.T) {
-+ _, _, cleanup := newDB(t, 1)
-+ defer cleanup()
-+
-+ err := dqlite.ConfigMultiThread()
-+ assert.EqualError(t, err, "SQLite is already initialized")
-+}
-+
-+func TestIntegration_LargeQuery(t *testing.T) {
-+ db, _, cleanup := newDB(t, 3)
-+ defer cleanup()
-+
-+ tx, err := db.Begin()
-+ require.NoError(t, err)
-+
-+ _, err = tx.Exec("CREATE TABLE test (n INT)")
-+ require.NoError(t, err)
-+
-+ stmt, err := tx.Prepare("INSERT INTO test(n) VALUES(?)")
-+ require.NoError(t, err)
-+
-+ for i := 0; i < 512; i++ {
-+ _, err = stmt.Exec(int64(i))
-+ require.NoError(t, err)
-+ }
-+
-+ require.NoError(t, stmt.Close())
-+
-+ require.NoError(t, tx.Commit())
-+
-+ tx, err = db.Begin()
-+ require.NoError(t, err)
-+
-+ rows, err := tx.Query("SELECT n FROM test")
-+ require.NoError(t, err)
-+
-+ columns, err := rows.Columns()
-+ require.NoError(t, err)
-+
-+ assert.Equal(t, []string{"n"}, columns)
-+
-+ count := 0
-+ for i := 0; rows.Next(); i++ {
-+ var n int64
-+
-+ require.NoError(t, rows.Scan(&n))
-+
-+ assert.Equal(t, int64(i), n)
-+ count++
-+ }
-+
-+ require.NoError(t, rows.Err())
-+ require.NoError(t, rows.Close())
-+
-+ assert.Equal(t, count, 512)
-+
-+ require.NoError(t, tx.Rollback())
-+}
-+
-+// Build a 2-node cluster, kill one node and recover the other.
-+func TestIntegration_Recover(t *testing.T) {
-+ db, helpers, cleanup := newDB(t, 2)
-+ defer cleanup()
-+
-+ _, err := db.Exec("CREATE TABLE test (n INT)")
-+ require.NoError(t, err)
-+
-+ helpers[0].Close()
-+ helpers[1].Close()
-+
-+ helpers[0].Create()
-+
-+ infos := []client.NodeInfo{{ID: 1, Address: "@1"}}
-+ require.NoError(t, helpers[0].Node.Recover(infos))
-+
-+ helpers[0].Start()
-+
-+ // FIXME: this is necessary otherwise the INSERT below fails with "no
-+ // such table", because the replication hooks are not triggered and the
-+ // barrier is not applied.
-+ _, err = db.Exec("CREATE TABLE test2 (n INT)")
-+ require.NoError(t, err)
-+
-+ _, err = db.Exec("INSERT INTO test(n) VALUES(1)")
-+ require.NoError(t, err)
-+}
-+
-+// The db.Ping() method can be used to wait until there is a stable leader.
-+func TestIntegration_PingOnlyWorksOnceLeaderElected(t *testing.T) {
-+ db, helpers, cleanup := newDB(t, 2)
-+ defer cleanup()
-+
-+ helpers[0].Close()
-+
-+ // Ping returns an error, since the cluster is not available.
-+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
-+ defer cancel()
-+ assert.Error(t, db.PingContext(ctx))
-+
-+ helpers[0].Create()
-+ helpers[0].Start()
-+
-+ // Ping now returns no error, since the cluster is available.
-+ assert.NoError(t, db.Ping())
-+
-+ // If leadership is lost after the first successful call, Ping() still
-+ // returns no error.
-+ helpers[0].Close()
-+ assert.NoError(t, db.Ping())
-+}
-+
-+func TestIntegration_HighAvailability(t *testing.T) {
-+ db, helpers, cleanup := newDB(t, 3)
-+ defer cleanup()
-+
-+ _, err := db.Exec("CREATE TABLE test (n INT)")
-+ require.NoError(t, err)
-+
-+ // Shutdown all three nodes.
-+ helpers[0].Close()
-+ helpers[1].Close()
-+ helpers[2].Close()
-+
-+ // Restart two of them.
-+ helpers[1].Create()
-+ helpers[2].Create()
-+ helpers[1].Start()
-+ helpers[2].Start()
-+
-+ // Give the cluster a chance to establish a quorom
-+ time.Sleep(2 * time.Second)
-+
-+ _, err = db.Exec("INSERT INTO test(n) VALUES(1)")
-+ require.NoError(t, err)
-+}
-+
-+func TestIntegration_LeadershipTransfer(t *testing.T) {
-+ db, helpers, cleanup := newDB(t, 3)
-+ defer cleanup()
-+
-+ _, err := db.Exec("CREATE TABLE test (n INT)")
-+ require.NoError(t, err)
-+
-+ cli := helpers[0].Client()
-+ require.NoError(t, cli.Transfer(context.Background(), 2))
-+
-+ _, err = db.Exec("INSERT INTO test(n) VALUES(1)")
-+ require.NoError(t, err)
-+}
-+
-+func TestIntegration_LeadershipTransfer_Tx(t *testing.T) {
-+ db, helpers, cleanup := newDB(t, 3)
-+ defer cleanup()
-+
-+ _, err := db.Exec("CREATE TABLE test (n INT)")
-+ require.NoError(t, err)
-+
-+ cli := helpers[0].Client()
-+ require.NoError(t, cli.Transfer(context.Background(), 2))
-+
-+ tx, err := db.Begin()
-+ require.NoError(t, err)
-+
-+ _, err = tx.Query("SELECT * FROM test")
-+ require.NoError(t, err)
-+
-+ require.NoError(t, tx.Commit())
-+}
-+
-+func TestOptions(t *testing.T) {
-+ // make sure applying all options doesn't break anything
-+ store, err := client.DefaultNodeStore(":memory:")
-+ require.NoError(t, err)
-+ log := logging.Test(t)
-+ _, err = driver.New(
-+ store,
-+ driver.WithLogFunc(log),
-+ driver.WithContext(context.Background()),
-+ driver.WithConnectionTimeout(15*time.Second),
-+ driver.WithContextTimeout(2*time.Second),
-+ driver.WithConnectionBackoffFactor(50*time.Millisecond),
-+ driver.WithConnectionBackoffCap(1*time.Second),
-+ driver.WithAttemptTimeout(5*time.Second),
-+ driver.WithRetryLimit(0),
-+ )
-+ require.NoError(t, err)
-+}
-+
-+func newDB(t *testing.T, n int) (*sql.DB, []*nodeHelper, func()) {
-+ infos := make([]client.NodeInfo, n)
-+ for i := range infos {
-+ infos[i].ID = uint64(i + 1)
-+ infos[i].Address = fmt.Sprintf("@%d", infos[i].ID)
-+ infos[i].Role = client.Voter
-+ }
-+ return newDBWithInfos(t, infos)
-+}
-+
-+func newDBWithInfos(t *testing.T, infos []client.NodeInfo) (*sql.DB, []*nodeHelper, func()) {
-+ helpers, helpersCleanup := newNodeHelpers(t, infos)
-+
-+ store, err := client.DefaultNodeStore(":memory:")
-+ require.NoError(t, err)
-+
-+ require.NoError(t, store.Set(context.Background(), infos))
-+
-+ log := logging.Test(t)
-+
-+ driver, err := driver.New(store, driver.WithLogFunc(log))
-+ require.NoError(t, err)
-+
-+ driverName := fmt.Sprintf("dqlite-integration-test-%d", driversCount)
-+ sql.Register(driverName, driver)
-+
-+ driversCount++
-+
-+ db, err := sql.Open(driverName, "test.db")
-+ require.NoError(t, err)
-+
-+ cleanup := func() {
-+ require.NoError(t, db.Close())
-+ helpersCleanup()
-+ }
-+
-+ return db, helpers, cleanup
-+}
-+
-+func registerDriver(driver *driver.Driver) string {
-+ name := fmt.Sprintf("dqlite-integration-test-%d", driversCount)
-+ sql.Register(name, driver)
-+ driversCount++
-+ return name
-+}
-+
-+type nodeHelper struct {
-+ t *testing.T
-+ ID uint64
-+ Address string
-+ Dir string
-+ Node *dqlite.Node
-+}
-+
-+func newNodeHelper(t *testing.T, id uint64, address string) *nodeHelper {
-+ h := &nodeHelper{
-+ t: t,
-+ ID: id,
-+ Address: address,
-+ }
-+
-+ h.Dir, _ = newDir(t)
-+
-+ h.Create()
-+ h.Start()
-+
-+ return h
-+}
-+
-+func (h *nodeHelper) Client() *client.Client {
-+ client, err := client.New(context.Background(), h.Node.BindAddress())
-+ require.NoError(h.t, err)
-+ return client
-+}
-+
-+func (h *nodeHelper) Create() {
-+ var err error
-+ require.Nil(h.t, h.Node)
-+ h.Node, err = dqlite.New(h.ID, h.Address, h.Dir, dqlite.WithBindAddress(h.Address))
-+ require.NoError(h.t, err)
-+}
-+
-+func (h *nodeHelper) Start() {
-+ require.NotNil(h.t, h.Node)
-+ require.NoError(h.t, h.Node.Start())
-+}
-+
-+func (h *nodeHelper) Close() {
-+ require.NotNil(h.t, h.Node)
-+ require.NoError(h.t, h.Node.Close())
-+ h.Node = nil
-+}
-+
-+func (h *nodeHelper) cleanup() {
-+ if h.Node != nil {
-+ h.Close()
-+ }
-+ require.NoError(h.t, os.RemoveAll(h.Dir))
-+}
-+
-+func newNodeHelpers(t *testing.T, infos []client.NodeInfo) ([]*nodeHelper, func()) {
-+ t.Helper()
-+
-+ n := len(infos)
-+ helpers := make([]*nodeHelper, n)
-+
-+ for i, info := range infos {
-+ helpers[i] = newNodeHelper(t, info.ID, info.Address)
-+
-+ if i > 0 {
-+ client := helpers[0].Client()
-+ defer client.Close()
-+
-+ require.NoError(t, client.Add(context.Background(), infos[i]))
-+ }
-+ }
-+
-+ cleanup := func() {
-+ for _, helper := range helpers {
-+ helper.cleanup()
-+ }
-+ }
-+
-+ return helpers, cleanup
-+}
-+
-+var driversCount = 0
-+
-+func TestIntegration_ColumnTypeName(t *testing.T) {
-+ db, _, cleanup := newDB(t, 1)
-+ defer cleanup()
-+
-+ _, err := db.Exec("CREATE TABLE test (n INT, UNIQUE (n))")
-+ require.NoError(t, err)
-+
-+ _, err = db.Exec("INSERT INTO test (n) VALUES (1)")
-+ require.NoError(t, err)
-+
-+ rows, err := db.Query("SELECT n FROM test")
-+ require.NoError(t, err)
-+ defer rows.Close()
-+
-+ types, err := rows.ColumnTypes()
-+ require.NoError(t, err)
-+
-+ assert.Equal(t, "INTEGER", types[0].DatabaseTypeName())
-+
-+ require.True(t, rows.Next())
-+ var n int64
-+ err = rows.Scan(&n)
-+ require.NoError(t, err)
-+
-+ assert.Equal(t, int64(1), n)
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/go.mod b/vendor/github.com/canonical/go-dqlite/go.mod
-new file mode 100644
-index 00000000000..b6b4ae5400a
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/go.mod
-@@ -0,0 +1,15 @@
-+module github.com/canonical/go-dqlite
-+
-+go 1.14
-+
-+require (
-+ github.com/Rican7/retry v0.1.0
-+ github.com/ghodss/yaml v1.0.0
-+ github.com/mattn/go-sqlite3 v2.0.3+incompatible
-+ github.com/peterh/liner v1.2.0
-+ github.com/pkg/errors v0.9.1
-+ github.com/spf13/cobra v1.0.0
-+ github.com/stretchr/testify v1.6.0
-+ golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980
-+ gopkg.in/yaml.v2 v2.3.0 // indirect
-+)
-diff --git a/vendor/github.com/canonical/go-dqlite/go.sum b/vendor/github.com/canonical/go-dqlite/go.sum
-new file mode 100644
-index 00000000000..0639f73b73d
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/go.sum
-@@ -0,0 +1,151 @@
-+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-+github.com/Rican7/retry v0.1.0 h1:FqK94z34ly8Baa6K+G8Mmza9rYWTKOJk+yckIBB5qVk=
-+github.com/Rican7/retry v0.1.0/go.mod h1:FgOROf8P5bebcC1DS0PdOQiqGUridaZvikzUmkFW6gg=
-+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
-+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
-+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-+github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
-+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-+github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
-+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
-+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-+github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-+github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
-+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-+github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-+github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-+github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-+github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
-+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-+github.com/mattn/go-runewidth v0.0.3 h1:a+kO+98RDGEfo6asOGMmpodZq4FNtnGP54yps8BzLR4=
-+github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-+github.com/mattn/go-sqlite3 v1.13.0 h1:LnJI81JidiW9r7pS/hXe6cFeO5EXNq7KbfvoJLRI69c=
-+github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U=
-+github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
-+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-+github.com/peterh/liner v1.2.0 h1:w/UPXyl5GfahFxcTOz2j9wCIHNI+pUPr2laqpojKNCg=
-+github.com/peterh/liner v1.2.0/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0=
-+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
-+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-+github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
-+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-+github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-+github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-+github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-+github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
-+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-+github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
-+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
-+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-+github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8=
-+github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
-+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
-+github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
-+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-+github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
-+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-+github.com/stretchr/testify v1.6.0 h1:jlIyCplCJFULU/01vCkhKuTyc3OorI3bJFuw6obfgho=
-+github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-+github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-+github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
-+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
-+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
-+go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-+go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-+go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-+golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-+golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-+golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-+golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980 h1:OjiUf46hAmXblsZdnoSXsEUSKU8r1UEzcL5RVZ4gO9Y=
-+golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-+golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-+google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
-+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-+gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
-+gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
-+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-+gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
-+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
-+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-diff --git a/vendor/github.com/canonical/go-dqlite/internal/bindings/build.go b/vendor/github.com/canonical/go-dqlite/internal/bindings/build.go
-new file mode 100644
-index 00000000000..430d9489857
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/internal/bindings/build.go
-@@ -0,0 +1,6 @@
-+package bindings
-+
-+/*
-+#cgo linux LDFLAGS: -lsqlite3 -lraft -ldqlite
-+*/
-+import "C"
-diff --git a/vendor/github.com/canonical/go-dqlite/internal/bindings/server.go b/vendor/github.com/canonical/go-dqlite/internal/bindings/server.go
-new file mode 100644
-index 00000000000..3e668a3e6c9
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/internal/bindings/server.go
-@@ -0,0 +1,274 @@
-+package bindings
-+
-+/*
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+
-+#include
-+#include
-+#include
-+
-+#define EMIT_BUF_LEN 1024
-+
-+typedef unsigned long long nanoseconds_t;
-+typedef unsigned long long failure_domain_t;
-+
-+// Duplicate a file descriptor and prevent it from being cloned into child processes.
-+static int dupCloexec(int oldfd) {
-+ int newfd = -1;
-+
-+ newfd = dup(oldfd);
-+ if (newfd < 0) {
-+ return -1;
-+ }
-+
-+ if (fcntl(newfd, F_SETFD, FD_CLOEXEC) < 0) {
-+ return -1;
-+ }
-+
-+ return newfd;
-+}
-+
-+// C to Go trampoline for custom connect function.
-+int connectWithDial(uintptr_t handle, char *address, int *fd);
-+
-+// Wrapper to call the Go trampoline.
-+static int connectTrampoline(void *data, const char *address, int *fd) {
-+ uintptr_t handle = (uintptr_t)(data);
-+ return connectWithDial(handle, (char*)address, fd);
-+}
-+
-+// Configure a custom connect function.
-+static int configConnectFunc(dqlite_node *t, uintptr_t handle) {
-+ return dqlite_node_set_connect_func(t, connectTrampoline, (void*)handle);
-+}
-+
-+static dqlite_node_info *makeInfos(int n) {
-+ return calloc(n, sizeof(dqlite_node_info));
-+}
-+
-+static void setInfo(dqlite_node_info *infos, unsigned i, dqlite_node_id id, const char *address) {
-+ dqlite_node_info *info = &infos[i];
-+ info->id = id;
-+ info->address = address;
-+}
-+
-+static int sqlite3ConfigSingleThread()
-+{
-+ return sqlite3_config(SQLITE_CONFIG_SINGLETHREAD);
-+}
-+
-+static int sqlite3ConfigMultiThread()
-+{
-+ return sqlite3_config(SQLITE_CONFIG_MULTITHREAD);
-+}
-+
-+*/
-+import "C"
-+import (
-+ "context"
-+ "fmt"
-+ "net"
-+ "os"
-+ "sync"
-+ "time"
-+ "unsafe"
-+
-+ "github.com/canonical/go-dqlite/internal/protocol"
-+)
-+
-+type Node C.dqlite_node
-+
-+// Initializes state.
-+func init() {
-+ // FIXME: ignore SIGPIPE, see https://github.com/joyent/libuv/issues/1254
-+ C.signal(C.SIGPIPE, C.SIG_IGN)
-+}
-+
-+func ConfigSingleThread() error {
-+ if rc := C.sqlite3ConfigSingleThread(); rc != 0 {
-+ return protocol.Error{Message: C.GoString(C.sqlite3_errstr(rc)), Code: int(rc)}
-+ }
-+ return nil
-+}
-+
-+func ConfigMultiThread() error {
-+ if rc := C.sqlite3ConfigMultiThread(); rc != 0 {
-+ return protocol.Error{Message: C.GoString(C.sqlite3_errstr(rc)), Code: int(rc)}
-+ }
-+ return nil
-+}
-+
-+// NewNode creates a new Node instance.
-+func NewNode(id uint64, address string, dir string) (*Node, error) {
-+ var server *C.dqlite_node
-+ cid := C.dqlite_node_id(id)
-+
-+ caddress := C.CString(address)
-+ defer C.free(unsafe.Pointer(caddress))
-+
-+ cdir := C.CString(dir)
-+ defer C.free(unsafe.Pointer(cdir))
-+
-+ if rc := C.dqlite_node_create(cid, caddress, cdir, &server); rc != 0 {
-+ errmsg := C.GoString(C.dqlite_node_errmsg(server))
-+ return nil, fmt.Errorf("%s", errmsg)
-+ }
-+
-+ return (*Node)(unsafe.Pointer(server)), nil
-+}
-+
-+func (s *Node) SetDialFunc(dial protocol.DialFunc) error {
-+ server := (*C.dqlite_node)(unsafe.Pointer(s))
-+ connectLock.Lock()
-+ defer connectLock.Unlock()
-+ connectIndex++
-+ connectRegistry[connectIndex] = dial
-+ if rc := C.configConnectFunc(server, connectIndex); rc != 0 {
-+ return fmt.Errorf("failed to set connect func")
-+ }
-+ return nil
-+}
-+
-+func (s *Node) SetBindAddress(address string) error {
-+ server := (*C.dqlite_node)(unsafe.Pointer(s))
-+ caddress := C.CString(address)
-+ defer C.free(unsafe.Pointer(caddress))
-+ if rc := C.dqlite_node_set_bind_address(server, caddress); rc != 0 {
-+ return fmt.Errorf("failed to set bind address %q: %d", address, rc)
-+ }
-+ return nil
-+}
-+
-+func (s *Node) SetNetworkLatency(nanoseconds uint64) error {
-+ server := (*C.dqlite_node)(unsafe.Pointer(s))
-+ cnanoseconds := C.nanoseconds_t(nanoseconds)
-+ if rc := C.dqlite_node_set_network_latency(server, cnanoseconds); rc != 0 {
-+ return fmt.Errorf("failed to set network latency")
-+ }
-+ return nil
-+}
-+
-+func (s *Node) SetFailureDomain(code uint64) error {
-+ server := (*C.dqlite_node)(unsafe.Pointer(s))
-+ ccode := C.failure_domain_t(code)
-+ if rc := C.dqlite_node_set_failure_domain(server, ccode); rc != 0 {
-+ return fmt.Errorf("set failure domain: %d", rc)
-+ }
-+ return nil
-+}
-+
-+func (s *Node) GetBindAddress() string {
-+ server := (*C.dqlite_node)(unsafe.Pointer(s))
-+ return C.GoString(C.dqlite_node_get_bind_address(server))
-+}
-+
-+func (s *Node) Start() error {
-+ server := (*C.dqlite_node)(unsafe.Pointer(s))
-+ if rc := C.dqlite_node_start(server); rc != 0 {
-+ errmsg := C.GoString(C.dqlite_node_errmsg(server))
-+ return fmt.Errorf("%s", errmsg)
-+ }
-+ return nil
-+}
-+
-+func (s *Node) Stop() error {
-+ server := (*C.dqlite_node)(unsafe.Pointer(s))
-+ if rc := C.dqlite_node_stop(server); rc != 0 {
-+ return fmt.Errorf("task stopped with error code %d", rc)
-+ }
-+ return nil
-+}
-+
-+// Close the server releasing all used resources.
-+func (s *Node) Close() {
-+ server := (*C.dqlite_node)(unsafe.Pointer(s))
-+ C.dqlite_node_destroy(server)
-+}
-+
-+func (s *Node) Recover(cluster []protocol.NodeInfo) error {
-+ server := (*C.dqlite_node)(unsafe.Pointer(s))
-+ n := C.int(len(cluster))
-+ infos := C.makeInfos(n)
-+ defer C.free(unsafe.Pointer(infos))
-+ for i, info := range cluster {
-+ cid := C.dqlite_node_id(info.ID)
-+ caddress := C.CString(info.Address)
-+ defer C.free(unsafe.Pointer(caddress))
-+ C.setInfo(infos, C.unsigned(i), cid, caddress)
-+ }
-+ if rc := C.dqlite_node_recover(server, infos, n); rc != 0 {
-+ return fmt.Errorf("recover failed with error code %d", rc)
-+ }
-+ return nil
-+}
-+
-+// GenerateID generates a unique ID for a server.
-+func GenerateID(address string) uint64 {
-+ caddress := C.CString(address)
-+ defer C.free(unsafe.Pointer(caddress))
-+ id := C.dqlite_generate_node_id(caddress)
-+ return uint64(id)
-+}
-+
-+// Extract the underlying socket from a connection.
-+func connToSocket(conn net.Conn) (C.int, error) {
-+ file, err := conn.(fileConn).File()
-+ if err != nil {
-+ return C.int(-1), err
-+ }
-+
-+ fd1 := C.int(file.Fd())
-+
-+ // Duplicate the file descriptor, in order to prevent Go's finalizer to
-+ // close it.
-+ fd2 := C.dupCloexec(fd1)
-+ if fd2 < 0 {
-+ return C.int(-1), fmt.Errorf("failed to dup socket fd")
-+ }
-+
-+ conn.Close()
-+
-+ return fd2, nil
-+}
-+
-+// Interface that net.Conn must implement in order to extract the underlying
-+// file descriptor.
-+type fileConn interface {
-+ File() (*os.File, error)
-+}
-+
-+//export connectWithDial
-+func connectWithDial(handle C.uintptr_t, address *C.char, fd *C.int) C.int {
-+ connectLock.Lock()
-+ defer connectLock.Unlock()
-+ dial := connectRegistry[handle]
-+ // TODO: make timeout customizable.
-+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
-+ defer cancel()
-+ conn, err := dial(ctx, C.GoString(address))
-+ if err != nil {
-+ return C.RAFT_NOCONNECTION
-+ }
-+ socket, err := connToSocket(conn)
-+ if err != nil {
-+ return C.RAFT_NOCONNECTION
-+ }
-+ *fd = socket
-+ return C.int(0)
-+}
-+
-+// Use handles to avoid passing Go pointers to C.
-+var connectRegistry = make(map[C.uintptr_t]protocol.DialFunc)
-+var connectIndex C.uintptr_t = 100
-+var connectLock = sync.Mutex{}
-+
-+// ErrNodeStopped is returned by Node.Handle() is the server was stopped.
-+var ErrNodeStopped = fmt.Errorf("server was stopped")
-+
-+// To compare bool values.
-+var cfalse C.bool
-diff --git a/vendor/github.com/canonical/go-dqlite/internal/bindings/server_test.go b/vendor/github.com/canonical/go-dqlite/internal/bindings/server_test.go
-new file mode 100644
-index 00000000000..83afdaed5ee
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/internal/bindings/server_test.go
-@@ -0,0 +1,228 @@
-+package bindings_test
-+
-+import (
-+ "encoding/binary"
-+ "io/ioutil"
-+ "net"
-+ "os"
-+ "strings"
-+ "testing"
-+ "time"
-+
-+ "github.com/canonical/go-dqlite/internal/bindings"
-+ "github.com/canonical/go-dqlite/internal/protocol"
-+ "github.com/stretchr/testify/assert"
-+ "github.com/stretchr/testify/require"
-+)
-+
-+func TestNode_Create(t *testing.T) {
-+ _, cleanup := newNode(t)
-+ defer cleanup()
-+}
-+
-+func TestNode_Start(t *testing.T) {
-+ dir, cleanup := newDir(t)
-+ defer cleanup()
-+
-+ server, err := bindings.NewNode(1, "1", dir)
-+ require.NoError(t, err)
-+ defer server.Close()
-+
-+ err = server.SetBindAddress("@")
-+ require.NoError(t, err)
-+
-+ err = server.Start()
-+ require.NoError(t, err)
-+
-+ conn, err := net.Dial("unix", server.GetBindAddress())
-+ require.NoError(t, err)
-+ conn.Close()
-+
-+ assert.True(t, strings.HasPrefix(server.GetBindAddress(), "@"))
-+
-+ err = server.Stop()
-+ require.NoError(t, err)
-+}
-+
-+func TestNode_Restart(t *testing.T) {
-+ dir, cleanup := newDir(t)
-+ defer cleanup()
-+
-+ server, err := bindings.NewNode(1, "1", dir)
-+ require.NoError(t, err)
-+
-+ require.NoError(t, server.SetBindAddress("@abc"))
-+ require.NoError(t, server.Start())
-+
-+ require.NoError(t, server.Stop())
-+ server.Close()
-+
-+ server, err = bindings.NewNode(1, "1", dir)
-+ require.NoError(t, err)
-+
-+ require.NoError(t, server.SetBindAddress("@abc"))
-+ require.NoError(t, server.Start())
-+
-+ require.NoError(t, server.Stop())
-+ server.Close()
-+}
-+
-+func TestNode_Start_Inet(t *testing.T) {
-+ dir, cleanup := newDir(t)
-+ defer cleanup()
-+
-+ server, err := bindings.NewNode(1, "1", dir)
-+ require.NoError(t, err)
-+ defer server.Close()
-+
-+ err = server.SetBindAddress("127.0.0.1:9000")
-+ require.NoError(t, err)
-+
-+ err = server.Start()
-+ require.NoError(t, err)
-+
-+ conn, err := net.Dial("tcp", server.GetBindAddress())
-+ require.NoError(t, err)
-+ conn.Close()
-+
-+ err = server.Stop()
-+ require.NoError(t, err)
-+}
-+
-+func TestNode_Leader(t *testing.T) {
-+ _, cleanup := newNode(t)
-+ defer cleanup()
-+
-+ conn := newClient(t)
-+
-+ // Make a Leader request
-+ buf := makeClientRequest(t, conn, protocol.RequestLeader)
-+ assert.Equal(t, uint8(1), buf[0])
-+
-+ require.NoError(t, conn.Close())
-+}
-+
-+// func TestNode_Heartbeat(t *testing.T) {
-+// server, cleanup := newNode(t)
-+// defer cleanup()
-+
-+// listener, cleanup := newListener(t)
-+// defer cleanup()
-+
-+// cleanup = runNode(t, server, listener)
-+// defer cleanup()
-+
-+// conn := newClient(t, listener)
-+
-+// // Make a Heartbeat request
-+// makeClientRequest(t, conn, bindings.RequestHeartbeat)
-+
-+// require.NoError(t, conn.Close())
-+// }
-+
-+// func TestNode_ConcurrentHandleAndClose(t *testing.T) {
-+// server, cleanup := newNode(t)
-+// defer cleanup()
-+
-+// listener, cleanup := newListener(t)
-+// defer cleanup()
-+
-+// acceptCh := make(chan error)
-+// go func() {
-+// conn, err := listener.Accept()
-+// if err != nil {
-+// acceptCh <- err
-+// }
-+// server.Handle(conn)
-+// acceptCh <- nil
-+// }()
-+
-+// conn, err := net.Dial("unix", listener.Addr().String())
-+// require.NoError(t, err)
-+
-+// require.NoError(t, conn.Close())
-+
-+// assert.NoError(t, <-acceptCh)
-+// }
-+
-+// Create a new Node object for tests.
-+func newNode(t *testing.T) (*bindings.Node, func()) {
-+ t.Helper()
-+
-+ dir, dirCleanup := newDir(t)
-+
-+ server, err := bindings.NewNode(1, "1", dir)
-+ require.NoError(t, err)
-+
-+ err = server.SetBindAddress("@test")
-+ require.NoError(t, err)
-+
-+ require.NoError(t, server.Start())
-+
-+ cleanup := func() {
-+ require.NoError(t, server.Stop())
-+ server.Close()
-+ dirCleanup()
-+ }
-+
-+ return server, cleanup
-+}
-+
-+// Create a new client network connection, performing the handshake.
-+func newClient(t *testing.T) net.Conn {
-+ t.Helper()
-+
-+ conn, err := net.Dial("unix", "@test")
-+ require.NoError(t, err)
-+
-+ // Handshake
-+ err = binary.Write(conn, binary.LittleEndian, protocol.VersionLegacy)
-+ require.NoError(t, err)
-+
-+ return conn
-+}
-+
-+// Perform a client request.
-+func makeClientRequest(t *testing.T, conn net.Conn, kind byte) []byte {
-+ t.Helper()
-+
-+ // Number of words
-+ err := binary.Write(conn, binary.LittleEndian, uint32(1))
-+ require.NoError(t, err)
-+
-+ // Type, flags, extra.
-+ n, err := conn.Write([]byte{kind, 0, 0, 0})
-+ require.NoError(t, err)
-+ require.Equal(t, 4, n)
-+
-+ n, err = conn.Write([]byte{0, 0, 0, 0, 0, 0, 0, 0}) // Unused single-word request payload
-+ require.NoError(t, err)
-+ require.Equal(t, 8, n)
-+
-+ // Read the response
-+ conn.SetDeadline(time.Now().Add(250 * time.Millisecond))
-+ buf := make([]byte, 64)
-+ _, err = conn.Read(buf)
-+ require.NoError(t, err)
-+
-+ return buf
-+}
-+
-+// Return a new temporary directory.
-+func newDir(t *testing.T) (string, func()) {
-+ t.Helper()
-+
-+ dir, err := ioutil.TempDir("", "dqlite-replication-test-")
-+ assert.NoError(t, err)
-+
-+ cleanup := func() {
-+ _, err := os.Stat(dir)
-+ if err != nil {
-+ assert.True(t, os.IsNotExist(err))
-+ } else {
-+ assert.NoError(t, os.RemoveAll(dir))
-+ }
-+ }
-+
-+ return dir, cleanup
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/internal/logging/func.go b/vendor/github.com/canonical/go-dqlite/internal/logging/func.go
-new file mode 100644
-index 00000000000..57e0525fa89
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/internal/logging/func.go
-@@ -0,0 +1,26 @@
-+package logging
-+
-+import (
-+ "fmt"
-+ "testing"
-+)
-+
-+// Func is a function that can be used for logging.
-+type Func func(Level, string, ...interface{})
-+
-+// Test returns a logging function that forwards messages to the test logger.
-+func Test(t *testing.T) Func {
-+ return func(l Level, format string, a ...interface{}) {
-+ format = fmt.Sprintf("%s: %s", l.String(), format)
-+ t.Logf(format, a...)
-+ }
-+}
-+
-+// Stdout returns a logging function that prints log messages on standard
-+// output.
-+func Stdout() Func {
-+ return func(l Level, format string, a ...interface{}) {
-+ format = fmt.Sprintf("%s: %s\n", l.String(), format)
-+ fmt.Printf(format, a...)
-+ }
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/internal/logging/func_test.go b/vendor/github.com/canonical/go-dqlite/internal/logging/func_test.go
-new file mode 100644
-index 00000000000..a480786d4ee
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/internal/logging/func_test.go
-@@ -0,0 +1,12 @@
-+package logging_test
-+
-+import (
-+ "testing"
-+
-+ "github.com/canonical/go-dqlite/internal/logging"
-+)
-+
-+func Test_TestFunc(t *testing.T) {
-+ f := logging.Test(t)
-+ f(logging.Info, "hello")
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/internal/logging/level.go b/vendor/github.com/canonical/go-dqlite/internal/logging/level.go
-new file mode 100644
-index 00000000000..c4b5cdfb47c
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/internal/logging/level.go
-@@ -0,0 +1,28 @@
-+package logging
-+
-+// Level defines the logging level.
-+type Level int
-+
-+// Available logging levels.
-+const (
-+ None Level = iota
-+ Debug
-+ Info
-+ Warn
-+ Error
-+)
-+
-+func (l Level) String() string {
-+ switch l {
-+ case Debug:
-+ return "DEBUG"
-+ case Info:
-+ return "INFO"
-+ case Warn:
-+ return "WARN"
-+ case Error:
-+ return "ERROR"
-+ default:
-+ return "UNKNOWN"
-+ }
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/internal/logging/level_test.go b/vendor/github.com/canonical/go-dqlite/internal/logging/level_test.go
-new file mode 100644
-index 00000000000..4da30fa012a
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/internal/logging/level_test.go
-@@ -0,0 +1,18 @@
-+package logging_test
-+
-+import (
-+ "testing"
-+
-+ "github.com/canonical/go-dqlite/internal/logging"
-+ "github.com/stretchr/testify/assert"
-+)
-+
-+func TestLevel_String(t *testing.T) {
-+ assert.Equal(t, "DEBUG", logging.Debug.String())
-+ assert.Equal(t, "INFO", logging.Info.String())
-+ assert.Equal(t, "WARN", logging.Warn.String())
-+ assert.Equal(t, "ERROR", logging.Error.String())
-+
-+ unknown := logging.Level(666)
-+ assert.Equal(t, "UNKNOWN", unknown.String())
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/internal/protocol/buffer.go b/vendor/github.com/canonical/go-dqlite/internal/protocol/buffer.go
-new file mode 100644
-index 00000000000..356b96f4d26
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/internal/protocol/buffer.go
-@@ -0,0 +1,11 @@
-+package protocol
-+
-+// Buffer for reading responses or writing requests.
-+type buffer struct {
-+ Bytes []byte
-+ Offset int
-+}
-+
-+func (b *buffer) Advance(amount int) {
-+ b.Offset += amount
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/internal/protocol/config.go b/vendor/github.com/canonical/go-dqlite/internal/protocol/config.go
-new file mode 100644
-index 00000000000..de5272df051
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/internal/protocol/config.go
-@@ -0,0 +1,15 @@
-+package protocol
-+
-+import (
-+ "time"
-+)
-+
-+// Config holds various configuration parameters for a dqlite client.
-+type Config struct {
-+ Dial DialFunc // Network dialer.
-+ DialTimeout time.Duration // Timeout for establishing a network connection .
-+ AttemptTimeout time.Duration // Timeout for each individual attempt to probe a server's leadership.
-+ BackoffFactor time.Duration // Exponential backoff factor for retries.
-+ BackoffCap time.Duration // Maximum connection retry backoff value,
-+ RetryLimit uint // Maximum number of retries, or 0 for unlimited.
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/internal/protocol/connector.go b/vendor/github.com/canonical/go-dqlite/internal/protocol/connector.go
-new file mode 100644
-index 00000000000..2cc2f0e1e5d
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/internal/protocol/connector.go
-@@ -0,0 +1,324 @@
-+package protocol
-+
-+import (
-+ "context"
-+ "encoding/binary"
-+ "fmt"
-+ "io"
-+ "net"
-+ "time"
-+
-+ "github.com/Rican7/retry"
-+ "github.com/Rican7/retry/backoff"
-+ "github.com/Rican7/retry/strategy"
-+ "github.com/canonical/go-dqlite/internal/logging"
-+ "github.com/pkg/errors"
-+)
-+
-+// DialFunc is a function that can be used to establish a network connection.
-+type DialFunc func(context.Context, string) (net.Conn, error)
-+
-+// Connector is in charge of creating a dqlite SQL client connected to the
-+// current leader of a cluster.
-+type Connector struct {
-+ id uint64 // Conn ID to use when registering against the server.
-+ store NodeStore // Used to get and update current cluster servers.
-+ config Config // Connection parameters.
-+ log logging.Func // Logging function.
-+}
-+
-+// NewConnector returns a new connector that can be used by a dqlite driver to
-+// create new clients connected to a leader dqlite server.
-+func NewConnector(id uint64, store NodeStore, config Config, log logging.Func) *Connector {
-+ if config.Dial == nil {
-+ config.Dial = Dial
-+ }
-+
-+ if config.DialTimeout == 0 {
-+ config.DialTimeout = 5 * time.Second
-+ }
-+
-+ if config.AttemptTimeout == 0 {
-+ config.AttemptTimeout = 15 * time.Second
-+ }
-+
-+ if config.BackoffFactor == 0 {
-+ config.BackoffFactor = 100 * time.Millisecond
-+ }
-+
-+ if config.BackoffCap == 0 {
-+ config.BackoffCap = time.Second
-+ }
-+
-+ connector := &Connector{
-+ id: id,
-+ store: store,
-+ config: config,
-+ log: log,
-+ }
-+
-+ return connector
-+}
-+
-+// Connect finds the leader server and returns a connection to it.
-+//
-+// If the connector is stopped before a leader is found, nil is returned.
-+func (c *Connector) Connect(ctx context.Context) (*Protocol, error) {
-+ var protocol *Protocol
-+
-+ strategies := makeRetryStrategies(c.config.BackoffFactor, c.config.BackoffCap, c.config.RetryLimit)
-+
-+ // The retry strategy should be configured to retry indefinitely, until
-+ // the given context is done.
-+ err := retry.Retry(func(attempt uint) error {
-+ log := func(l logging.Level, format string, a ...interface{}) {
-+ format = fmt.Sprintf("attempt %d: ", attempt) + format
-+ c.log(l, format, a...)
-+ }
-+
-+ select {
-+ case <-ctx.Done():
-+ // Stop retrying
-+ return nil
-+ default:
-+ }
-+
-+ var err error
-+ protocol, err = c.connectAttemptAll(ctx, log)
-+ if err != nil {
-+ return err
-+ }
-+
-+ return nil
-+ }, strategies...)
-+
-+ if err != nil {
-+ // We exhausted the number of retries allowed by the configured
-+ // strategy.
-+ return nil, ErrNoAvailableLeader
-+ }
-+
-+ if ctx.Err() != nil {
-+ return nil, ErrNoAvailableLeader
-+ }
-+
-+ // At this point we should have a connected protocol object, since the
-+ // retry loop didn't hit any error and the given context hasn't
-+ // expired.
-+ if protocol == nil {
-+ panic("no protocol object")
-+ }
-+
-+ return protocol, nil
-+}
-+
-+// Make a single attempt to establish a connection to the leader server trying
-+// all addresses available in the store.
-+func (c *Connector) connectAttemptAll(ctx context.Context, log logging.Func) (*Protocol, error) {
-+ servers, err := c.store.Get(ctx)
-+ if err != nil {
-+ return nil, errors.Wrap(err, "get servers")
-+ }
-+
-+ // Make an attempt for each address until we find the leader.
-+ for _, server := range servers {
-+ log := func(l logging.Level, format string, a ...interface{}) {
-+ format = fmt.Sprintf("server %s: ", server.Address) + format
-+ log(l, format, a...)
-+ }
-+
-+ ctx, cancel := context.WithTimeout(ctx, c.config.AttemptTimeout)
-+ defer cancel()
-+
-+ version := VersionOne
-+ protocol, leader, err := c.connectAttemptOne(ctx, server.Address, version)
-+ if err == errBadProtocol {
-+ log(logging.Warn, "unsupported protocol %d, attempt with legacy", version)
-+ version = VersionLegacy
-+ protocol, leader, err = c.connectAttemptOne(ctx, server.Address, version)
-+ }
-+ if err != nil {
-+ // This server is unavailable, try with the next target.
-+ log(logging.Warn, err.Error())
-+ continue
-+ }
-+ if protocol != nil {
-+ // We found the leader
-+ log(logging.Debug, "connected")
-+ return protocol, nil
-+ }
-+ if leader == "" {
-+ // This server does not know who the current leader is,
-+ // try with the next target.
-+ log(logging.Warn, "no known leader")
-+ continue
-+ }
-+
-+ // If we get here, it means this server reported that another
-+ // server is the leader, let's close the connection to this
-+ // server and try with the suggested one.
-+ log(logging.Debug, "connect to reported leader %s", leader)
-+
-+ ctx, cancel = context.WithTimeout(ctx, c.config.AttemptTimeout)
-+ defer cancel()
-+
-+ protocol, leader, err = c.connectAttemptOne(ctx, leader, version)
-+ if err != nil {
-+ // The leader reported by the previous server is
-+ // unavailable, try with the next target.
-+ log(logging.Warn, "reported leader unavailable err=%v", err)
-+ continue
-+ }
-+ if protocol == nil {
-+ // The leader reported by the target server does not consider itself
-+ // the leader, try with the next target.
-+ log(logging.Warn, "reported leader server is not the leader")
-+ continue
-+ }
-+ log(logging.Debug, "connected")
-+ return protocol, nil
-+ }
-+
-+ return nil, ErrNoAvailableLeader
-+}
-+
-+// Perform the initial handshake using the given protocol version.
-+func Handshake(ctx context.Context, conn net.Conn, version uint64) (*Protocol, error) {
-+ // Latest protocol version.
-+ protocol := make([]byte, 8)
-+ binary.LittleEndian.PutUint64(protocol, version)
-+
-+ // Honor the ctx deadline, if present.
-+ if deadline, ok := ctx.Deadline(); ok {
-+ conn.SetDeadline(deadline)
-+ defer conn.SetDeadline(time.Time{})
-+ }
-+
-+ // Perform the protocol handshake.
-+ n, err := conn.Write(protocol)
-+ if err != nil {
-+ return nil, errors.Wrap(err, "write handshake")
-+ }
-+ if n != 8 {
-+ return nil, errors.Wrap(io.ErrShortWrite, "short handshake write")
-+ }
-+
-+ return newProtocol(version, conn), nil
-+}
-+
-+// Connect to the given dqlite server and check if it's the leader.
-+//
-+// Return values:
-+//
-+// - Any failure is hit: -> nil, "", err
-+// - Target not leader and no leader known: -> nil, "", nil
-+// - Target not leader and leader known: -> nil, leader, nil
-+// - Target is the leader: -> server, "", nil
-+//
-+func (c *Connector) connectAttemptOne(ctx context.Context, address string, version uint64) (*Protocol, string, error) {
-+ dialCtx, cancel := context.WithTimeout(ctx, c.config.DialTimeout)
-+ defer cancel()
-+
-+ // Establish the connection.
-+ conn, err := c.config.Dial(dialCtx, address)
-+ if err != nil {
-+ return nil, "", errors.Wrap(err, "dial")
-+ }
-+
-+ protocol, err := Handshake(ctx, conn, version)
-+ if err != nil {
-+ conn.Close()
-+ return nil, "", err
-+ }
-+
-+ // Send the initial Leader request.
-+ request := Message{}
-+ request.Init(16)
-+ response := Message{}
-+ response.Init(512)
-+
-+ EncodeLeader(&request)
-+
-+ if err := protocol.Call(ctx, &request, &response); err != nil {
-+ protocol.Close()
-+ cause := errors.Cause(err)
-+ // Best-effort detection of a pre-1.0 dqlite node: when sent
-+ // version 1 it should close the connection immediately.
-+ if err, ok := cause.(*net.OpError); ok && !err.Timeout() || cause == io.EOF {
-+ return nil, "", errBadProtocol
-+ }
-+
-+ return nil, "", err
-+ }
-+
-+ _, leader, err := DecodeNodeCompat(protocol, &response)
-+ if err != nil {
-+ protocol.Close()
-+ return nil, "", err
-+ }
-+
-+ switch leader {
-+ case "":
-+ // Currently this server does not know about any leader.
-+ protocol.Close()
-+ return nil, "", nil
-+ case address:
-+ // This server is the leader, register ourselves and return.
-+ request.reset()
-+ response.reset()
-+
-+ EncodeClient(&request, c.id)
-+
-+ if err := protocol.Call(ctx, &request, &response); err != nil {
-+ protocol.Close()
-+ return nil, "", err
-+ }
-+
-+ _, err := DecodeWelcome(&response)
-+ if err != nil {
-+ protocol.Close()
-+ return nil, "", err
-+ }
-+
-+ // TODO: enable heartbeat
-+ // protocol.heartbeatTimeout = time.Duration(heartbeatTimeout) * time.Millisecond
-+ //go protocol.heartbeat()
-+
-+ return protocol, "", nil
-+ default:
-+ // This server claims to know who the current leader is.
-+ protocol.Close()
-+ return nil, leader, nil
-+ }
-+}
-+
-+// Return a retry strategy with exponential backoff, capped at the given amount
-+// of time and possibly with a maximum number of retries.
-+func makeRetryStrategies(factor, cap time.Duration, limit uint) []strategy.Strategy {
-+ backoff := backoff.BinaryExponential(factor)
-+
-+ strategies := []strategy.Strategy{}
-+
-+ if limit > 0 {
-+ strategies = append(strategies, strategy.Limit(limit))
-+ }
-+
-+ strategies = append(strategies,
-+ func(attempt uint) bool {
-+ if attempt > 0 {
-+ duration := backoff(attempt)
-+ // Duration might be negative in case of integer overflow.
-+ if duration > cap || duration <= 0 {
-+ duration = cap
-+ }
-+ time.Sleep(duration)
-+ }
-+
-+ return true
-+ },
-+ )
-+
-+ return strategies
-+}
-+
-+var errBadProtocol = fmt.Errorf("bad protocol")
-diff --git a/vendor/github.com/canonical/go-dqlite/internal/protocol/connector_test.go b/vendor/github.com/canonical/go-dqlite/internal/protocol/connector_test.go
-new file mode 100644
-index 00000000000..790f33a6ac0
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/internal/protocol/connector_test.go
-@@ -0,0 +1,368 @@
-+package protocol_test
-+
-+import (
-+ "context"
-+ "fmt"
-+ "io/ioutil"
-+ "net"
-+ "os"
-+ "testing"
-+ "time"
-+
-+ "github.com/canonical/go-dqlite/internal/bindings"
-+ "github.com/canonical/go-dqlite/internal/logging"
-+ "github.com/canonical/go-dqlite/internal/protocol"
-+ "github.com/stretchr/testify/assert"
-+ "github.com/stretchr/testify/require"
-+)
-+
-+// Successful connection.
-+func TestConnector_Success(t *testing.T) {
-+ address, cleanup := newNode(t, 0)
-+ defer cleanup()
-+
-+ store := newStore(t, []string{address})
-+
-+ log, check := newLogFunc(t)
-+ connector := protocol.NewConnector(0, store, protocol.Config{}, log)
-+
-+ ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
-+ defer cancel()
-+
-+ client, err := connector.Connect(ctx)
-+ require.NoError(t, err)
-+
-+ assert.NoError(t, client.Close())
-+
-+ check([]string{
-+ "DEBUG: attempt 0: server @test-0: connected",
-+ })
-+}
-+
-+// The network connection can't be established within the specified number of
-+// attempts.
-+func TestConnector_LimitRetries(t *testing.T) {
-+ store := newStore(t, []string{"@test-123"})
-+ config := protocol.Config{
-+ RetryLimit: 2,
-+ }
-+ log, check := newLogFunc(t)
-+ connector := protocol.NewConnector(0, store, config, log)
-+
-+ _, err := connector.Connect(context.Background())
-+ assert.Equal(t, protocol.ErrNoAvailableLeader, err)
-+
-+ check([]string{
-+ "WARN: attempt 0: server @test-123: dial: dial unix @test-123: connect: connection refused",
-+ "WARN: attempt 1: server @test-123: dial: dial unix @test-123: connect: connection refused",
-+ "WARN: attempt 2: server @test-123: dial: dial unix @test-123: connect: connection refused",
-+ })
-+}
-+
-+// The network connection can't be established because of a connection timeout.
-+func TestConnector_DialTimeout(t *testing.T) {
-+ store := newStore(t, []string{"8.8.8.8:9000"})
-+ log, check := newLogFunc(t)
-+ config := protocol.Config{
-+ DialTimeout: 50 * time.Millisecond,
-+ RetryLimit: 1,
-+ }
-+ connector := protocol.NewConnector(0, store, config, log)
-+
-+ _, err := connector.Connect(context.Background())
-+ assert.Equal(t, protocol.ErrNoAvailableLeader, err)
-+
-+ check([]string{
-+ "WARN: attempt 0: server 8.8.8.8:9000: dial: dial tcp 8.8.8.8:9000: i/o timeout",
-+ "WARN: attempt 1: server 8.8.8.8:9000: dial: dial tcp 8.8.8.8:9000: i/o timeout",
-+ })
-+}
-+
-+// Connection failed because the server store is empty.
-+func TestConnector_EmptyNodeStore(t *testing.T) {
-+ store := newStore(t, []string{})
-+ log, check := newLogFunc(t)
-+ connector := protocol.NewConnector(0, store, protocol.Config{}, log)
-+
-+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Millisecond)
-+ defer cancel()
-+
-+ _, err := connector.Connect(ctx)
-+ assert.Equal(t, protocol.ErrNoAvailableLeader, err)
-+
-+ check([]string{})
-+}
-+
-+// Connection failed because the context was canceled.
-+func TestConnector_ContextCanceled(t *testing.T) {
-+ store := newStore(t, []string{"1.2.3.4:666"})
-+
-+ log, check := newLogFunc(t)
-+ connector := protocol.NewConnector(0, store, protocol.Config{}, log)
-+
-+ ctx, cancel := context.WithTimeout(context.Background(), 25*time.Millisecond)
-+ defer cancel()
-+
-+ _, err := connector.Connect(ctx)
-+ assert.Equal(t, protocol.ErrNoAvailableLeader, err)
-+
-+ check([]string{
-+ "WARN: attempt 0: server 1.2.3.4:666: dial: dial tcp 1.2.3.4:666: i/o timeout",
-+ })
-+}
-+
-+// Simulate a server which accepts the connection but doesn't reply within the
-+// attempt timeout.
-+func TestConnector_AttemptTimeout(t *testing.T) {
-+ listener, err := net.Listen("unix", "@1234")
-+ require.NoError(t, err)
-+
-+ store := newStore(t, []string{listener.Addr().String()})
-+ config := protocol.Config{
-+ AttemptTimeout: 100 * time.Millisecond,
-+ RetryLimit: 1,
-+ }
-+ connector := protocol.NewConnector(0, store, config, logging.Test(t))
-+
-+ conns := []net.Conn{}
-+ go func() {
-+ conn, err := listener.Accept()
-+ require.NoError(t, err)
-+ conns = append(conns, conn)
-+ }()
-+
-+ _, err = connector.Connect(context.Background())
-+ assert.Equal(t, protocol.ErrNoAvailableLeader, err)
-+
-+ for _, conn := range conns {
-+ conn.Close()
-+ }
-+}
-+
-+// If an election is in progress, the connector will retry until a leader gets
-+// elected.
-+// func TestConnector_Connect_ElectionInProgress(t *testing.T) {
-+// address1, cleanup := newNode(t, 1)
-+// defer cleanup()
-+
-+// address2, cleanup := newNode(t, 2)
-+// defer cleanup()
-+
-+// address3, cleanup := newNode(t, 3)
-+// defer cleanup()
-+
-+// store := newStore(t, []string{address1, address2, address3})
-+
-+// connector := newConnector(t, store)
-+
-+// go func() {
-+// // Simulate server 1 winning the election after 10ms
-+// time.Sleep(10 * time.Millisecond)
-+// }()
-+
-+// ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
-+// defer cancel()
-+
-+// client, err := connector.Connect(ctx)
-+// require.NoError(t, err)
-+
-+// assert.NoError(t, client.Close())
-+// }
-+
-+// If a server reports that it knows about the leader, the hint will be taken
-+// and an attempt will be made to connect to it.
-+// func TestConnector_Connect_NodeKnowsAboutLeader(t *testing.T) {
-+// defer bindings.AssertNoMemoryLeaks(t)
-+
-+// methods1 := &testClusterMethods{}
-+// methods2 := &testClusterMethods{}
-+// methods3 := &testClusterMethods{}
-+
-+// address1, cleanup := newNode(t, 1, methods1)
-+// defer cleanup()
-+
-+// address2, cleanup := newNode(t, 2, methods2)
-+// defer cleanup()
-+
-+// address3, cleanup := newNode(t, 3, methods3)
-+// defer cleanup()
-+
-+// // Node 1 will be contacted first, which will report that server 2 is
-+// // the leader.
-+// store := newStore(t, []string{address1, address2, address3})
-+
-+// methods1.leader = address2
-+// methods2.leader = address2
-+// methods3.leader = address2
-+
-+// connector := newConnector(t, store)
-+
-+// ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
-+// defer cancel()
-+
-+// client, err := connector.Connect(ctx)
-+// require.NoError(t, err)
-+
-+// assert.NoError(t, client.Close())
-+// }
-+
-+// If a server reports that it knows about the leader, the hint will be taken
-+// and an attempt will be made to connect to it. If that leader has died, the
-+// next target will be tried.
-+// func TestConnector_Connect_NodeKnowsAboutDeadLeader(t *testing.T) {
-+// defer bindings.AssertNoMemoryLeaks(t)
-+
-+// methods1 := &testClusterMethods{}
-+// methods2 := &testClusterMethods{}
-+// methods3 := &testClusterMethods{}
-+
-+// address1, cleanup := newNode(t, 1, methods1)
-+// defer cleanup()
-+
-+// address2, cleanup := newNode(t, 2, methods2)
-+
-+// // Simulate server 2 crashing.
-+// cleanup()
-+
-+// address3, cleanup := newNode(t, 3, methods3)
-+// defer cleanup()
-+
-+// // Node 1 will be contacted first, which will report that server 2 is
-+// // the leader. However server 2 has crashed, and after a bit server 1
-+// // gets elected.
-+// store := newStore(t, []string{address1, address2, address3})
-+// methods1.leader = address2
-+// methods3.leader = address2
-+
-+// go func() {
-+// // Simulate server 1 becoming the new leader after server 2
-+// // crashed.
-+// time.Sleep(10 * time.Millisecond)
-+// methods1.leader = address1
-+// methods3.leader = address1
-+// }()
-+
-+// connector := newConnector(t, store)
-+
-+// ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
-+// defer cancel()
-+
-+// client, err := connector.Connect(ctx)
-+// require.NoError(t, err)
-+
-+// assert.NoError(t, client.Close())
-+// }
-+
-+// If a server reports that it knows about the leader, the hint will be taken
-+// and an attempt will be made to connect to it. If that leader is not actually
-+// the leader the next target will be tried.
-+// func TestConnector_Connect_NodeKnowsAboutStaleLeader(t *testing.T) {
-+// defer bindings.AssertNoMemoryLeaks(t)
-+
-+// methods1 := &testClusterMethods{}
-+// methods2 := &testClusterMethods{}
-+// methods3 := &testClusterMethods{}
-+
-+// address1, cleanup := newNode(t, 1, methods1)
-+// defer cleanup()
-+
-+// address2, cleanup := newNode(t, 2, methods2)
-+// defer cleanup()
-+
-+// address3, cleanup := newNode(t, 3, methods3)
-+// defer cleanup()
-+
-+// // Node 1 will be contacted first, which will report that server 2 is
-+// // the leader. However server 2 thinks that 3 is the leader, and server
-+// // 3 is actually the leader.
-+// store := newStore(t, []string{address1, address2, address3})
-+// methods1.leader = address2
-+// methods2.leader = address3
-+// methods3.leader = address3
-+
-+// connector := newConnector(t, store)
-+
-+// ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
-+// defer cancel()
-+
-+// client, err := connector.Connect(ctx)
-+// require.NoError(t, err)
-+
-+// assert.NoError(t, client.Close())
-+// }
-+
-+// Return a log function that emits messages using the test logger as well as
-+// collecting them into a slice. The second function returned can be used to
-+// assert that the collected messages match the given ones.
-+func newLogFunc(t *testing.T) (logging.Func, func([]string)) {
-+ messages := []string{}
-+ log := func(l logging.Level, format string, a ...interface{}) {
-+ message := l.String() + ": " + fmt.Sprintf(format, a...)
-+ messages = append(messages, message)
-+ t.Log(message)
-+
-+ }
-+ check := func(expected []string) {
-+ assert.Equal(t, expected, messages)
-+ }
-+ return log, check
-+}
-+
-+// Create a new in-memory server store populated with the given addresses.
-+func newStore(t *testing.T, addresses []string) protocol.NodeStore {
-+ t.Helper()
-+
-+ servers := make([]protocol.NodeInfo, len(addresses))
-+ for i, address := range addresses {
-+ servers[i].ID = uint64(i)
-+ servers[i].Address = address
-+ }
-+
-+ store := protocol.NewInmemNodeStore()
-+ require.NoError(t, store.Set(context.Background(), servers))
-+
-+ return store
-+}
-+
-+func newNode(t *testing.T, index int) (string, func()) {
-+ t.Helper()
-+
-+ id := uint64(index + 1)
-+ dir, dirCleanup := newDir(t)
-+
-+ address := fmt.Sprintf("@test-%d", index)
-+
-+ server, err := bindings.NewNode(id, address, dir)
-+ require.NoError(t, err)
-+
-+ err = server.SetBindAddress(address)
-+ require.NoError(t, err)
-+
-+ require.NoError(t, server.Start())
-+ cleanup := func() {
-+ require.NoError(t, server.Stop())
-+ server.Close()
-+ dirCleanup()
-+ }
-+
-+ return address, cleanup
-+}
-+
-+// Return a new temporary directory.
-+func newDir(t *testing.T) (string, func()) {
-+ t.Helper()
-+
-+ dir, err := ioutil.TempDir("", "dqlite-connector-test-")
-+ assert.NoError(t, err)
-+
-+ cleanup := func() {
-+ _, err := os.Stat(dir)
-+ if err != nil {
-+ assert.True(t, os.IsNotExist(err))
-+ } else {
-+ assert.NoError(t, os.RemoveAll(dir))
-+ }
-+ }
-+
-+ return dir, cleanup
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/internal/protocol/constants.go b/vendor/github.com/canonical/go-dqlite/internal/protocol/constants.go
-new file mode 100644
-index 00000000000..12393ceb4e0
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/internal/protocol/constants.go
-@@ -0,0 +1,153 @@
-+package protocol
-+
-+// VersionOne is version 1 of the server protocol.
-+const VersionOne = uint64(1)
-+
-+// VersionLegacy is the pre 1.0 dqlite server protocol version.
-+const VersionLegacy = uint64(0x86104dd760433fe5)
-+
-+// Cluster response formats
-+const (
-+ ClusterFormatV0 = 0
-+ ClusterFormatV1 = 1
-+)
-+
-+// Node roles
-+const (
-+ Voter = NodeRole(0)
-+ StandBy = NodeRole(1)
-+ Spare = NodeRole(2)
-+)
-+
-+// SQLite datatype codes
-+const (
-+ Integer = 1
-+ Float = 2
-+ Text = 3
-+ Blob = 4
-+ Null = 5
-+)
-+
-+// Special data types for time values.
-+const (
-+ UnixTime = 9
-+ ISO8601 = 10
-+ Boolean = 11
-+)
-+
-+// Request types.
-+const (
-+ RequestLeader = 0
-+ RequestClient = 1
-+ RequestHeartbeat = 2
-+ RequestOpen = 3
-+ RequestPrepare = 4
-+ RequestExec = 5
-+ RequestQuery = 6
-+ RequestFinalize = 7
-+ RequestExecSQL = 8
-+ RequestQuerySQL = 9
-+ RequestInterrupt = 10
-+ RequestAdd = 12
-+ RequestAssign = 13
-+ RequestRemove = 14
-+ RequestDump = 15
-+ RequestCluster = 16
-+ RequestTransfer = 17
-+ RequestDescribe = 18
-+ RequestWeight = 19
-+)
-+
-+// Formats
-+const (
-+ RequestDescribeFormatV0 = 0
-+)
-+
-+// Response types.
-+const (
-+ ResponseFailure = 0
-+ ResponseNode = 1
-+ ResponseNodeLegacy = 1
-+ ResponseWelcome = 2
-+ ResponseNodes = 3
-+ ResponseDb = 4
-+ ResponseStmt = 5
-+ ResponseResult = 6
-+ ResponseRows = 7
-+ ResponseEmpty = 8
-+ ResponseFiles = 9
-+ ResponseMetadata = 10
-+)
-+
-+// Human-readable description of a request type.
-+func requestDesc(code uint8) string {
-+ switch code {
-+ // Requests
-+ case RequestLeader:
-+ return "leader"
-+ case RequestClient:
-+ return "client"
-+ case RequestHeartbeat:
-+ return "heartbeat"
-+ case RequestOpen:
-+ return "open"
-+ case RequestPrepare:
-+ return "prepare"
-+ case RequestExec:
-+ return "exec"
-+ case RequestQuery:
-+ return "query"
-+ case RequestFinalize:
-+ return "finalize"
-+ case RequestExecSQL:
-+ return "exec-sql"
-+ case RequestQuerySQL:
-+ return "query-sql"
-+ case RequestInterrupt:
-+ return "interrupt"
-+ case RequestAdd:
-+ return "add"
-+ case RequestAssign:
-+ return "assign"
-+ case RequestRemove:
-+ return "remove"
-+ case RequestDump:
-+ return "dump"
-+ case RequestCluster:
-+ return "cluster"
-+ case RequestTransfer:
-+ return "transfer"
-+ case RequestDescribe:
-+ return "describe"
-+ }
-+ return "unknown"
-+}
-+
-+// Human-readable description of a response type.
-+func responseDesc(code uint8) string {
-+ switch code {
-+ case ResponseFailure:
-+ return "failure"
-+ case ResponseNode:
-+ return "node"
-+ case ResponseWelcome:
-+ return "welcome"
-+ case ResponseNodes:
-+ return "nodes"
-+ case ResponseDb:
-+ return "db"
-+ case ResponseStmt:
-+ return "stmt"
-+ case ResponseResult:
-+ return "result"
-+ case ResponseRows:
-+ return "rows"
-+ case ResponseEmpty:
-+ return "empty"
-+ case ResponseFiles:
-+ return "files"
-+ case ResponseMetadata:
-+ return "metadata"
-+ }
-+ return "unknown"
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/internal/protocol/dial.go b/vendor/github.com/canonical/go-dqlite/internal/protocol/dial.go
-new file mode 100644
-index 00000000000..fee4b2f7f80
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/internal/protocol/dial.go
-@@ -0,0 +1,30 @@
-+package protocol
-+
-+import (
-+ "context"
-+ "crypto/tls"
-+ "net"
-+ "strings"
-+)
-+
-+// Dial function handling plain TCP and Unix socket endpoints.
-+func Dial(ctx context.Context, address string) (net.Conn, error) {
-+ family := "tcp"
-+ if strings.HasPrefix(address, "@") {
-+ family = "unix"
-+ }
-+ dialer := net.Dialer{}
-+ return dialer.DialContext(ctx, family, address)
-+}
-+
-+// TLSCipherSuites are the cipher suites by the go-dqlite TLS helpers.
-+var TLSCipherSuites = []uint16{
-+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
-+ tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
-+ tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
-+ tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
-+ tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
-+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
-+ tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
-+ tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/internal/protocol/errors.go b/vendor/github.com/canonical/go-dqlite/internal/protocol/errors.go
-new file mode 100644
-index 00000000000..79d1fedc60f
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/internal/protocol/errors.go
-@@ -0,0 +1,39 @@
-+package protocol
-+
-+import (
-+ "fmt"
-+)
-+
-+// Client errors.
-+var (
-+ ErrNoAvailableLeader = fmt.Errorf("no available dqlite leader server found")
-+ errStop = fmt.Errorf("connector was stopped")
-+ errStaleLeader = fmt.Errorf("server has lost leadership")
-+ errNotClustered = fmt.Errorf("server is not clustered")
-+ errNegativeRead = fmt.Errorf("reader returned negative count from Read")
-+ errMessageEOF = fmt.Errorf("message eof")
-+)
-+
-+// ErrRequest is returned in case of request failure.
-+type ErrRequest struct {
-+ Code uint64
-+ Description string
-+}
-+
-+func (e ErrRequest) Error() string {
-+ return fmt.Sprintf("%s (%d)", e.Description, e.Code)
-+}
-+
-+// ErrRowsPart is returned when the first batch of a multi-response result
-+// batch is done.
-+var ErrRowsPart = fmt.Errorf("not all rows were returned in this response")
-+
-+// Error holds information about a SQLite error.
-+type Error struct {
-+ Code int
-+ Message string
-+}
-+
-+func (e Error) Error() string {
-+ return e.Message
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/internal/protocol/message.go b/vendor/github.com/canonical/go-dqlite/internal/protocol/message.go
-new file mode 100644
-index 00000000000..c726d871e8d
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/internal/protocol/message.go
-@@ -0,0 +1,656 @@
-+package protocol
-+
-+import (
-+ "bytes"
-+ "database/sql/driver"
-+ "encoding/binary"
-+ "fmt"
-+ "io"
-+ "math"
-+ "strings"
-+ "time"
-+)
-+
-+// NamedValues is a type alias of a slice of driver.NamedValue. It's used by
-+// schema.sh to generate encoding logic for statement parameters.
-+type NamedValues = []driver.NamedValue
-+
-+// Nodes is a type alias of a slice of NodeInfo. It's used by schema.sh to
-+// generate decoding logic for the heartbeat response.
-+type Nodes []NodeInfo
-+
-+// Message holds data about a single request or response.
-+type Message struct {
-+ words uint32
-+ mtype uint8
-+ flags uint8
-+ extra uint16
-+ header []byte // Statically allocated header buffer
-+ body buffer // Message body data.
-+}
-+
-+// Init initializes the message using the given initial size for the data
-+// buffer, which is re-used across requests or responses encoded or decoded
-+// using this message object.
-+func (m *Message) Init(initialBufferSize int) {
-+ if (initialBufferSize % messageWordSize) != 0 {
-+ panic("initial buffer size is not aligned to word boundary")
-+ }
-+ m.header = make([]byte, messageHeaderSize)
-+ m.body.Bytes = make([]byte, initialBufferSize)
-+ m.reset()
-+}
-+
-+// Reset the state of the message so it can be used to encode or decode again.
-+func (m *Message) reset() {
-+ m.words = 0
-+ m.mtype = 0
-+ m.flags = 0
-+ m.extra = 0
-+ for i := 0; i < messageHeaderSize; i++ {
-+ m.header[i] = 0
-+ }
-+ m.body.Offset = 0
-+}
-+
-+// Append a byte slice to the message.
-+func (m *Message) putBlob(v []byte) {
-+ size := len(v)
-+ m.putUint64(uint64(size))
-+
-+ pad := 0
-+ if (size % messageWordSize) != 0 {
-+ // Account for padding
-+ pad = messageWordSize - (size % messageWordSize)
-+ size += pad
-+ }
-+
-+ b := m.bufferForPut(size)
-+ defer b.Advance(size)
-+
-+ // Copy the bytes into the buffer.
-+ offset := b.Offset
-+ copy(b.Bytes[offset:], v)
-+ offset += len(v)
-+
-+ // Add padding
-+ for i := 0; i < pad; i++ {
-+ b.Bytes[offset] = 0
-+ offset++
-+ }
-+}
-+
-+// Append a string to the message.
-+func (m *Message) putString(v string) {
-+ size := len(v) + 1
-+ pad := 0
-+ if (size % messageWordSize) != 0 {
-+ // Account for padding
-+ pad = messageWordSize - (size % messageWordSize)
-+ size += pad
-+ }
-+
-+ b := m.bufferForPut(size)
-+ defer b.Advance(size)
-+
-+ // Copy the string bytes into the buffer.
-+ offset := b.Offset
-+ copy(b.Bytes[offset:], v)
-+ offset += len(v)
-+
-+ // Add a nul byte
-+ b.Bytes[offset] = 0
-+ offset++
-+
-+ // Add padding
-+ for i := 0; i < pad; i++ {
-+ b.Bytes[offset] = 0
-+ offset++
-+ }
-+}
-+
-+// Append a byte to the message.
-+func (m *Message) putUint8(v uint8) {
-+ b := m.bufferForPut(1)
-+ defer b.Advance(1)
-+
-+ b.Bytes[b.Offset] = v
-+}
-+
-+// Append a 2-byte word to the message.
-+func (m *Message) putUint16(v uint16) {
-+ b := m.bufferForPut(2)
-+ defer b.Advance(2)
-+
-+ binary.LittleEndian.PutUint16(b.Bytes[b.Offset:], v)
-+}
-+
-+// Append a 4-byte word to the message.
-+func (m *Message) putUint32(v uint32) {
-+ b := m.bufferForPut(4)
-+ defer b.Advance(4)
-+
-+ binary.LittleEndian.PutUint32(b.Bytes[b.Offset:], v)
-+}
-+
-+// Append an 8-byte word to the message.
-+func (m *Message) putUint64(v uint64) {
-+ b := m.bufferForPut(8)
-+ defer b.Advance(8)
-+
-+ binary.LittleEndian.PutUint64(b.Bytes[b.Offset:], v)
-+}
-+
-+// Append a signed 8-byte word to the message.
-+func (m *Message) putInt64(v int64) {
-+ b := m.bufferForPut(8)
-+ defer b.Advance(8)
-+
-+ binary.LittleEndian.PutUint64(b.Bytes[b.Offset:], uint64(v))
-+}
-+
-+// Append a floating point number to the message.
-+func (m *Message) putFloat64(v float64) {
-+ b := m.bufferForPut(8)
-+ defer b.Advance(8)
-+
-+ binary.LittleEndian.PutUint64(b.Bytes[b.Offset:], math.Float64bits(v))
-+}
-+
-+// Encode the given driver values as binding parameters.
-+func (m *Message) putNamedValues(values NamedValues) {
-+ n := uint8(len(values)) // N of params
-+ if n == 0 {
-+ return
-+ }
-+
-+ m.putUint8(n)
-+
-+ for i := range values {
-+ if values[i].Ordinal != i+1 {
-+ panic("unexpected ordinal")
-+ }
-+
-+ switch values[i].Value.(type) {
-+ case int64:
-+ m.putUint8(Integer)
-+ case float64:
-+ m.putUint8(Float)
-+ case bool:
-+ m.putUint8(Boolean)
-+ case []byte:
-+ m.putUint8(Blob)
-+ case string:
-+ m.putUint8(Text)
-+ case nil:
-+ m.putUint8(Null)
-+ case time.Time:
-+ m.putUint8(ISO8601)
-+ default:
-+ panic("unsupported value type")
-+ }
-+ }
-+
-+ b := m.bufferForPut(1)
-+
-+ if trailing := b.Offset % messageWordSize; trailing != 0 {
-+ // Skip padding bytes
-+ b.Advance(messageWordSize - trailing)
-+ }
-+
-+ for i := range values {
-+ switch v := values[i].Value.(type) {
-+ case int64:
-+ m.putInt64(v)
-+ case float64:
-+ m.putFloat64(v)
-+ case bool:
-+ if v {
-+ m.putUint64(1)
-+ } else {
-+ m.putUint64(0)
-+ }
-+ case []byte:
-+ m.putBlob(v)
-+ case string:
-+ m.putString(v)
-+ case nil:
-+ m.putInt64(0)
-+ case time.Time:
-+ timestamp := v.Format(iso8601Formats[0])
-+ m.putString(timestamp)
-+ default:
-+ panic("unsupported value type")
-+ }
-+ }
-+
-+}
-+
-+// Finalize the message by setting the message type and the number
-+// of words in the body (calculated from the body size).
-+func (m *Message) putHeader(mtype uint8) {
-+ if m.body.Offset <= 0 {
-+ panic("static offset is not positive")
-+ }
-+
-+ if (m.body.Offset % messageWordSize) != 0 {
-+ panic("static body is not aligned")
-+ }
-+
-+ m.mtype = mtype
-+ m.flags = 0
-+ m.extra = 0
-+
-+ m.words = uint32(m.body.Offset) / messageWordSize
-+
-+ m.finalize()
-+}
-+
-+func (m *Message) finalize() {
-+ if m.words == 0 {
-+ panic("empty message body")
-+ }
-+
-+ binary.LittleEndian.PutUint32(m.header[0:], m.words)
-+ m.header[4] = m.mtype
-+ m.header[5] = m.flags
-+ binary.LittleEndian.PutUint16(m.header[6:], m.extra)
-+}
-+
-+func (m *Message) bufferForPut(size int) *buffer {
-+ for (m.body.Offset + size) > len(m.body.Bytes) {
-+ // Grow message buffer.
-+ bytes := make([]byte, len(m.body.Bytes)*2)
-+ copy(bytes, m.body.Bytes)
-+ m.body.Bytes = bytes
-+ }
-+
-+ return &m.body
-+}
-+
-+// Return the message type and its flags.
-+func (m *Message) getHeader() (uint8, uint8) {
-+ return m.mtype, m.flags
-+}
-+
-+// Read a string from the message body.
-+func (m *Message) getString() string {
-+ b := m.bufferForGet()
-+
-+ index := bytes.IndexByte(b.Bytes[b.Offset:], 0)
-+ if index == -1 {
-+ panic("no string found")
-+ }
-+ s := string(b.Bytes[b.Offset : b.Offset+index])
-+
-+ index++
-+
-+ if trailing := index % messageWordSize; trailing != 0 {
-+ // Account for padding, moving index to the next word boundary.
-+ index += messageWordSize - trailing
-+ }
-+
-+ b.Advance(index)
-+
-+ return s
-+}
-+
-+func (m *Message) getBlob() []byte {
-+ size := m.getUint64()
-+ data := make([]byte, size)
-+ for i := range data {
-+ data[i] = m.getUint8()
-+ }
-+ pad := 0
-+ if (size % messageWordSize) != 0 {
-+ // Account for padding
-+ pad = int(messageWordSize - (size % messageWordSize))
-+ }
-+ // Consume padding
-+ for i := 0; i < pad; i++ {
-+ m.getUint8()
-+ }
-+ return data
-+}
-+
-+// Read a byte from the message body.
-+func (m *Message) getUint8() uint8 {
-+ b := m.bufferForGet()
-+ defer b.Advance(1)
-+
-+ return b.Bytes[b.Offset]
-+}
-+
-+// Read a 2-byte word from the message body.
-+func (m *Message) getUint16() uint16 {
-+ b := m.bufferForGet()
-+ defer b.Advance(2)
-+
-+ return binary.LittleEndian.Uint16(b.Bytes[b.Offset:])
-+}
-+
-+// Read a 4-byte word from the message body.
-+func (m *Message) getUint32() uint32 {
-+ b := m.bufferForGet()
-+ defer b.Advance(4)
-+
-+ return binary.LittleEndian.Uint32(b.Bytes[b.Offset:])
-+}
-+
-+// Read reads an 8-byte word from the message body.
-+func (m *Message) getUint64() uint64 {
-+ b := m.bufferForGet()
-+ defer b.Advance(8)
-+
-+ return binary.LittleEndian.Uint64(b.Bytes[b.Offset:])
-+}
-+
-+// Read a signed 8-byte word from the message body.
-+func (m *Message) getInt64() int64 {
-+ b := m.bufferForGet()
-+ defer b.Advance(8)
-+
-+ return int64(binary.LittleEndian.Uint64(b.Bytes[b.Offset:]))
-+}
-+
-+// Read a floating point number from the message body.
-+func (m *Message) getFloat64() float64 {
-+ b := m.bufferForGet()
-+ defer b.Advance(8)
-+
-+ return math.Float64frombits(binary.LittleEndian.Uint64(b.Bytes[b.Offset:]))
-+}
-+
-+// Decode a list of server objects from the message body.
-+func (m *Message) getNodes() Nodes {
-+ n := m.getUint64()
-+ servers := make(Nodes, n)
-+
-+ for i := 0; i < int(n); i++ {
-+ servers[i].ID = m.getUint64()
-+ servers[i].Address = m.getString()
-+ servers[i].Role = NodeRole(m.getUint64())
-+ }
-+
-+ return servers
-+}
-+
-+// Decode a statement result object from the message body.
-+func (m *Message) getResult() Result {
-+ return Result{
-+ LastInsertID: m.getUint64(),
-+ RowsAffected: m.getUint64(),
-+ }
-+}
-+
-+// Decode a query result set object from the message body.
-+func (m *Message) getRows() Rows {
-+ // Read the column count and column names.
-+ columns := make([]string, m.getUint64())
-+
-+ for i := range columns {
-+ columns[i] = m.getString()
-+ }
-+
-+ rows := Rows{
-+ Columns: columns,
-+ message: m,
-+ }
-+ return rows
-+}
-+
-+func (m *Message) getFiles() Files {
-+ files := Files{
-+ n: m.getUint64(),
-+ message: m,
-+ }
-+ return files
-+}
-+
-+func (m *Message) hasBeenConsumed() bool {
-+ size := int(m.words * messageWordSize)
-+ return m.body.Offset == size
-+}
-+
-+func (m *Message) lastByte() byte {
-+ size := int(m.words * messageWordSize)
-+ return m.body.Bytes[size-1]
-+}
-+
-+func (m *Message) bufferForGet() *buffer {
-+ size := int(m.words * messageWordSize)
-+ // The static body has been exahusted, use the dynamic one.
-+ if m.body.Offset == size {
-+ err := fmt.Errorf("short message: type=%d words=%d off=%d", m.mtype, m.words, m.body.Offset)
-+ panic(err)
-+ }
-+
-+ return &m.body
-+}
-+
-+// Result holds the result of a statement.
-+type Result struct {
-+ LastInsertID uint64
-+ RowsAffected uint64
-+}
-+
-+// Rows holds a result set encoded in a message body.
-+type Rows struct {
-+ Columns []string
-+ message *Message
-+ types []uint8
-+}
-+
-+// columnTypes returns the row's column types
-+// if save is true, it will restore the buffer offset
-+func (r *Rows) columnTypes(save bool) ([]uint8, error) {
-+ // use cached values if possible if not advancing the buffer offset
-+ if save && r.types != nil {
-+ return r.types, nil
-+ }
-+ // column types should never change between rows
-+ // use cached copy to allow getting types when no more rows
-+ if r.types == nil {
-+ r.types = make([]uint8, len(r.Columns))
-+ }
-+
-+ // Each column needs a 4 byte slot to store the column type. The row
-+ // header must be padded to reach word boundary.
-+ headerBits := len(r.types) * 4
-+ padBits := 0
-+ if trailingBits := (headerBits % messageWordBits); trailingBits != 0 {
-+ padBits = (messageWordBits - trailingBits)
-+ }
-+
-+ headerSize := (headerBits + padBits) / messageWordBits * messageWordSize
-+
-+ for i := 0; i < headerSize; i++ {
-+ slot := r.message.getUint8()
-+
-+ if slot == 0xee {
-+ // More rows are available.
-+ if save {
-+ r.message.bufferForGet().Advance(-(i + 1))
-+ }
-+ return r.types, ErrRowsPart
-+ }
-+
-+ if slot == 0xff {
-+ // Rows EOF marker
-+ if save {
-+ r.message.bufferForGet().Advance(-(i + 1))
-+ }
-+ return r.types, io.EOF
-+ }
-+
-+ index := i * 2
-+
-+ if index >= len(r.types) {
-+ continue // This is padding.
-+ }
-+
-+ r.types[index] = slot & 0x0f
-+
-+ index++
-+
-+ if index >= len(r.types) {
-+ continue // This is padding byte.
-+ }
-+
-+ r.types[index] = slot >> 4
-+ }
-+ if save {
-+ r.message.bufferForGet().Advance(-headerSize)
-+ }
-+ return r.types, nil
-+}
-+
-+// Next returns the next row in the result set.
-+func (r *Rows) Next(dest []driver.Value) error {
-+ types, err := r.columnTypes(false)
-+ if err != nil {
-+ return err
-+ }
-+
-+ for i := range types {
-+ switch types[i] {
-+ case Integer:
-+ dest[i] = r.message.getInt64()
-+ case Float:
-+ dest[i] = r.message.getFloat64()
-+ case Blob:
-+ dest[i] = r.message.getBlob()
-+ case Text:
-+ dest[i] = r.message.getString()
-+ case Null:
-+ r.message.getUint64()
-+ dest[i] = nil
-+ case UnixTime:
-+ timestamp := time.Unix(r.message.getInt64(), 0)
-+ dest[i] = timestamp
-+ case ISO8601:
-+ value := r.message.getString()
-+ if value == "" {
-+ dest[i] = time.Time{}
-+ break
-+ }
-+ var t time.Time
-+ var timeVal time.Time
-+ var err error
-+ value = strings.TrimSuffix(value, "Z")
-+ for _, format := range iso8601Formats {
-+ if timeVal, err = time.ParseInLocation(format, value, time.UTC); err == nil {
-+ t = timeVal
-+ break
-+ }
-+ }
-+ if err != nil {
-+ return err
-+ }
-+ t = t.In(time.Local)
-+ dest[i] = t
-+ case Boolean:
-+ dest[i] = r.message.getInt64() != 0
-+ default:
-+ panic("unknown data type")
-+ }
-+ }
-+
-+ return nil
-+}
-+
-+// Close the result set and reset the underlying message.
-+func (r *Rows) Close() error {
-+ // If we didn't go through all rows, let's look at the last byte.
-+ var err error
-+ if !r.message.hasBeenConsumed() {
-+ slot := r.message.lastByte()
-+ if slot == 0xee {
-+ // More rows are available.
-+ err = ErrRowsPart
-+ } else if slot == 0xff {
-+ // Rows EOF marker
-+ err = io.EOF
-+ } else {
-+ err = fmt.Errorf("unexpected end of message")
-+ }
-+ }
-+ r.message.reset()
-+ return err
-+}
-+
-+// Files holds a set of files encoded in a message body.
-+type Files struct {
-+ n uint64
-+ message *Message
-+}
-+
-+func (f *Files) Next() (string, []byte) {
-+ if f.n == 0 {
-+ return "", nil
-+ }
-+ f.n--
-+ name := f.message.getString()
-+ length := f.message.getUint64()
-+ data := make([]byte, length)
-+ for i := 0; i < int(length); i++ {
-+ data[i] = f.message.getUint8()
-+ }
-+ return name, data
-+}
-+
-+func (f *Files) Close() {
-+ f.message.reset()
-+}
-+
-+const (
-+ messageWordSize = 8
-+ messageWordBits = messageWordSize * 8
-+ messageHeaderSize = messageWordSize
-+ messageMaxConsecutiveEmptyReads = 100
-+)
-+
-+var iso8601Formats = []string{
-+ // By default, store timestamps with whatever timezone they come with.
-+ // When parsed, they will be returned with the same timezone.
-+ "2006-01-02 15:04:05.999999999-07:00",
-+ "2006-01-02T15:04:05.999999999-07:00",
-+ "2006-01-02 15:04:05.999999999",
-+ "2006-01-02T15:04:05.999999999",
-+ "2006-01-02 15:04:05",
-+ "2006-01-02T15:04:05",
-+ "2006-01-02 15:04",
-+ "2006-01-02T15:04",
-+ "2006-01-02",
-+}
-+
-+// ColumnTypes returns the column types for the the result set.
-+func (r *Rows) ColumnTypes() ([]string, error) {
-+ types, err := r.columnTypes(true)
-+ kinds := make([]string, len(types))
-+
-+ for i, t := range types {
-+ switch t {
-+ case Integer:
-+ kinds[i] = "INTEGER"
-+ case Float:
-+ kinds[i] = "FLOAT"
-+ case Blob:
-+ kinds[i] = "BLOB"
-+ case Text:
-+ kinds[i] = "TEXT"
-+ case Null:
-+ kinds[i] = "NULL"
-+ case UnixTime:
-+ kinds[i] = "TIME"
-+ case ISO8601:
-+ kinds[i] = "TIME"
-+ case Boolean:
-+ kinds[i] = "BOOL"
-+ default:
-+ return nil, fmt.Errorf("unknown data type: %d", t)
-+ }
-+ }
-+
-+ return kinds, err
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/internal/protocol/message_export_test.go b/vendor/github.com/canonical/go-dqlite/internal/protocol/message_export_test.go
-new file mode 100644
-index 00000000000..a3e4286db14
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/internal/protocol/message_export_test.go
-@@ -0,0 +1,9 @@
-+package protocol
-+
-+func (m *Message) Body() ([]byte, int) {
-+ return m.body.Bytes, m.body.Offset
-+}
-+
-+func (m *Message) Rewind() {
-+ m.body.Offset = 0
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/internal/protocol/message_internal_test.go b/vendor/github.com/canonical/go-dqlite/internal/protocol/message_internal_test.go
-new file mode 100644
-index 00000000000..462573f69d1
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/internal/protocol/message_internal_test.go
-@@ -0,0 +1,289 @@
-+package protocol
-+
-+import (
-+ "fmt"
-+ "testing"
-+ "time"
-+ "unsafe"
-+
-+ "github.com/stretchr/testify/assert"
-+ "github.com/stretchr/testify/require"
-+)
-+
-+func TestMessage_StaticBytesAlignment(t *testing.T) {
-+ message := Message{}
-+ message.Init(4096)
-+ pointer := uintptr(unsafe.Pointer(&message.body.Bytes))
-+ assert.Equal(t, pointer%messageWordSize, uintptr(0))
-+}
-+
-+func TestMessage_putBlob(t *testing.T) {
-+ cases := []struct {
-+ Blob []byte
-+ Offset int
-+ }{
-+ {[]byte{1, 2, 3, 4, 5}, 16},
-+ {[]byte{1, 2, 3, 4, 5, 6, 7, 8}, 16},
-+ {[]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, 24},
-+ }
-+
-+ message := Message{}
-+ message.Init(64)
-+
-+ for _, c := range cases {
-+ t.Run(fmt.Sprintf("%d", c.Offset), func(t *testing.T) {
-+ message.putBlob(c.Blob)
-+
-+ bytes, offset := message.Body()
-+
-+ assert.Equal(t, bytes[8:len(c.Blob)+8], c.Blob)
-+ assert.Equal(t, offset, c.Offset)
-+
-+ message.reset()
-+ })
-+ }
-+}
-+
-+func TestMessage_putString(t *testing.T) {
-+ cases := []struct {
-+ String string
-+ Offset int
-+ }{
-+ {"hello", 8},
-+ {"hello!!", 8},
-+ {"hello world", 16},
-+ }
-+
-+ message := Message{}
-+ message.Init(16)
-+
-+ for _, c := range cases {
-+ t.Run(c.String, func(t *testing.T) {
-+ message.putString(c.String)
-+
-+ bytes, offset := message.Body()
-+
-+ assert.Equal(t, string(bytes[:len(c.String)]), c.String)
-+ assert.Equal(t, offset, c.Offset)
-+
-+ message.reset()
-+ })
-+ }
-+}
-+
-+func TestMessage_putUint8(t *testing.T) {
-+ message := Message{}
-+ message.Init(8)
-+
-+ v := uint8(12)
-+
-+ message.putUint8(v)
-+
-+ bytes, offset := message.Body()
-+
-+ assert.Equal(t, bytes[0], byte(v))
-+
-+ assert.Equal(t, offset, 1)
-+}
-+
-+func TestMessage_putUint16(t *testing.T) {
-+ message := Message{}
-+ message.Init(8)
-+
-+ v := uint16(666)
-+
-+ message.putUint16(v)
-+
-+ bytes, offset := message.Body()
-+
-+ assert.Equal(t, bytes[0], byte((v & 0x00ff)))
-+ assert.Equal(t, bytes[1], byte((v&0xff00)>>8))
-+
-+ assert.Equal(t, offset, 2)
-+}
-+
-+func TestMessage_putUint32(t *testing.T) {
-+ message := Message{}
-+ message.Init(8)
-+
-+ v := uint32(130000)
-+
-+ message.putUint32(v)
-+
-+ bytes, offset := message.Body()
-+
-+ assert.Equal(t, bytes[0], byte((v & 0x000000ff)))
-+ assert.Equal(t, bytes[1], byte((v&0x0000ff00)>>8))
-+ assert.Equal(t, bytes[2], byte((v&0x00ff0000)>>16))
-+ assert.Equal(t, bytes[3], byte((v&0xff000000)>>24))
-+
-+ assert.Equal(t, offset, 4)
-+}
-+
-+func TestMessage_putUint64(t *testing.T) {
-+ message := Message{}
-+ message.Init(8)
-+
-+ v := uint64(5000000000)
-+
-+ message.putUint64(v)
-+
-+ bytes, offset := message.Body()
-+
-+ assert.Equal(t, bytes[0], byte((v & 0x00000000000000ff)))
-+ assert.Equal(t, bytes[1], byte((v&0x000000000000ff00)>>8))
-+ assert.Equal(t, bytes[2], byte((v&0x0000000000ff0000)>>16))
-+ assert.Equal(t, bytes[3], byte((v&0x00000000ff000000)>>24))
-+ assert.Equal(t, bytes[4], byte((v&0x000000ff00000000)>>32))
-+ assert.Equal(t, bytes[5], byte((v&0x0000ff0000000000)>>40))
-+ assert.Equal(t, bytes[6], byte((v&0x00ff000000000000)>>48))
-+ assert.Equal(t, bytes[7], byte((v&0xff00000000000000)>>56))
-+
-+ assert.Equal(t, offset, 8)
-+}
-+
-+func TestMessage_putNamedValues(t *testing.T) {
-+ message := Message{}
-+ message.Init(256)
-+
-+ timestamp, err := time.ParseInLocation("2006-01-02", "2018-08-01", time.UTC)
-+ require.NoError(t, err)
-+
-+ values := NamedValues{
-+ {Ordinal: 1, Value: int64(123)},
-+ {Ordinal: 2, Value: float64(3.1415)},
-+ {Ordinal: 3, Value: true},
-+ {Ordinal: 4, Value: []byte{1, 2, 3, 4, 5, 6}},
-+ {Ordinal: 5, Value: "hello"},
-+ {Ordinal: 6, Value: nil},
-+ {Ordinal: 7, Value: timestamp},
-+ }
-+
-+ message.putNamedValues(values)
-+
-+ bytes, offset := message.Body()
-+
-+ assert.Equal(t, 96, offset)
-+ assert.Equal(t, bytes[0], byte(7))
-+ assert.Equal(t, bytes[1], byte(Integer))
-+ assert.Equal(t, bytes[2], byte(Float))
-+ assert.Equal(t, bytes[3], byte(Boolean))
-+ assert.Equal(t, bytes[4], byte(Blob))
-+ assert.Equal(t, bytes[5], byte(Text))
-+ assert.Equal(t, bytes[6], byte(Null))
-+ assert.Equal(t, bytes[7], byte(ISO8601))
-+}
-+
-+func TestMessage_putHeader(t *testing.T) {
-+ message := Message{}
-+ message.Init(64)
-+
-+ message.putString("hello")
-+ message.putHeader(RequestExec)
-+}
-+
-+func BenchmarkMessage_putString(b *testing.B) {
-+ message := Message{}
-+ message.Init(4096)
-+
-+ b.ReportAllocs()
-+ b.ResetTimer()
-+ for i := 0; i < b.N; i++ {
-+ message.reset()
-+ message.putString("hello")
-+ }
-+}
-+
-+func BenchmarkMessage_putUint64(b *testing.B) {
-+ message := Message{}
-+ message.Init(4096)
-+
-+ b.ReportAllocs()
-+ b.ResetTimer()
-+ for i := 0; i < b.N; i++ {
-+ message.reset()
-+ message.putUint64(270)
-+ }
-+}
-+
-+func TestMessage_getString(t *testing.T) {
-+ cases := []struct {
-+ String string
-+ Offset int
-+ }{
-+ {"hello", 8},
-+ {"hello!!", 8},
-+ {"hello!!!", 16},
-+ {"hello world", 16},
-+ }
-+
-+ for _, c := range cases {
-+ t.Run(c.String, func(t *testing.T) {
-+ message := Message{}
-+ message.Init(16)
-+
-+ message.putString(c.String)
-+ message.putHeader(0)
-+
-+ message.Rewind()
-+
-+ s := message.getString()
-+
-+ _, offset := message.Body()
-+
-+ assert.Equal(t, s, c.String)
-+ assert.Equal(t, offset, c.Offset)
-+ })
-+ }
-+}
-+
-+func TestMessage_getBlob(t *testing.T) {
-+ cases := []struct {
-+ Blob []byte
-+ Offset int
-+ }{
-+ {[]byte{1, 2, 3, 4, 5}, 16},
-+ {[]byte{1, 2, 3, 4, 5, 6, 7, 8}, 16},
-+ {[]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, 24},
-+ }
-+
-+ for _, c := range cases {
-+ t.Run(fmt.Sprintf("%d", c.Offset), func(t *testing.T) {
-+ message := Message{}
-+ message.Init(64)
-+
-+ message.putBlob(c.Blob)
-+ message.putHeader(0)
-+
-+ message.Rewind()
-+
-+ bytes := message.getBlob()
-+
-+ _, offset := message.Body()
-+
-+ assert.Equal(t, bytes, c.Blob)
-+ assert.Equal(t, offset, c.Offset)
-+ })
-+ }
-+}
-+
-+// The overflowing string ends exactly at word boundary.
-+func TestMessage_getString_Overflow_WordBoundary(t *testing.T) {
-+ message := Message{}
-+ message.Init(8)
-+
-+ message.putBlob([]byte{
-+ 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
-+ 'i', 'l', 'm', 'n', 'o', 'p', 'q', 'r',
-+ 0, 0, 0, 0, 0, 0, 0,
-+ })
-+ message.putHeader(0)
-+
-+ message.Rewind()
-+ message.getUint64()
-+
-+ s := message.getString()
-+ assert.Equal(t, "abcdefghilmnopqr", s)
-+
-+ assert.Equal(t, 32, message.body.Offset)
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/internal/protocol/protocol.go b/vendor/github.com/canonical/go-dqlite/internal/protocol/protocol.go
-new file mode 100644
-index 00000000000..2094db7de29
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/internal/protocol/protocol.go
-@@ -0,0 +1,308 @@
-+package protocol
-+
-+import (
-+ "context"
-+ "encoding/binary"
-+ "io"
-+ "net"
-+ "sync"
-+ "time"
-+
-+ "github.com/pkg/errors"
-+)
-+
-+// Protocol sends and receive the dqlite message on the wire.
-+type Protocol struct {
-+ version uint64 // Protocol version
-+ conn net.Conn // Underlying network connection.
-+ closeCh chan struct{} // Stops the heartbeat when the connection gets closed
-+ mu sync.Mutex // Serialize requests
-+ netErr error // A network error occurred
-+}
-+
-+func newProtocol(version uint64, conn net.Conn) *Protocol {
-+ protocol := &Protocol{
-+ version: version,
-+ conn: conn,
-+ closeCh: make(chan struct{}),
-+ }
-+
-+ return protocol
-+}
-+
-+// Call invokes a dqlite RPC, sending a request message and receiving a
-+// response message.
-+func (p *Protocol) Call(ctx context.Context, request, response *Message) (err error) {
-+ // We need to take a lock since the dqlite server currently does not
-+ // support concurrent requests.
-+ p.mu.Lock()
-+ defer p.mu.Unlock()
-+
-+ if p.netErr != nil {
-+ return p.netErr
-+ }
-+
-+ defer func() {
-+ if err == nil {
-+ return
-+ }
-+ switch errors.Cause(err).(type) {
-+ case *net.OpError:
-+ p.netErr = err
-+ }
-+ }()
-+
-+ var budget time.Duration
-+
-+ // Honor the ctx deadline, if present.
-+ if deadline, ok := ctx.Deadline(); ok {
-+ p.conn.SetDeadline(deadline)
-+ budget = time.Until(deadline)
-+ defer p.conn.SetDeadline(time.Time{})
-+ }
-+
-+ desc := requestDesc(request.mtype)
-+
-+ if err = p.send(request); err != nil {
-+ return errors.Wrapf(err, "call %s (budget %s): send", desc, budget)
-+ }
-+
-+ if err = p.recv(response); err != nil {
-+ return errors.Wrapf(err, "call %s (budget %s): receive", desc, budget)
-+ }
-+
-+ return
-+}
-+
-+// More is used when a request maps to multiple responses.
-+func (p *Protocol) More(ctx context.Context, response *Message) error {
-+ return p.recv(response)
-+}
-+
-+// Interrupt sends an interrupt request and awaits for the server's empty
-+// response.
-+func (p *Protocol) Interrupt(ctx context.Context, request *Message, response *Message) error {
-+ // We need to take a lock since the dqlite server currently does not
-+ // support concurrent requests.
-+ p.mu.Lock()
-+ defer p.mu.Unlock()
-+
-+ // Honor the ctx deadline, if present.
-+ if deadline, ok := ctx.Deadline(); ok {
-+ p.conn.SetDeadline(deadline)
-+ defer p.conn.SetDeadline(time.Time{})
-+ }
-+
-+ EncodeInterrupt(request, 0)
-+
-+ if err := p.send(request); err != nil {
-+ return errors.Wrap(err, "failed to send interrupt request")
-+ }
-+
-+ for {
-+ if err := p.recv(response); err != nil {
-+ return errors.Wrap(err, "failed to receive response")
-+ }
-+
-+ mtype, _ := response.getHeader()
-+
-+ if mtype == ResponseEmpty {
-+ break
-+ }
-+ }
-+
-+ return nil
-+}
-+
-+// Close the client connection.
-+func (p *Protocol) Close() error {
-+ close(p.closeCh)
-+ return p.conn.Close()
-+}
-+
-+func (p *Protocol) send(req *Message) error {
-+ if err := p.sendHeader(req); err != nil {
-+ return errors.Wrap(err, "header")
-+ }
-+
-+ if err := p.sendBody(req); err != nil {
-+ return errors.Wrap(err, "body")
-+ }
-+
-+ return nil
-+}
-+
-+func (p *Protocol) sendHeader(req *Message) error {
-+ n, err := p.conn.Write(req.header[:])
-+ if err != nil {
-+ return err
-+ }
-+
-+ if n != messageHeaderSize {
-+ return io.ErrShortWrite
-+ }
-+
-+ return nil
-+}
-+
-+func (p *Protocol) sendBody(req *Message) error {
-+ buf := req.body.Bytes[:req.body.Offset]
-+ n, err := p.conn.Write(buf)
-+ if err != nil {
-+ return err
-+ }
-+
-+ if n != len(buf) {
-+ return io.ErrShortWrite
-+ }
-+
-+ return nil
-+}
-+
-+func (p *Protocol) recv(res *Message) error {
-+ res.reset()
-+
-+ if err := p.recvHeader(res); err != nil {
-+ return errors.Wrap(err, "header")
-+ }
-+
-+ if err := p.recvBody(res); err != nil {
-+ return errors.Wrap(err, "body")
-+ }
-+
-+ return nil
-+}
-+
-+func (p *Protocol) recvHeader(res *Message) error {
-+ if err := p.recvPeek(res.header); err != nil {
-+ return err
-+ }
-+
-+ res.words = binary.LittleEndian.Uint32(res.header[0:])
-+ res.mtype = res.header[4]
-+ res.flags = res.header[5]
-+ res.extra = binary.LittleEndian.Uint16(res.header[6:])
-+
-+ return nil
-+}
-+
-+func (p *Protocol) recvBody(res *Message) error {
-+ n := int(res.words) * messageWordSize
-+
-+ for n > len(res.body.Bytes) {
-+ // Grow message buffer.
-+ bytes := make([]byte, len(res.body.Bytes)*2)
-+ res.body.Bytes = bytes
-+ }
-+
-+ buf := res.body.Bytes[:n]
-+
-+ if err := p.recvPeek(buf); err != nil {
-+ return err
-+ }
-+
-+ return nil
-+}
-+
-+// Read until buf is full.
-+func (p *Protocol) recvPeek(buf []byte) error {
-+ for offset := 0; offset < len(buf); {
-+ n, err := p.recvFill(buf[offset:])
-+ if err != nil {
-+ return err
-+ }
-+ offset += n
-+ }
-+
-+ return nil
-+}
-+
-+// Try to fill buf, but perform at most one read.
-+func (p *Protocol) recvFill(buf []byte) (int, error) {
-+ // Read new data: try a limited number of times.
-+ //
-+ // This technique is copied from bufio.Reader.
-+ for i := messageMaxConsecutiveEmptyReads; i > 0; i-- {
-+ n, err := p.conn.Read(buf)
-+ if n < 0 {
-+ panic(errNegativeRead)
-+ }
-+ if err != nil {
-+ return -1, err
-+ }
-+ if n > 0 {
-+ return n, nil
-+ }
-+ }
-+ return -1, io.ErrNoProgress
-+}
-+
-+/*
-+func (p *Protocol) heartbeat() {
-+ request := Message{}
-+ request.Init(16)
-+ response := Message{}
-+ response.Init(512)
-+
-+ for {
-+ delay := c.heartbeatTimeout / 3
-+
-+ //c.logger.Debug("sending heartbeat", zap.Duration("delay", delay))
-+ time.Sleep(delay)
-+
-+ // Check if we've been closed.
-+ select {
-+ case <-c.closeCh:
-+ return
-+ default:
-+ }
-+
-+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
-+
-+ EncodeHeartbeat(&request, uint64(time.Now().Unix()))
-+
-+ err := c.Call(ctx, &request, &response)
-+
-+ // We bail out upon failures.
-+ //
-+ // TODO: make the client survive temporary disconnections.
-+ if err != nil {
-+ cancel()
-+ //c.logger.Error("heartbeat failed", zap.Error(err))
-+ return
-+ }
-+
-+ //addresses, err := DecodeNodes(&response)
-+ _, err = DecodeNodes(&response)
-+ if err != nil {
-+ cancel()
-+ //c.logger.Error("invalid heartbeat response", zap.Error(err))
-+ return
-+ }
-+
-+ // if err := c.store.Set(ctx, addresses); err != nil {
-+ // cancel()
-+ // c.logger.Error("failed to update servers", zap.Error(err))
-+ // return
-+ // }
-+
-+ cancel()
-+
-+ request.Reset()
-+ response.Reset()
-+ }
-+}
-+*/
-+
-+// DecodeNodeCompat handles also pre-1.0 legacy server messages.
-+func DecodeNodeCompat(protocol *Protocol, response *Message) (uint64, string, error) {
-+ if protocol.version == VersionLegacy {
-+ address, err := DecodeNodeLegacy(response)
-+ if err != nil {
-+ return 0, "", err
-+ }
-+ return 0, address, nil
-+
-+ }
-+ return DecodeNode(response)
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/internal/protocol/protocol_test.go b/vendor/github.com/canonical/go-dqlite/internal/protocol/protocol_test.go
-new file mode 100644
-index 00000000000..0d7726fbc82
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/internal/protocol/protocol_test.go
-@@ -0,0 +1,189 @@
-+package protocol_test
-+
-+import (
-+ "context"
-+ "testing"
-+ "time"
-+
-+ "github.com/canonical/go-dqlite/internal/logging"
-+ "github.com/canonical/go-dqlite/internal/protocol"
-+ "github.com/stretchr/testify/assert"
-+ "github.com/stretchr/testify/require"
-+)
-+
-+// func TestProtocol_Heartbeat(t *testing.T) {
-+// c, cleanup := newProtocol(t)
-+// defer cleanup()
-+
-+// request, response := newMessagePair(512, 512)
-+
-+// protocol.EncodeHeartbeat(&request, uint64(time.Now().Unix()))
-+
-+// makeCall(t, c, &request, &response)
-+
-+// servers, err := protocol.DecodeNodes(&response)
-+// require.NoError(t, err)
-+
-+// assert.Len(t, servers, 2)
-+// assert.Equal(t, client.Nodes{
-+// {ID: uint64(1), Address: "1.2.3.4:666"},
-+// {ID: uint64(2), Address: "5.6.7.8:666"}},
-+// servers)
-+// }
-+
-+// Test sending a request that needs to be written into the dynamic buffer.
-+func TestProtocol_RequestWithDynamicBuffer(t *testing.T) {
-+ p, cleanup := newProtocol(t)
-+ defer cleanup()
-+
-+ request, response := newMessagePair(64, 64)
-+
-+ protocol.EncodeOpen(&request, "test.db", 0, "test-0")
-+
-+ makeCall(t, p, &request, &response)
-+
-+ id, err := protocol.DecodeDb(&response)
-+ require.NoError(t, err)
-+
-+ sql := `
-+CREATE TABLE foo (n INT);
-+CREATE TABLE bar (n INT);
-+CREATE TABLE egg (n INT);
-+CREATE TABLE baz (n INT);
-+`
-+ protocol.EncodeExecSQL(&request, uint64(id), sql, nil)
-+
-+ makeCall(t, p, &request, &response)
-+}
-+
-+func TestProtocol_Prepare(t *testing.T) {
-+ c, cleanup := newProtocol(t)
-+ defer cleanup()
-+
-+ request, response := newMessagePair(64, 64)
-+
-+ protocol.EncodeOpen(&request, "test.db", 0, "test-0")
-+
-+ makeCall(t, c, &request, &response)
-+
-+ db, err := protocol.DecodeDb(&response)
-+ require.NoError(t, err)
-+
-+ protocol.EncodePrepare(&request, uint64(db), "CREATE TABLE test (n INT)")
-+
-+ makeCall(t, c, &request, &response)
-+
-+ _, stmt, params, err := protocol.DecodeStmt(&response)
-+ require.NoError(t, err)
-+
-+ assert.Equal(t, uint32(0), stmt)
-+ assert.Equal(t, uint64(0), params)
-+}
-+
-+/*
-+func TestProtocol_Exec(t *testing.T) {
-+ client, cleanup := newProtocol(t)
-+ defer cleanup()
-+
-+ ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond)
-+ defer cancel()
-+
-+ db, err := client.Open(ctx, "test.db", "volatile")
-+ require.NoError(t, err)
-+
-+ stmt, err := client.Prepare(ctx, db.ID, "CREATE TABLE test (n INT)")
-+ require.NoError(t, err)
-+
-+ _, err = client.Exec(ctx, db.ID, stmt.ID)
-+ require.NoError(t, err)
-+}
-+
-+func TestProtocol_Query(t *testing.T) {
-+ client, cleanup := newProtocol(t)
-+ defer cleanup()
-+
-+ ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond)
-+ defer cancel()
-+
-+ db, err := client.Open(ctx, "test.db", "volatile")
-+ require.NoError(t, err)
-+
-+ start := time.Now()
-+
-+ stmt, err := client.Prepare(ctx, db.ID, "CREATE TABLE test (n INT)")
-+ require.NoError(t, err)
-+
-+ _, err = client.Exec(ctx, db.ID, stmt.ID)
-+ require.NoError(t, err)
-+
-+ _, err = client.Finalize(ctx, db.ID, stmt.ID)
-+ require.NoError(t, err)
-+
-+ stmt, err = client.Prepare(ctx, db.ID, "INSERT INTO test VALUES(1)")
-+ require.NoError(t, err)
-+
-+ _, err = client.Exec(ctx, db.ID, stmt.ID)
-+ require.NoError(t, err)
-+
-+ _, err = client.Finalize(ctx, db.ID, stmt.ID)
-+ require.NoError(t, err)
-+
-+ stmt, err = client.Prepare(ctx, db.ID, "SELECT n FROM test")
-+ require.NoError(t, err)
-+
-+ _, err = client.Query(ctx, db.ID, stmt.ID)
-+ require.NoError(t, err)
-+
-+ _, err = client.Finalize(ctx, db.ID, stmt.ID)
-+ require.NoError(t, err)
-+
-+ fmt.Printf("time %s\n", time.Since(start))
-+}
-+*/
-+
-+func newProtocol(t *testing.T) (*protocol.Protocol, func()) {
-+ t.Helper()
-+
-+ address, serverCleanup := newNode(t, 0)
-+
-+ store := newStore(t, []string{address})
-+ config := protocol.Config{
-+ AttemptTimeout: 100 * time.Millisecond,
-+ BackoffFactor: time.Millisecond,
-+ }
-+ connector := protocol.NewConnector(0, store, config, logging.Test(t))
-+
-+ ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond)
-+ defer cancel()
-+
-+ client, err := connector.Connect(ctx)
-+
-+ require.NoError(t, err)
-+
-+ cleanup := func() {
-+ client.Close()
-+ serverCleanup()
-+ }
-+
-+ return client, cleanup
-+}
-+
-+// Perform a client call.
-+func makeCall(t *testing.T, p *protocol.Protocol, request, response *protocol.Message) {
-+ ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond)
-+ defer cancel()
-+
-+ err := p.Call(ctx, request, response)
-+ require.NoError(t, err)
-+}
-+
-+// Return a new message pair to be used as request and response.
-+func newMessagePair(size1, size2 int) (protocol.Message, protocol.Message) {
-+ message1 := protocol.Message{}
-+ message1.Init(size1)
-+
-+ message2 := protocol.Message{}
-+ message2.Init(size2)
-+
-+ return message1, message2
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/internal/protocol/request.go b/vendor/github.com/canonical/go-dqlite/internal/protocol/request.go
-new file mode 100644
-index 00000000000..bf934c65fc1
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/internal/protocol/request.go
-@@ -0,0 +1,172 @@
-+package protocol
-+
-+// DO NOT EDIT
-+//
-+// This file was generated by ./schema.sh
-+
-+
-+// EncodeLeader encodes a Leader request.
-+func EncodeLeader(request *Message) {
-+ request.reset()
-+ request.putUint64(0)
-+
-+ request.putHeader(RequestLeader)
-+}
-+
-+// EncodeClient encodes a Client request.
-+func EncodeClient(request *Message, id uint64) {
-+ request.reset()
-+ request.putUint64(id)
-+
-+ request.putHeader(RequestClient)
-+}
-+
-+// EncodeHeartbeat encodes a Heartbeat request.
-+func EncodeHeartbeat(request *Message, timestamp uint64) {
-+ request.reset()
-+ request.putUint64(timestamp)
-+
-+ request.putHeader(RequestHeartbeat)
-+}
-+
-+// EncodeOpen encodes a Open request.
-+func EncodeOpen(request *Message, name string, flags uint64, vfs string) {
-+ request.reset()
-+ request.putString(name)
-+ request.putUint64(flags)
-+ request.putString(vfs)
-+
-+ request.putHeader(RequestOpen)
-+}
-+
-+// EncodePrepare encodes a Prepare request.
-+func EncodePrepare(request *Message, db uint64, sql string) {
-+ request.reset()
-+ request.putUint64(db)
-+ request.putString(sql)
-+
-+ request.putHeader(RequestPrepare)
-+}
-+
-+// EncodeExec encodes a Exec request.
-+func EncodeExec(request *Message, db uint32, stmt uint32, values NamedValues) {
-+ request.reset()
-+ request.putUint32(db)
-+ request.putUint32(stmt)
-+ request.putNamedValues(values)
-+
-+ request.putHeader(RequestExec)
-+}
-+
-+// EncodeQuery encodes a Query request.
-+func EncodeQuery(request *Message, db uint32, stmt uint32, values NamedValues) {
-+ request.reset()
-+ request.putUint32(db)
-+ request.putUint32(stmt)
-+ request.putNamedValues(values)
-+
-+ request.putHeader(RequestQuery)
-+}
-+
-+// EncodeFinalize encodes a Finalize request.
-+func EncodeFinalize(request *Message, db uint32, stmt uint32) {
-+ request.reset()
-+ request.putUint32(db)
-+ request.putUint32(stmt)
-+
-+ request.putHeader(RequestFinalize)
-+}
-+
-+// EncodeExecSQL encodes a ExecSQL request.
-+func EncodeExecSQL(request *Message, db uint64, sql string, values NamedValues) {
-+ request.reset()
-+ request.putUint64(db)
-+ request.putString(sql)
-+ request.putNamedValues(values)
-+
-+ request.putHeader(RequestExecSQL)
-+}
-+
-+// EncodeQuerySQL encodes a QuerySQL request.
-+func EncodeQuerySQL(request *Message, db uint64, sql string, values NamedValues) {
-+ request.reset()
-+ request.putUint64(db)
-+ request.putString(sql)
-+ request.putNamedValues(values)
-+
-+ request.putHeader(RequestQuerySQL)
-+}
-+
-+// EncodeInterrupt encodes a Interrupt request.
-+func EncodeInterrupt(request *Message, db uint64) {
-+ request.reset()
-+ request.putUint64(db)
-+
-+ request.putHeader(RequestInterrupt)
-+}
-+
-+// EncodeAdd encodes a Add request.
-+func EncodeAdd(request *Message, id uint64, address string) {
-+ request.reset()
-+ request.putUint64(id)
-+ request.putString(address)
-+
-+ request.putHeader(RequestAdd)
-+}
-+
-+// EncodeAssign encodes a Assign request.
-+func EncodeAssign(request *Message, id uint64, role uint64) {
-+ request.reset()
-+ request.putUint64(id)
-+ request.putUint64(role)
-+
-+ request.putHeader(RequestAssign)
-+}
-+
-+// EncodeRemove encodes a Remove request.
-+func EncodeRemove(request *Message, id uint64) {
-+ request.reset()
-+ request.putUint64(id)
-+
-+ request.putHeader(RequestRemove)
-+}
-+
-+// EncodeDump encodes a Dump request.
-+func EncodeDump(request *Message, name string) {
-+ request.reset()
-+ request.putString(name)
-+
-+ request.putHeader(RequestDump)
-+}
-+
-+// EncodeCluster encodes a Cluster request.
-+func EncodeCluster(request *Message, format uint64) {
-+ request.reset()
-+ request.putUint64(format)
-+
-+ request.putHeader(RequestCluster)
-+}
-+
-+// EncodeTransfer encodes a Transfer request.
-+func EncodeTransfer(request *Message, id uint64) {
-+ request.reset()
-+ request.putUint64(id)
-+
-+ request.putHeader(RequestTransfer)
-+}
-+
-+// EncodeDescribe encodes a Describe request.
-+func EncodeDescribe(request *Message, format uint64) {
-+ request.reset()
-+ request.putUint64(format)
-+
-+ request.putHeader(RequestDescribe)
-+}
-+
-+// EncodeWeight encodes a Weight request.
-+func EncodeWeight(request *Message, weight uint64) {
-+ request.reset()
-+ request.putUint64(weight)
-+
-+ request.putHeader(RequestWeight)
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/internal/protocol/response.go b/vendor/github.com/canonical/go-dqlite/internal/protocol/response.go
-new file mode 100644
-index 00000000000..3d101af3fde
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/internal/protocol/response.go
-@@ -0,0 +1,278 @@
-+package protocol
-+
-+// DO NOT EDIT
-+//
-+// This file was generated by ./schema.sh
-+
-+import "fmt"
-+
-+
-+// DecodeFailure decodes a Failure response.
-+func DecodeFailure(response *Message) (code uint64, message string, err error) {
-+ mtype, _ := response.getHeader()
-+
-+ if mtype == ResponseFailure {
-+ e := ErrRequest{}
-+ e.Code = response.getUint64()
-+ e.Description = response.getString()
-+ err = e
-+ return
-+ }
-+
-+ if mtype != ResponseFailure {
-+ err = fmt.Errorf("decode %s: unexpected type %d", responseDesc(ResponseFailure), mtype)
-+ return
-+ }
-+
-+ code = response.getUint64()
-+ message = response.getString()
-+
-+ return
-+}
-+
-+// DecodeWelcome decodes a Welcome response.
-+func DecodeWelcome(response *Message) (heartbeatTimeout uint64, err error) {
-+ mtype, _ := response.getHeader()
-+
-+ if mtype == ResponseFailure {
-+ e := ErrRequest{}
-+ e.Code = response.getUint64()
-+ e.Description = response.getString()
-+ err = e
-+ return
-+ }
-+
-+ if mtype != ResponseWelcome {
-+ err = fmt.Errorf("decode %s: unexpected type %d", responseDesc(ResponseWelcome), mtype)
-+ return
-+ }
-+
-+ heartbeatTimeout = response.getUint64()
-+
-+ return
-+}
-+
-+// DecodeNodeLegacy decodes a NodeLegacy response.
-+func DecodeNodeLegacy(response *Message) (address string, err error) {
-+ mtype, _ := response.getHeader()
-+
-+ if mtype == ResponseFailure {
-+ e := ErrRequest{}
-+ e.Code = response.getUint64()
-+ e.Description = response.getString()
-+ err = e
-+ return
-+ }
-+
-+ if mtype != ResponseNodeLegacy {
-+ err = fmt.Errorf("decode %s: unexpected type %d", responseDesc(ResponseNodeLegacy), mtype)
-+ return
-+ }
-+
-+ address = response.getString()
-+
-+ return
-+}
-+
-+// DecodeNode decodes a Node response.
-+func DecodeNode(response *Message) (id uint64, address string, err error) {
-+ mtype, _ := response.getHeader()
-+
-+ if mtype == ResponseFailure {
-+ e := ErrRequest{}
-+ e.Code = response.getUint64()
-+ e.Description = response.getString()
-+ err = e
-+ return
-+ }
-+
-+ if mtype != ResponseNode {
-+ err = fmt.Errorf("decode %s: unexpected type %d", responseDesc(ResponseNode), mtype)
-+ return
-+ }
-+
-+ id = response.getUint64()
-+ address = response.getString()
-+
-+ return
-+}
-+
-+// DecodeNodes decodes a Nodes response.
-+func DecodeNodes(response *Message) (servers Nodes, err error) {
-+ mtype, _ := response.getHeader()
-+
-+ if mtype == ResponseFailure {
-+ e := ErrRequest{}
-+ e.Code = response.getUint64()
-+ e.Description = response.getString()
-+ err = e
-+ return
-+ }
-+
-+ if mtype != ResponseNodes {
-+ err = fmt.Errorf("decode %s: unexpected type %d", responseDesc(ResponseNodes), mtype)
-+ return
-+ }
-+
-+ servers = response.getNodes()
-+
-+ return
-+}
-+
-+// DecodeDb decodes a Db response.
-+func DecodeDb(response *Message) (id uint32, err error) {
-+ mtype, _ := response.getHeader()
-+
-+ if mtype == ResponseFailure {
-+ e := ErrRequest{}
-+ e.Code = response.getUint64()
-+ e.Description = response.getString()
-+ err = e
-+ return
-+ }
-+
-+ if mtype != ResponseDb {
-+ err = fmt.Errorf("decode %s: unexpected type %d", responseDesc(ResponseDb), mtype)
-+ return
-+ }
-+
-+ id = response.getUint32()
-+ response.getUint32()
-+
-+ return
-+}
-+
-+// DecodeStmt decodes a Stmt response.
-+func DecodeStmt(response *Message) (db uint32, id uint32, params uint64, err error) {
-+ mtype, _ := response.getHeader()
-+
-+ if mtype == ResponseFailure {
-+ e := ErrRequest{}
-+ e.Code = response.getUint64()
-+ e.Description = response.getString()
-+ err = e
-+ return
-+ }
-+
-+ if mtype != ResponseStmt {
-+ err = fmt.Errorf("decode %s: unexpected type %d", responseDesc(ResponseStmt), mtype)
-+ return
-+ }
-+
-+ db = response.getUint32()
-+ id = response.getUint32()
-+ params = response.getUint64()
-+
-+ return
-+}
-+
-+// DecodeEmpty decodes a Empty response.
-+func DecodeEmpty(response *Message) (err error) {
-+ mtype, _ := response.getHeader()
-+
-+ if mtype == ResponseFailure {
-+ e := ErrRequest{}
-+ e.Code = response.getUint64()
-+ e.Description = response.getString()
-+ err = e
-+ return
-+ }
-+
-+ if mtype != ResponseEmpty {
-+ err = fmt.Errorf("decode %s: unexpected type %d", responseDesc(ResponseEmpty), mtype)
-+ return
-+ }
-+
-+ response.getUint64()
-+
-+ return
-+}
-+
-+// DecodeResult decodes a Result response.
-+func DecodeResult(response *Message) (result Result, err error) {
-+ mtype, _ := response.getHeader()
-+
-+ if mtype == ResponseFailure {
-+ e := ErrRequest{}
-+ e.Code = response.getUint64()
-+ e.Description = response.getString()
-+ err = e
-+ return
-+ }
-+
-+ if mtype != ResponseResult {
-+ err = fmt.Errorf("decode %s: unexpected type %d", responseDesc(ResponseResult), mtype)
-+ return
-+ }
-+
-+ result = response.getResult()
-+
-+ return
-+}
-+
-+// DecodeRows decodes a Rows response.
-+func DecodeRows(response *Message) (rows Rows, err error) {
-+ mtype, _ := response.getHeader()
-+
-+ if mtype == ResponseFailure {
-+ e := ErrRequest{}
-+ e.Code = response.getUint64()
-+ e.Description = response.getString()
-+ err = e
-+ return
-+ }
-+
-+ if mtype != ResponseRows {
-+ err = fmt.Errorf("decode %s: unexpected type %d", responseDesc(ResponseRows), mtype)
-+ return
-+ }
-+
-+ rows = response.getRows()
-+
-+ return
-+}
-+
-+// DecodeFiles decodes a Files response.
-+func DecodeFiles(response *Message) (files Files, err error) {
-+ mtype, _ := response.getHeader()
-+
-+ if mtype == ResponseFailure {
-+ e := ErrRequest{}
-+ e.Code = response.getUint64()
-+ e.Description = response.getString()
-+ err = e
-+ return
-+ }
-+
-+ if mtype != ResponseFiles {
-+ err = fmt.Errorf("decode %s: unexpected type %d", responseDesc(ResponseFiles), mtype)
-+ return
-+ }
-+
-+ files = response.getFiles()
-+
-+ return
-+}
-+
-+// DecodeMetadata decodes a Metadata response.
-+func DecodeMetadata(response *Message) (failureDomain uint64, weight uint64, err error) {
-+ mtype, _ := response.getHeader()
-+
-+ if mtype == ResponseFailure {
-+ e := ErrRequest{}
-+ e.Code = response.getUint64()
-+ e.Description = response.getString()
-+ err = e
-+ return
-+ }
-+
-+ if mtype != ResponseMetadata {
-+ err = fmt.Errorf("decode %s: unexpected type %d", responseDesc(ResponseMetadata), mtype)
-+ return
-+ }
-+
-+ failureDomain = response.getUint64()
-+ weight = response.getUint64()
-+
-+ return
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/internal/protocol/schema.go b/vendor/github.com/canonical/go-dqlite/internal/protocol/schema.go
-new file mode 100644
-index 00000000000..f82c1355490
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/internal/protocol/schema.go
-@@ -0,0 +1,37 @@
-+package protocol
-+
-+//go:generate ./schema.sh --request init
-+
-+//go:generate ./schema.sh --request Leader unused:uint64
-+//go:generate ./schema.sh --request Client id:uint64
-+//go:generate ./schema.sh --request Heartbeat timestamp:uint64
-+//go:generate ./schema.sh --request Open name:string flags:uint64 vfs:string
-+//go:generate ./schema.sh --request Prepare db:uint64 sql:string
-+//go:generate ./schema.sh --request Exec db:uint32 stmt:uint32 values:NamedValues
-+//go:generate ./schema.sh --request Query db:uint32 stmt:uint32 values:NamedValues
-+//go:generate ./schema.sh --request Finalize db:uint32 stmt:uint32
-+//go:generate ./schema.sh --request ExecSQL db:uint64 sql:string values:NamedValues
-+//go:generate ./schema.sh --request QuerySQL db:uint64 sql:string values:NamedValues
-+//go:generate ./schema.sh --request Interrupt db:uint64
-+//go:generate ./schema.sh --request Add id:uint64 address:string
-+//go:generate ./schema.sh --request Assign id:uint64 role:uint64
-+//go:generate ./schema.sh --request Remove id:uint64
-+//go:generate ./schema.sh --request Dump name:string
-+//go:generate ./schema.sh --request Cluster format:uint64
-+//go:generate ./schema.sh --request Transfer id:uint64
-+//go:generate ./schema.sh --request Describe format:uint64
-+//go:generate ./schema.sh --request Weight weight:uint64
-+
-+//go:generate ./schema.sh --response init
-+//go:generate ./schema.sh --response Failure code:uint64 message:string
-+//go:generate ./schema.sh --response Welcome heartbeatTimeout:uint64
-+//go:generate ./schema.sh --response NodeLegacy address:string
-+//go:generate ./schema.sh --response Node id:uint64 address:string
-+//go:generate ./schema.sh --response Nodes servers:Nodes
-+//go:generate ./schema.sh --response Db id:uint32 unused:uint32
-+//go:generate ./schema.sh --response Stmt db:uint32 id:uint32 params:uint64
-+//go:generate ./schema.sh --response Empty unused:uint64
-+//go:generate ./schema.sh --response Result result:Result
-+//go:generate ./schema.sh --response Rows rows:Rows
-+//go:generate ./schema.sh --response Files files:Files
-+//go:generate ./schema.sh --response Metadata failureDomain:uint64 weight:uint64
-diff --git a/vendor/github.com/canonical/go-dqlite/internal/protocol/schema.sh b/vendor/github.com/canonical/go-dqlite/internal/protocol/schema.sh
-new file mode 100755
-index 00000000000..221c73917e5
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/internal/protocol/schema.sh
-@@ -0,0 +1,145 @@
-+#!/bin/bash
-+
-+request_init() {
-+ cat > request.go < response.go <> request.go <> request.go <> request.go <> response.go <> response.go <> response.go < 0 {
-+ result += "\n"
-+ }
-+ result += fmt.Sprintf("%x|%s|%s", server.ID, server.Address, server.Role)
-+ }
-+ case formatJson:
-+ data, err := json.Marshal(cluster)
-+ if err != nil {
-+ return "", err
-+ }
-+ var indented bytes.Buffer
-+ json.Indent(&indented, data, "", "\t")
-+ result = string(indented.Bytes())
-+ }
-+
-+ return result, nil
-+}
-+
-+func (s *Shell) processLeader(ctx context.Context, line string) (string, error) {
-+ cli, err := client.FindLeader(ctx, s.store, client.WithDialFunc(s.dial))
-+ if err != nil {
-+ return "", err
-+ }
-+ leader, err := cli.Leader(ctx)
-+ if err != nil {
-+ return "", err
-+ }
-+ if leader == nil {
-+ return "", nil
-+ }
-+ return leader.Address, nil
-+}
-+
-+func (s *Shell) processRemove(ctx context.Context, line string) (string, error) {
-+ parts := strings.Split(line, " ")
-+ if len(parts) != 2 {
-+ return "", fmt.Errorf("bad command format, should be: .remove ")
-+ }
-+ address := parts[1]
-+ cli, err := client.FindLeader(ctx, s.store, client.WithDialFunc(s.dial))
-+ if err != nil {
-+ return "", err
-+ }
-+ cluster, err := cli.Cluster(ctx)
-+ if err != nil {
-+ return "", err
-+ }
-+ for _, node := range cluster {
-+ if node.Address != address {
-+ continue
-+ }
-+ if err := cli.Remove(ctx, node.ID); err != nil {
-+ return "", fmt.Errorf("remove node %q: %w", address, err)
-+ }
-+ return "", nil
-+ }
-+
-+ return "", fmt.Errorf("no node has address %q", address)
-+}
-+
-+func (s *Shell) processDescribe(ctx context.Context, line string) (string, error) {
-+ parts := strings.Split(line, " ")
-+ if len(parts) != 2 {
-+ return "", fmt.Errorf("bad command format, should be: .describe ")
-+ }
-+ address := parts[1]
-+ cli, err := client.New(ctx, address, client.WithDialFunc(s.dial))
-+ if err != nil {
-+ return "", err
-+ }
-+ metadata, err := cli.Describe(ctx)
-+ if err != nil {
-+ return "", err
-+ }
-+
-+ result := ""
-+ switch s.format {
-+ case formatTabular:
-+ result += fmt.Sprintf("%s|%d|%d", address, metadata.FailureDomain, metadata.Weight)
-+ case formatJson:
-+ data, err := json.Marshal(metadata)
-+ if err != nil {
-+ return "", err
-+ }
-+ var indented bytes.Buffer
-+ json.Indent(&indented, data, "", "\t")
-+ result = string(indented.Bytes())
-+ }
-+
-+ return result, nil
-+}
-+
-+func (s *Shell) processWeight(ctx context.Context, line string) (string, error) {
-+ parts := strings.Split(line, " ")
-+ if len(parts) != 3 {
-+ return "", fmt.Errorf("bad command format, should be: .weight ")
-+ }
-+ address := parts[1]
-+ weight, err := strconv.Atoi(parts[2])
-+ if err != nil || weight < 0 {
-+ return "", fmt.Errorf("bad weight %q", parts[2])
-+ }
-+
-+ cli, err := client.New(ctx, address, client.WithDialFunc(s.dial))
-+ if err != nil {
-+ return "", err
-+ }
-+ if err := cli.Weight(ctx, uint64(weight)); err != nil {
-+ return "", err
-+ }
-+
-+ return "", nil
-+}
-+
-+func (s *Shell) processSelect(ctx context.Context, line string) (string, error) {
-+ tx, err := s.db.BeginTx(ctx, nil)
-+ if err != nil {
-+ return "", fmt.Errorf("begin transaction: %w", err)
-+ }
-+
-+ rows, err := tx.Query(line)
-+ if err != nil {
-+ return "", fmt.Errorf("query: %w", err)
-+ }
-+ defer rows.Close()
-+
-+ columns, err := rows.Columns()
-+ if err != nil {
-+ return "", fmt.Errorf("columns: %w", err)
-+ }
-+ n := len(columns)
-+
-+ result := ""
-+ for rows.Next() {
-+ row := make([]interface{}, n)
-+ rowPointers := make([]interface{}, n)
-+ for i := range row {
-+ rowPointers[i] = &row[i]
-+ }
-+
-+ if err := rows.Scan(rowPointers...); err != nil {
-+ return "", fmt.Errorf("scan: %w", err)
-+ }
-+
-+ for i, column := range row {
-+ s := fmt.Sprintf("%v", column)
-+ if i == 0 {
-+ result += s
-+ } else {
-+ result += "|" + s
-+ }
-+
-+ }
-+ result += "\n"
-+ }
-+ result = strings.TrimRight(result, "\n")
-+
-+ if err := rows.Err(); err != nil {
-+ return "", fmt.Errorf("rows: %w", err)
-+ }
-+
-+ if err := tx.Commit(); err != nil {
-+ return "", fmt.Errorf("commit: %w", err)
-+ }
-+
-+ return result, nil
-+}
-+
-+func (s *Shell) processExec(ctx context.Context, line string) error {
-+ tx, err := s.db.BeginTx(ctx, nil)
-+ if err != nil {
-+ return err
-+ }
-+
-+ if _, err := tx.Exec(line); err != nil {
-+ return err
-+ }
-+
-+ if err := tx.Commit(); err != nil {
-+ return err
-+ }
-+
-+ return nil
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/node.go b/vendor/github.com/canonical/go-dqlite/node.go
-new file mode 100644
-index 00000000000..da1942d42e9
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/node.go
-@@ -0,0 +1,166 @@
-+package dqlite
-+
-+import (
-+ "time"
-+
-+ "github.com/canonical/go-dqlite/client"
-+ "github.com/canonical/go-dqlite/internal/bindings"
-+ "github.com/pkg/errors"
-+)
-+
-+// Node runs a dqlite node.
-+type Node struct {
-+ log client.LogFunc // Logger
-+ server *bindings.Node // Low-level C implementation
-+ acceptCh chan error // Receives connection handling errors
-+ id uint64
-+ address string
-+ bindAddress string
-+}
-+
-+// NodeInfo is a convenience alias for client.NodeInfo.
-+type NodeInfo = client.NodeInfo
-+
-+// Option can be used to tweak node parameters.
-+type Option func(*options)
-+
-+// WithDialFunc sets a custom dial function for the server.
-+func WithDialFunc(dial client.DialFunc) Option {
-+ return func(options *options) {
-+ options.DialFunc = dial
-+ }
-+}
-+
-+// WithBindAddress sets a custom bind address for the server.
-+func WithBindAddress(address string) Option {
-+ return func(options *options) {
-+ options.BindAddress = address
-+ }
-+}
-+
-+// WithNetworkLatency sets the average one-way network latency.
-+func WithNetworkLatency(latency time.Duration) Option {
-+ return func(options *options) {
-+ options.NetworkLatency = uint64(latency.Nanoseconds())
-+ }
-+}
-+
-+// WithFailureDomain sets the code of the failure domain the node belongs to.
-+func WithFailureDomain(code uint64) Option {
-+ return func(options *options) {
-+ options.FailureDomain = code
-+ }
-+}
-+
-+// New creates a new Node instance.
-+func New(id uint64, address string, dir string, options ...Option) (*Node, error) {
-+ o := defaultOptions()
-+
-+ for _, option := range options {
-+ option(o)
-+ }
-+
-+ server, err := bindings.NewNode(id, address, dir)
-+ if err != nil {
-+ return nil, err
-+ }
-+ if o.DialFunc != nil {
-+ if err := server.SetDialFunc(o.DialFunc); err != nil {
-+ return nil, err
-+ }
-+ }
-+ if o.BindAddress != "" {
-+ if err := server.SetBindAddress(o.BindAddress); err != nil {
-+ return nil, err
-+ }
-+ }
-+ if o.NetworkLatency != 0 {
-+ if err := server.SetNetworkLatency(o.NetworkLatency); err != nil {
-+ return nil, err
-+ }
-+ }
-+ if o.FailureDomain != 0 {
-+ if err := server.SetFailureDomain(o.FailureDomain); err != nil {
-+ return nil, err
-+ }
-+ }
-+ s := &Node{
-+ server: server,
-+ acceptCh: make(chan error, 1),
-+ id: id,
-+ address: address,
-+ bindAddress: o.BindAddress,
-+ }
-+
-+ return s, nil
-+}
-+
-+// BindAddress returns the network address the node is listening to.
-+func (s *Node) BindAddress() string {
-+ return s.server.GetBindAddress()
-+}
-+
-+// Start serving requests.
-+func (s *Node) Start() error {
-+ return s.server.Start()
-+}
-+
-+// Recover a node by forcing a new cluster configuration.
-+//
-+// DEPRECATED: Use ReconfigureMembership instead, which does not require
-+// instantiating a new Node object.
-+func (s *Node) Recover(cluster []NodeInfo) error {
-+ return s.server.Recover(cluster)
-+}
-+
-+// Hold configuration options for a dqlite server.
-+type options struct {
-+ Log client.LogFunc
-+ DialFunc client.DialFunc
-+ BindAddress string
-+ NetworkLatency uint64
-+ FailureDomain uint64
-+}
-+
-+// Close the server, releasing all resources it created.
-+func (s *Node) Close() error {
-+ // Send a stop signal to the dqlite event loop.
-+ if err := s.server.Stop(); err != nil {
-+ return errors.Wrap(err, "server failed to stop")
-+ }
-+
-+ s.server.Close()
-+
-+ return nil
-+}
-+
-+// BootstrapID is a magic ID that should be used for the fist node in a
-+// cluster. Alternatively ID 1 can be used as well.
-+const BootstrapID = 0x2dc171858c3155be
-+
-+// GenerateID generates a unique ID for a new node, based on a hash of its
-+// address and the current time.
-+func GenerateID(address string) uint64 {
-+ return bindings.GenerateID(address)
-+}
-+
-+// ReconfigureMembership can be used to recover a cluster whose majority of
-+// nodes have died, and therefore has become unavailable.
-+//
-+// It forces appending a new configuration to the raft log stored in the given
-+// directory, effectively replacing the current configuration.
-+func ReconfigureMembership(dir string, cluster []NodeInfo) error {
-+ server, err := bindings.NewNode(1, "1", dir)
-+ if err != nil {
-+ return err
-+ }
-+ defer server.Close()
-+ return server.Recover(cluster)
-+}
-+
-+// Create a options object with sane defaults.
-+func defaultOptions() *options {
-+ return &options{
-+ DialFunc: client.DefaultDialFunc,
-+ }
-+}
-diff --git a/vendor/github.com/canonical/go-dqlite/test/dqlite-demo.sh b/vendor/github.com/canonical/go-dqlite/test/dqlite-demo.sh
-new file mode 100755
-index 00000000000..c31b547279c
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/test/dqlite-demo.sh
-@@ -0,0 +1,123 @@
-+#!/bin/sh -eu
-+#
-+# Test the dqlite-demo application.
-+
-+GO=${GO:-go}
-+VERBOSE=${VERBOSE:-0}
-+
-+$GO build -tags libsqlite3 ./cmd/dqlite-demo/
-+
-+DIR=$(mktemp -d)
-+
-+start_node() {
-+ n="${1}"
-+ pidfile="${DIR}/pid.${n}"
-+ join="${2}"
-+ verbose=""
-+
-+ if [ $VERBOSE -eq 1 ]; then
-+ verbose="--verbose"
-+ fi
-+
-+ ./dqlite-demo --dir $DIR --api=127.0.0.1:800${n} --db=127.0.0.1:900${n} $join $verbose &
-+ echo "${!}" > "${pidfile}"
-+
-+ i=0
-+ while ! nc -z 127.0.0.1 800${n} 2>/dev/null; do
-+ i=$(expr $i + 1)
-+ sleep 0.2
-+ if [ $i -eq 25 ]; then
-+ echo "Error: node ${n} not yet up after 5 seconds"
-+ exit 1
-+ fi
-+ done
-+}
-+
-+kill_node() {
-+ n=$1
-+ pidfile="${DIR}/pid.${n}"
-+
-+ if ! [ -e $pidfile ]; then
-+ return
-+ fi
-+
-+ pid=$(cat ${pidfile})
-+
-+ kill -TERM $pid
-+ wait $pid
-+
-+ rm ${pidfile}
-+}
-+
-+set_up_node() {
-+ n=$1
-+ join=""
-+ if [ $n -ne 1 ]; then
-+ join=--join=127.0.0.1:9001
-+ fi
-+
-+ echo "=> Set up dqlite-demo node $n"
-+
-+ start_node "${n}" "${join}"
-+}
-+
-+tear_down_node() {
-+ n=$1
-+
-+ echo "=> Tear down dqlite-demo node $n"
-+
-+ kill_node $n
-+}
-+
-+set_up() {
-+ echo "=> Set up dqlite-demo cluster"
-+ set_up_node 1
-+ set_up_node 2
-+ set_up_node 3
-+}
-+
-+tear_down() {
-+ err=$?
-+ trap '' HUP INT TERM
-+
-+ echo "=> Tear down dqlite-demo cluster"
-+ tear_down_node 3
-+ tear_down_node 2
-+ tear_down_node 1
-+
-+ rm -rf $DIR
-+
-+ exit $err
-+}
-+
-+sig_handler() {
-+ trap '' EXIT
-+ tear_down
-+}
-+
-+trap tear_down EXIT
-+trap sig_handler HUP INT TERM
-+
-+set_up
-+
-+echo "=> Start test"
-+
-+echo "=> Put key to node 1"
-+if [ "$(curl -s -X PUT -d my-key http://127.0.0.1:8001/my-value)" != "done" ]; then
-+ echo "Error: put key to node 1"
-+fi
-+
-+echo "=> Get key from node 1"
-+if [ "$(curl -s http://127.0.0.1:8001/my-value)" != "my-key" ]; then
-+ echo "Error: get key from node 1"
-+fi
-+
-+echo "=> Kill node 1"
-+kill_node 1
-+
-+echo "=> Get key from node 2"
-+if [ "$(curl -s http://127.0.0.1:8002/my-value)" != "my-key" ]; then
-+ echo "Error: get key from node 2"
-+fi
-+
-+echo "=> Test successful"
-diff --git a/vendor/github.com/canonical/go-dqlite/test/roles.sh b/vendor/github.com/canonical/go-dqlite/test/roles.sh
-new file mode 100755
-index 00000000000..8c663a5ab3a
---- /dev/null
-+++ b/vendor/github.com/canonical/go-dqlite/test/roles.sh
-@@ -0,0 +1,247 @@
-+#!/bin/bash -eu
-+#
-+# Test dynamic roles management.
-+
-+GO=${GO:-go}
-+VERBOSE=${VERBOSE:-0}
-+
-+DIR=$(mktemp -d)
-+BINARY=$DIR/main
-+CLUSTER=127.0.0.1:9001,127.0.0.1:9002,127.0.0.1:9003,127.0.0.1:9004,127.0.0.1:9005,127.0.0.1:9006
-+N=7
-+
-+$GO build -tags libsqlite3 ./cmd/dqlite/
-+
-+set_up_binary() {
-+ cat > $DIR/main.go < 1 {
-+ join = append(join, "127.0.0.1:9001")
-+ }
-+ addr := fmt.Sprintf("127.0.0.1:900%d", index)
-+ if err := os.MkdirAll(dir, 0755); err != nil {
-+ panic(err)
-+ }
-+ app, err := app.New(
-+ dir,
-+ app.WithAddress(addr),
-+ app.WithCluster(join),
-+ app.WithLogFunc(logFunc),
-+ app.WithRolesAdjustmentFrequency(3 * time.Second),
-+ )
-+ if err != nil {
-+ panic(err)
-+ }
-+ if err := app.Ready(context.Background()); err != nil {
-+ panic(err)
-+ }
-+ <-ch
-+ ctx, cancel := context.WithTimeout(context.Background(), 2 * time.Second)
-+ defer cancel()
-+ app.Handover(ctx)
-+ app.Close()
-+}
-+EOF
-+ $GO build -o $BINARY -tags libsqlite3 $DIR/main.go
-+}
-+
-+start_node() {
-+ n="${1}"
-+ pidfile="${DIR}/pid.${n}"
-+
-+ $BINARY $n &
-+ echo "${!}" > "${pidfile}"
-+}
-+
-+kill_node() {
-+ n=$1
-+ signal=$2
-+ pidfile="${DIR}/pid.${n}"
-+
-+ if ! [ -e $pidfile ]; then
-+ return
-+ fi
-+
-+ pid=$(cat ${pidfile})
-+
-+ kill -${signal} $pid
-+ wait $pid || true
-+
-+ rm ${pidfile}
-+}
-+
-+# Wait for the cluster to have 3 voters, 2 stand-bys and 1 spare
-+wait_stable() {
-+ i=0
-+ while true; do
-+ i=$(expr $i + 1)
-+ voters=$(./dqlite -s $CLUSTER test .cluster | grep voter | wc -l)
-+ standbys=$(./dqlite -s $CLUSTER test .cluster | grep stand-by | wc -l)
-+ spares=$(./dqlite -s $CLUSTER test .cluster | grep spare | wc -l)
-+ if [ $voters -eq 3 ] && [ $standbys -eq 3 ] && [ $spares -eq 1 ] ; then
-+ break
-+ fi
-+ if [ $i -eq 40 ]; then
-+ echo "Error: node roles not yet stable after 10 seconds"
-+ ./dqlite -s $CLUSTER test .cluster
-+ exit 1
-+ fi
-+ sleep 0.25
-+ done
-+}
-+
-+# Wait for the given node to have the given role
-+wait_role() {
-+ index=$1
-+ role=$2
-+ i=0
-+ while true; do
-+ i=$(expr $i + 1)
-+ current=$(./dqlite -s $CLUSTER test .cluster | grep "900${index}" | cut -f 3 -d "|")
-+ if [ "$current" = "$role" ]; then
-+ break
-+ fi
-+ if [ $i -eq 40 ]; then
-+ echo "Error: node $index has role $current instead of $role"
-+ ./dqlite -s $CLUSTER test .cluster
-+ exit 1
-+ fi
-+ sleep 0.25
-+ done
-+}
-+
-+set_up_node() {
-+ n=$1
-+ echo "=> Set up test node $n"
-+ start_node "${n}"
-+}
-+
-+set_up() {
-+ echo "=> Set up test cluster"
-+ set_up_binary
-+ for i in $(seq $N); do
-+ set_up_node $i
-+ done
-+}
-+
-+tear_down_node() {
-+ n=$1
-+ echo "=> Tear down test node $n"
-+ kill_node $n TERM
-+}
-+
-+tear_down() {
-+ err=$?
-+ trap '' HUP INT TERM
-+
-+ echo "=> Tear down test cluster"
-+
-+ for i in $(seq $N -1 1); do
-+ tear_down_node $i
-+ done
-+
-+ rm -rf $DIR
-+
-+ exit $err
-+}
-+
-+sig_handler() {
-+ trap '' EXIT
-+ tear_down
-+}
-+
-+trap tear_down EXIT
-+trap sig_handler HUP INT TERM
-+
-+set_up
-+
-+echo "=> Wait for roles to get stable"
-+wait_stable
-+
-+# Stop one node at a time gracefully, then check that the cluster is stable.
-+for i in $(seq 10); do
-+ index=$((1 + RANDOM % $N))
-+ echo "=> Stop node $index"
-+ kill_node $index TERM
-+ echo "=> Wait for roles to get stable"
-+ wait_role $index spare
-+ wait_stable
-+ echo "=> Restart node $index"
-+ start_node $index
-+ sleep 0.5
-+done
-+
-+# Kill one node at a time ungracefully, then check that the cluster is stable.
-+for i in $(seq 1); do
-+ index=$((1 + RANDOM % $N))
-+ echo "=> Kill node $index"
-+ kill_node $index KILL
-+ echo "=> Wait for roles to get stable"
-+ wait_role $index spare
-+ wait_stable
-+ echo "=> Restart node $index"
-+ start_node $index
-+ sleep 0.5
-+done
-+
-+# Stop two nodes at a time gracefully, then check that the cluster is stable.
-+for i in $(seq 10); do
-+ index1=$((1 + RANDOM % $N))
-+ index2=$((1 + (index1 + $((RANDOM % ($N - 1)))) % $N))
-+ echo "=> Stop nodes $index1 and $index2"
-+ kill_node $index1 TERM
-+ kill_node $index2 TERM
-+ sleep 2
-+ echo "=> Restart nodes $index1 and $index2"
-+ start_node $index1
-+ start_node $index2
-+ echo "=> Wait for roles to get stable"
-+ wait_stable
-+ sleep 1
-+done
-+
-+# Kill two nodes at a time ungracefully, then check that the cluster is stable.
-+for i in $(seq 10); do
-+ index1=$((1 + RANDOM % $N))
-+ index2=$((1 + (index1 + $((RANDOM % ($N - 1)))) % $N))
-+ echo "=> Stop nodes $index1 and $index2"
-+ kill_node $index1 KILL
-+ kill_node $index2 KILL
-+ sleep 5
-+ echo "=> Restart nodes $index1 and $index2"
-+ start_node $index1
-+ start_node $index2
-+ echo "=> Wait for roles to get stable"
-+ wait_stable
-+ sleep 1
-+done
-+
-+echo "=> Test successful"
-diff --git a/vendor/github.com/canonical/kvsql-dqlite/.dir-locals.el b/vendor/github.com/canonical/kvsql-dqlite/.dir-locals.el
-new file mode 100644
-index 00000000000..9d3ee557d4d
---- /dev/null
-+++ b/vendor/github.com/canonical/kvsql-dqlite/.dir-locals.el
-@@ -0,0 +1,8 @@
-+;;; Directory Local Variables
-+;;; For more information see (info "(emacs) Directory Variables")
-+((go-mode
-+ . ((go-test-args . "-tags libsqlite3,dqlite -timeout 10s")
-+ (eval
-+ . (set
-+ (make-local-variable 'flycheck-go-build-tags)
-+ '("libsqlite3" "dqlite"))))))
-diff --git a/vendor/github.com/canonical/kvsql-dqlite/LICENSE b/vendor/github.com/canonical/kvsql-dqlite/LICENSE
-new file mode 100644
-index 00000000000..f433b1a53f5
---- /dev/null
-+++ b/vendor/github.com/canonical/kvsql-dqlite/LICENSE
-@@ -0,0 +1,177 @@
-+
-+ Apache License
-+ Version 2.0, January 2004
-+ http://www.apache.org/licenses/
-+
-+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-+
-+ 1. Definitions.
-+
-+ "License" shall mean the terms and conditions for use, reproduction,
-+ and distribution as defined by Sections 1 through 9 of this document.
-+
-+ "Licensor" shall mean the copyright owner or entity authorized by
-+ the copyright owner that is granting the License.
-+
-+ "Legal Entity" shall mean the union of the acting entity and all
-+ other entities that control, are controlled by, or are under common
-+ control with that entity. For the purposes of this definition,
-+ "control" means (i) the power, direct or indirect, to cause the
-+ direction or management of such entity, whether by contract or
-+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
-+ outstanding shares, or (iii) beneficial ownership of such entity.
-+
-+ "You" (or "Your") shall mean an individual or Legal Entity
-+ exercising permissions granted by this License.
-+
-+ "Source" form shall mean the preferred form for making modifications,
-+ including but not limited to software source code, documentation
-+ source, and configuration files.
-+
-+ "Object" form shall mean any form resulting from mechanical
-+ transformation or translation of a Source form, including but
-+ not limited to compiled object code, generated documentation,
-+ and conversions to other media types.
-+
-+ "Work" shall mean the work of authorship, whether in Source or
-+ Object form, made available under the License, as indicated by a
-+ copyright notice that is included in or attached to the work
-+ (an example is provided in the Appendix below).
-+
-+ "Derivative Works" shall mean any work, whether in Source or Object
-+ form, that is based on (or derived from) the Work and for which the
-+ editorial revisions, annotations, elaborations, or other modifications
-+ represent, as a whole, an original work of authorship. For the purposes
-+ of this License, Derivative Works shall not include works that remain
-+ separable from, or merely link (or bind by name) to the interfaces of,
-+ the Work and Derivative Works thereof.
-+
-+ "Contribution" shall mean any work of authorship, including
-+ the original version of the Work and any modifications or additions
-+ to that Work or Derivative Works thereof, that is intentionally
-+ submitted to Licensor for inclusion in the Work by the copyright owner
-+ or by an individual or Legal Entity authorized to submit on behalf of
-+ the copyright owner. For the purposes of this definition, "submitted"
-+ means any form of electronic, verbal, or written communication sent
-+ to the Licensor or its representatives, including but not limited to
-+ communication on electronic mailing lists, source code control systems,
-+ and issue tracking systems that are managed by, or on behalf of, the
-+ Licensor for the purpose of discussing and improving the Work, but
-+ excluding communication that is conspicuously marked or otherwise
-+ designated in writing by the copyright owner as "Not a Contribution."
-+
-+ "Contributor" shall mean Licensor and any individual or Legal Entity
-+ on behalf of whom a Contribution has been received by Licensor and
-+ subsequently incorporated within the Work.
-+
-+ 2. Grant of Copyright License. Subject to the terms and conditions of
-+ this License, each Contributor hereby grants to You a perpetual,
-+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-+ copyright license to reproduce, prepare Derivative Works of,
-+ publicly display, publicly perform, sublicense, and distribute the
-+ Work and such Derivative Works in Source or Object form.
-+
-+ 3. Grant of Patent License. Subject to the terms and conditions of
-+ this License, each Contributor hereby grants to You a perpetual,
-+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-+ (except as stated in this section) patent license to make, have made,
-+ use, offer to sell, sell, import, and otherwise transfer the Work,
-+ where such license applies only to those patent claims licensable
-+ by such Contributor that are necessarily infringed by their
-+ Contribution(s) alone or by combination of their Contribution(s)
-+ with the Work to which such Contribution(s) was submitted. If You
-+ institute patent litigation against any entity (including a
-+ cross-claim or counterclaim in a lawsuit) alleging that the Work
-+ or a Contribution incorporated within the Work constitutes direct
-+ or contributory patent infringement, then any patent licenses
-+ granted to You under this License for that Work shall terminate
-+ as of the date such litigation is filed.
-+
-+ 4. Redistribution. You may reproduce and distribute copies of the
-+ Work or Derivative Works thereof in any medium, with or without
-+ modifications, and in Source or Object form, provided that You
-+ meet the following conditions:
-+
-+ (a) You must give any other recipients of the Work or
-+ Derivative Works a copy of this License; and
-+
-+ (b) You must cause any modified files to carry prominent notices
-+ stating that You changed the files; and
-+
-+ (c) You must retain, in the Source form of any Derivative Works
-+ that You distribute, all copyright, patent, trademark, and
-+ attribution notices from the Source form of the Work,
-+ excluding those notices that do not pertain to any part of
-+ the Derivative Works; and
-+
-+ (d) If the Work includes a "NOTICE" text file as part of its
-+ distribution, then any Derivative Works that You distribute must
-+ include a readable copy of the attribution notices contained
-+ within such NOTICE file, excluding those notices that do not
-+ pertain to any part of the Derivative Works, in at least one
-+ of the following places: within a NOTICE text file distributed
-+ as part of the Derivative Works; within the Source form or
-+ documentation, if provided along with the Derivative Works; or,
-+ within a display generated by the Derivative Works, if and
-+ wherever such third-party notices normally appear. The contents
-+ of the NOTICE file are for informational purposes only and
-+ do not modify the License. You may add Your own attribution
-+ notices within Derivative Works that You distribute, alongside
-+ or as an addendum to the NOTICE text from the Work, provided
-+ that such additional attribution notices cannot be construed
-+ as modifying the License.
-+
-+ You may add Your own copyright statement to Your modifications and
-+ may provide additional or different license terms and conditions
-+ for use, reproduction, or distribution of Your modifications, or
-+ for any such Derivative Works as a whole, provided Your use,
-+ reproduction, and distribution of the Work otherwise complies with
-+ the conditions stated in this License.
-+
-+ 5. Submission of Contributions. Unless You explicitly state otherwise,
-+ any Contribution intentionally submitted for inclusion in the Work
-+ by You to the Licensor shall be under the terms and conditions of
-+ this License, without any additional terms or conditions.
-+ Notwithstanding the above, nothing herein shall supersede or modify
-+ the terms of any separate license agreement you may have executed
-+ with Licensor regarding such Contributions.
-+
-+ 6. Trademarks. This License does not grant permission to use the trade
-+ names, trademarks, service marks, or product names of the Licensor,
-+ except as required for reasonable and customary use in describing the
-+ origin of the Work and reproducing the content of the NOTICE file.
-+
-+ 7. Disclaimer of Warranty. Unless required by applicable law or
-+ agreed to in writing, Licensor provides the Work (and each
-+ Contributor provides its Contributions) on an "AS IS" BASIS,
-+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-+ implied, including, without limitation, any warranties or conditions
-+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-+ PARTICULAR PURPOSE. You are solely responsible for determining the
-+ appropriateness of using or redistributing the Work and assume any
-+ risks associated with Your exercise of permissions under this License.
-+
-+ 8. Limitation of Liability. In no event and under no legal theory,
-+ whether in tort (including negligence), contract, or otherwise,
-+ unless required by applicable law (such as deliberate and grossly
-+ negligent acts) or agreed to in writing, shall any Contributor be
-+ liable to You for damages, including any direct, indirect, special,
-+ incidental, or consequential damages of any character arising as a
-+ result of this License or out of the use or inability to use the
-+ Work (including but not limited to damages for loss of goodwill,
-+ work stoppage, computer failure or malfunction, or any and all
-+ other commercial damages or losses), even if such Contributor
-+ has been advised of the possibility of such damages.
-+
-+ 9. Accepting Warranty or Additional Liability. While redistributing
-+ the Work or Derivative Works thereof, You may choose to offer,
-+ and charge a fee for, acceptance of support, warranty, indemnity,
-+ or other liability obligations and/or rights consistent with this
-+ License. However, in accepting such obligations, You may act only
-+ on Your own behalf and on Your sole responsibility, not on behalf
-+ of any other Contributor, and only if You agree to indemnify,
-+ defend, and hold each Contributor harmless for any liability
-+ incurred by, or claims asserted against, such Contributor by reason
-+ of your accepting any such warranty or additional liability.
-+
-+ END OF TERMS AND CONDITIONS
-diff --git a/vendor/github.com/canonical/kvsql-dqlite/cmd/main.go b/vendor/github.com/canonical/kvsql-dqlite/cmd/main.go
-new file mode 100644
-index 00000000000..80ea1b37367
---- /dev/null
-+++ b/vendor/github.com/canonical/kvsql-dqlite/cmd/main.go
-@@ -0,0 +1,13 @@
-+package main
-+
-+import (
-+ "github.com/canonical/kvsql-dqlite/server"
-+ "log"
-+)
-+
-+func main() {
-+ _, err := server.New("/tmp/node1")
-+ if err != nil {
-+ log.Fatal(err)
-+ }
-+}
-diff --git a/vendor/github.com/canonical/kvsql-dqlite/go.mod b/vendor/github.com/canonical/kvsql-dqlite/go.mod
-new file mode 100644
-index 00000000000..5ccab820097
---- /dev/null
-+++ b/vendor/github.com/canonical/kvsql-dqlite/go.mod
-@@ -0,0 +1,36 @@
-+module github.com/canonical/kvsql-dqlite
-+
-+go 1.14
-+
-+require (
-+ github.com/PuerkitoBio/goquery v1.5.1 // indirect
-+ github.com/Rican7/retry v0.1.0 // indirect
-+ github.com/canonical/go-dqlite v1.7.0
-+ github.com/coreos/etcd v3.3.22+incompatible // indirect
-+ github.com/emicklei/go-restful v2.13.0+incompatible
-+ github.com/ghodss/yaml v1.0.0
-+ github.com/go-sql-driver/mysql v1.5.0 // indirect
-+ github.com/gogo/protobuf v1.3.1 // indirect
-+ github.com/golang/protobuf v1.4.2 // indirect
-+ github.com/json-iterator/go v1.1.10 // indirect
-+ github.com/lib/pq v1.7.0 // indirect
-+ github.com/pkg/errors v0.9.1
-+ github.com/rancher/kine v0.4.0
-+ github.com/sirupsen/logrus v1.6.0
-+ github.com/stretchr/testify v1.6.0
-+ go.uber.org/zap v1.15.0 // indirect
-+ golang.org/x/net v0.0.0-20200602114024-627f9648deb9 // indirect
-+ golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4 // indirect
-+ golang.org/x/text v0.3.3 // indirect
-+ google.golang.org/appengine v1.6.6 // indirect
-+ google.golang.org/genproto v0.0.0-20200624020401-64a14ca9d1ad // indirect
-+ google.golang.org/grpc v1.30.0 // indirect
-+ google.golang.org/protobuf v1.25.0 // indirect
-+ gopkg.in/yaml.v2 v2.3.0
-+ k8s.io/apimachinery v0.17.0
-+ k8s.io/apiserver v0.17.0
-+)
-+
-+replace (
-+ github.com/rancher/kine => github.com/freeekanayaka/kine v0.4.1-0.20200624100627-dd35576ecefb
-+)
-diff --git a/vendor/github.com/canonical/kvsql-dqlite/go.sum b/vendor/github.com/canonical/kvsql-dqlite/go.sum
-new file mode 100644
-index 00000000000..72040f0d886
---- /dev/null
-+++ b/vendor/github.com/canonical/kvsql-dqlite/go.sum
-@@ -0,0 +1,567 @@
-+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-+github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
-+github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
-+github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
-+github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
-+github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
-+github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
-+github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
-+github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
-+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-+github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
-+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-+github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc=
-+github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-+github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-+github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-+github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
-+github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
-+github.com/Rican7/retry v0.1.0 h1:FqK94z34ly8Baa6K+G8Mmza9rYWTKOJk+yckIBB5qVk=
-+github.com/Rican7/retry v0.1.0/go.mod h1:FgOROf8P5bebcC1DS0PdOQiqGUridaZvikzUmkFW6gg=
-+github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
-+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-+github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
-+github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
-+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
-+github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
-+github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
-+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-+github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
-+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
-+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-+github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs=
-+github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
-+github.com/canonical/go-dqlite v1.5.1/go.mod h1:wp00vfMvPYgNCyxcPdHB5XExmDoCGoPUGymloAQT17Y=
-+github.com/canonical/go-dqlite v1.5.2 h1:fe0Rc5OUKdHxB8nKfBEw9REw/HGK4WeXXR04JtvI5ZQ=
-+github.com/canonical/go-dqlite v1.5.2/go.mod h1:wp00vfMvPYgNCyxcPdHB5XExmDoCGoPUGymloAQT17Y=
-+github.com/canonical/go-dqlite v1.6.0 h1:pGsSFWovRZGYamA2ASxyac8UmgVFtsr0byRmgTv4iLY=
-+github.com/canonical/go-dqlite v1.6.0/go.mod h1:MVgCUhFflG7GDAwb6q2CDp5kmdHyIX+XXkATDsiaDzw=
-+github.com/canonical/go-dqlite v1.7.0 h1:5AyISbm9VnPiAjFCUyQ882ppXv8U8d006EGCkmFdAVQ=
-+github.com/canonical/go-dqlite v1.7.0/go.mod h1:MVgCUhFflG7GDAwb6q2CDp5kmdHyIX+XXkATDsiaDzw=
-+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-+github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
-+github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
-+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-+github.com/coreos/etcd v3.3.22+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-+github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
-+github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
-+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-+github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
-+github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-+github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
-+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-+github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-+github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=
-+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-+github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
-+github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-+github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
-+github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
-+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-+github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
-+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-+github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-+github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-+github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-+github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-+github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
-+github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-+github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
-+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-+github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
-+github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-+github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-+github.com/emicklei/go-restful v2.13.0+incompatible h1:XwckZriGdbXs1EoZ7Y1MdH6hWqZ4XnkFSiEibNi5BXg=
-+github.com/emicklei/go-restful v2.13.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-+github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-+github.com/freeekanayaka/kine v0.4.1-0.20200624100627-dd35576ecefb h1:SgwrRE5uyngbPBCN5T0yhgoT3wD0FaUAXbdYXaXvomA=
-+github.com/freeekanayaka/kine v0.4.1-0.20200624100627-dd35576ecefb/go.mod h1:IImtCJ68AIkE+VY/kUI0NkyJL5q5WzO8QvMsSXqbrpA=
-+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-+github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-+github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
-+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-+github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
-+github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
-+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-+github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
-+github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
-+github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
-+github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
-+github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk=
-+github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU=
-+github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
-+github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
-+github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
-+github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
-+github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
-+github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
-+github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
-+github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-+github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
-+github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
-+github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
-+github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
-+github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
-+github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
-+github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
-+github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
-+github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs=
-+github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk=
-+github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA=
-+github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64=
-+github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4=
-+github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
-+github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
-+github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
-+github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY=
-+github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
-+github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
-+github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
-+github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY=
-+github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU=
-+github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
-+github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
-+github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
-+github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-+github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-+github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
-+github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
-+github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
-+github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
-+github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
-+github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
-+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-+github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
-+github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
-+github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
-+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-+github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-+github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-+github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-+github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
-+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-+github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
-+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-+github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w=
-+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-+github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
-+github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
-+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-+github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-+github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
-+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-+github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
-+github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
-+github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-+github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
-+github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
-+github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-+github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 h1:z53tR0945TRRQO/fLEVPI6SMv7ZflF0TEaTAoU7tOzg=
-+github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
-+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-+github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-+github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI=
-+github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-+github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-+github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=
-+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-+github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-+github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-+github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-+github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
-+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
-+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
-+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
-+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-+github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
-+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
-+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-+github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-+github.com/lib/pq v1.7.0 h1:h93mCPfUSkaul3Ka/VG8uZdmW1uMHDGxzu0NWHuJmHY=
-+github.com/lib/pq v1.7.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
-+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-+github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-+github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-+github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-+github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
-+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-+github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-+github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-+github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-+github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
-+github.com/mattn/go-sqlite3 v1.14.0 h1:mLyGNKR8+Vv9CAU7PphKa2hkEqxxhn8i32J6FPj1/QA=
-+github.com/mattn/go-sqlite3 v1.14.0/go.mod h1:JIl7NbARA7phWnGvh0LKTyg7S9BA+6gx71ShQilpsus=
-+github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U=
-+github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
-+github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
-+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
-+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-+github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-+github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
-+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-+github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-+github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
-+github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-+github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-+github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
-+github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-+github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
-+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-+github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
-+github.com/peterh/liner v1.2.0/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0=
-+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
-+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-+github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-+github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
-+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-+github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
-+github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM=
-+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=
-+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-+github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-+github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-+github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw=
-+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-+github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-+github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs=
-+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-+github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-+github.com/rancher/wrangler v0.4.0/go.mod h1:1cR91WLhZgkZ+U4fV9nVuXqKurWbgXcIReU4wnQvTN8=
-+github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M=
-+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
-+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-+github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
-+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-+github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
-+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-+github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
-+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
-+github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E=
-+github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
-+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
-+github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
-+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-+github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-+github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
-+github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
-+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
-+github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-+github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
-+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-+github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
-+github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
-+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-+github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
-+github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-+github.com/stretchr/testify v1.6.0 h1:jlIyCplCJFULU/01vCkhKuTyc3OorI3bJFuw6obfgho=
-+github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-+github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
-+github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-+github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ=
-+github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-+github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
-+github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
-+github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
-+github.com/urfave/cli v1.21.0/go.mod h1:lxDj6qX9Q6lWQxIrbrT0nwecwUtRnhVZAJjJZrVUZZQ=
-+github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
-+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
-+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
-+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
-+go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-+go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=
-+go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-+go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738 h1:VcrIfasaLFkyjk6KNlXQSzO+B0fZcnECiDrKJsfxka0=
-+go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
-+go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
-+go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
-+go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
-+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-+go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-+go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-+go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
-+go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
-+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-+go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
-+go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
-+go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
-+go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-+go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM=
-+go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
-+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-+golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-+golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-+golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-+golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-+golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 h1:7KByu05hhLed2MO29w7p1XfZvZ13m8mub3shuVftRs0=
-+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-+golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-+golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-+golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-+golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-+golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-+golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-+golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-+golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-+golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-+golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-+golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-+golang.org/x/net v0.0.0-20200602114024-627f9648deb9 h1:pNX+40auqi2JqRfOP1akLGtYcn15TUbkhwuCO3foqqM=
-+golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-+golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-+golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-+golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-+golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-+golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-+golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-+golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-+golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-+golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4 h1:5/PjkGUjvEU5Gl6BxmvKRPpqo2uNMv4rcHBMwzk/st8=
-+golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-+golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-+golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
-+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-+golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
-+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-+golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-+golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-+golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-+golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-+golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-+golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-+golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-+golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-+golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-+golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-+gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0=
-+gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
-+gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ=
-+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-+google.golang.org/genproto v0.0.0-20200624020401-64a14ca9d1ad h1:uAwc13+y0Y8QZLTYhLCu6lHhnG99ecQU5FYTj8zxAng=
-+google.golang.org/genproto v0.0.0-20200624020401-64a14ca9d1ad/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-+google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-+google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-+google.golang.org/grpc v1.30.0 h1:M5a8xTlYTxwMn5ZFkwhRabsygDY5G8TYLyQDBxJNAxE=
-+google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
-+google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
-+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
-+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
-+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-+gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
-+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
-+gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
-+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
-+gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
-+gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
-+gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
-+gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
-+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-+gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
-+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
-+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-+gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
-+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-+k8s.io/api v0.17.0/go.mod h1:npsyOePkeP0CPwyGfXDHxvypiYMJxBWAMpQxCaJ4ZxI=
-+k8s.io/apiextensions-apiserver v0.17.0/go.mod h1:XiIFUakZywkUl54fVXa7QTEHcqQz9HG55nHd1DCoHj8=
-+k8s.io/apimachinery v0.17.0 h1:xRBnuie9rXcPxUkDizUsGvPf1cnlZCFu210op7J7LJo=
-+k8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
-+k8s.io/apiserver v0.17.0 h1:XhUix+FKFDcBygWkQNp7wKKvZL030QUlH1o8vFeSgZA=
-+k8s.io/apiserver v0.17.0/go.mod h1:ABM+9x/prjINN6iiffRVNCBR2Wk7uY4z+EtEGZD48cg=
-+k8s.io/client-go v0.17.0/go.mod h1:TYgR6EUHs6k45hb6KWjVD6jFZvJV4gHDikv/It0xz+k=
-+k8s.io/code-generator v0.17.0/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s=
-+k8s.io/component-base v0.17.0 h1:BnDFcmBDq+RPpxXjmuYnZXb59XNN9CaFrX8ba9+3xrA=
-+k8s.io/component-base v0.17.0/go.mod h1:rKuRAokNMY2nn2A6LP/MiwpoaMRHpfRnrPaUJJj1Yoc=
-+k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
-+k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
-+k8s.io/gengo v0.0.0-20191120174120-e74f70b9b27e/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
-+k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
-+k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
-+k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
-+k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
-+k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
-+k8s.io/utils v0.0.0-20191114184206-e782cd3c129f h1:GiPwtSzdP43eI1hpPCbROQCCIgCuiMMNF8YUVLF3vJo=
-+k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
-+modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
-+modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
-+modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
-+modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
-+modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I=
-+sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
-+sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18=
-+sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
-+sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
-diff --git a/vendor/github.com/canonical/kvsql-dqlite/integration_test.go b/vendor/github.com/canonical/kvsql-dqlite/integration_test.go
-new file mode 100644
-index 00000000000..d70e74b64d4
---- /dev/null
-+++ b/vendor/github.com/canonical/kvsql-dqlite/integration_test.go
-@@ -0,0 +1,180 @@
-+package factory_test
-+
-+import (
-+ "context"
-+ "io/ioutil"
-+ "os"
-+ "path/filepath"
-+ "testing"
-+
-+ "github.com/canonical/kvsql-dqlite/server"
-+ "github.com/canonical/kvsql-dqlite/server/config"
-+ "github.com/stretchr/testify/assert"
-+ "github.com/stretchr/testify/require"
-+ "gopkg.in/yaml.v2"
-+ "k8s.io/apimachinery/pkg/api/apitesting"
-+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-+ "k8s.io/apimachinery/pkg/runtime"
-+ "k8s.io/apimachinery/pkg/runtime/serializer"
-+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
-+ "k8s.io/apiserver/pkg/apis/example"
-+ examplev1 "k8s.io/apiserver/pkg/apis/example/v1"
-+ "k8s.io/apiserver/pkg/storage"
-+ "k8s.io/apiserver/pkg/storage/storagebackend"
-+ "k8s.io/apiserver/pkg/storage/storagebackend/factory"
-+)
-+
-+func TestCreate_First(t *testing.T) {
-+ store, cleanup := newStore(t)
-+ defer cleanup()
-+
-+ ctx := context.Background()
-+
-+ out := &example.Pod{}
-+ obj := &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", SelfLink: "testlink"}}
-+ err := store.Create(ctx, "/foo", obj, out, uint64(0))
-+ require.NoError(t, err)
-+ err = store.Create(ctx, "/bar", obj, out, uint64(0))
-+ require.NoError(t, err)
-+}
-+
-+func TestCreate_Existing(t *testing.T) {
-+ store, cleanup := newStore(t)
-+ defer cleanup()
-+
-+ ctx := context.Background()
-+
-+ out := &example.Pod{}
-+ obj := &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", SelfLink: "testlink"}}
-+ require.NoError(t, store.Create(ctx, "foo", obj, out, uint64(0)))
-+
-+ err := store.Create(ctx, "/foo", obj, out, uint64(0))
-+ if err, ok := err.(*storage.StorageError); ok {
-+ assert.Equal(t, err.Code, storage.ErrCodeKeyExists)
-+ assert.Equal(t, err.Key, "/foo")
-+ } else {
-+ t.Fatalf("Unexpected error: %v", err)
-+ }
-+}
-+
-+func TestCreate_Concurrent(t *testing.T) {
-+ store, cleanup := newStore(t)
-+ defer cleanup()
-+
-+ ctx := context.Background()
-+
-+ errors := make(chan error, 0)
-+
-+ go func() {
-+ out := &example.Pod{}
-+ obj := &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", SelfLink: "testlink"}}
-+ errors <- store.Create(ctx, "foo", obj, out, uint64(0))
-+ }()
-+
-+ go func() {
-+ out := &example.Pod{}
-+ obj := &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "bar", SelfLink: "testlink"}}
-+ errors <- store.Create(ctx, "bar", obj, out, uint64(0))
-+ }()
-+
-+ require.NoError(t, <-errors)
-+ require.NoError(t, <-errors)
-+}
-+
-+func TestCreateAgainAfterDeletion(t *testing.T) {
-+ store, cleanup := newStore(t)
-+ defer cleanup()
-+
-+ ctx := context.Background()
-+
-+ out := &example.Pod{}
-+ obj := &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", SelfLink: "testlink"}}
-+ require.NoError(t, store.Create(ctx, "foo", obj, out, uint64(0)))
-+
-+ err := store.Delete(ctx, "/foo", obj, nil, func(context.Context, runtime.Object) error { return nil })
-+ require.NoError(t, err)
-+
-+ obj = &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", SelfLink: "testlink"}}
-+ require.NoError(t, store.Create(ctx, "foo", obj, out, uint64(0)))
-+}
-+
-+var scheme = runtime.NewScheme()
-+var codecs = serializer.NewCodecFactory(scheme)
-+
-+func init() {
-+ metav1.AddToGroupVersion(scheme, metav1.SchemeGroupVersion)
-+ utilruntime.Must(example.AddToScheme(scheme))
-+ utilruntime.Must(examplev1.AddToScheme(scheme))
-+}
-+
-+func newStore(t testing.TB) (storage.Interface, func()) {
-+ init := &config.Init{Address: "localhost:9991"}
-+ dir, dirCleanup := newDirWithInit(t, init)
-+
-+ server, err := server.New(dir)
-+ require.NoError(t, err)
-+
-+ codec := apitesting.TestCodec(codecs, examplev1.SchemeGroupVersion)
-+
-+ config := storagebackend.Config{
-+ Codec: codec,
-+ Dir: dir,
-+ Type: storagebackend.StorageTypeDqlite,
-+ }
-+
-+ store, destroy, err := factory.Create(config)
-+ require.NoError(t, err)
-+
-+ cleanup := func() {
-+ destroy()
-+ server.Close(context.Background())
-+ dirCleanup()
-+ }
-+
-+ return store, cleanup
-+
-+}
-+
-+// Return a new temporary directory populated with the test cluster certificate
-+// and an init.yaml file with the given content.
-+func newDirWithInit(t testing.TB, init *config.Init) (string, func()) {
-+ dir, cleanup := newDirWithCert(t)
-+
-+ path := filepath.Join(dir, "init.yaml")
-+ bytes, err := yaml.Marshal(init)
-+ require.NoError(t, err)
-+ require.NoError(t, ioutil.WriteFile(path, bytes, 0644))
-+
-+ return dir, cleanup
-+}
-+
-+// Return a new temporary directory populated with the test cluster certificate.
-+func newDirWithCert(t testing.TB) (string, func()) {
-+ t.Helper()
-+
-+ dir, cleanup := newDir(t)
-+
-+ // Create symlinks to the test certificates.
-+ for _, filename := range []string{"cluster.crt", "cluster.key"} {
-+ link := filepath.Join(dir, filename)
-+ target, err := filepath.Abs(filepath.Join("server/testdata", filename))
-+ require.NoError(t, err)
-+ require.NoError(t, os.Symlink(target, link))
-+ }
-+
-+ return dir, cleanup
-+}
-+
-+// Return a new temporary directory.
-+func newDir(t testing.TB) (string, func()) {
-+ t.Helper()
-+
-+ dir, err := ioutil.TempDir("", "kvsql-server-test-")
-+ require.NoError(t, err)
-+
-+ cleanup := func() {
-+ require.NoError(t, os.RemoveAll(dir))
-+ }
-+
-+ return dir, cleanup
-+}
-diff --git a/vendor/github.com/canonical/kvsql-dqlite/rest.go b/vendor/github.com/canonical/kvsql-dqlite/rest.go
-new file mode 100644
-index 00000000000..b77db492722
---- /dev/null
-+++ b/vendor/github.com/canonical/kvsql-dqlite/rest.go
-@@ -0,0 +1,26 @@
-+package factory
-+
-+import (
-+ restful "github.com/emicklei/go-restful"
-+)
-+
-+type Rest struct{}
-+
-+func (r Rest) Install(c *restful.Container) {
-+ ws := new(restful.WebService)
-+ ws.Path("/dqlite").Consumes(restful.MIME_JSON).Produces(restful.MIME_JSON)
-+ ws.Doc("dqlite cluster management")
-+ ws.Route(ws.GET("/").To(getHandler))
-+ c.Add(ws)
-+}
-+
-+func getHandler(req *restful.Request, resp *restful.Response) {
-+ foo := struct {
-+ A string
-+ B int
-+ }{
-+ A: "foo",
-+ B: 123,
-+ }
-+ resp.WriteEntity(foo)
-+}
-diff --git a/vendor/github.com/canonical/kvsql-dqlite/server/config/config.go b/vendor/github.com/canonical/kvsql-dqlite/server/config/config.go
-new file mode 100644
-index 00000000000..e2fec241b56
---- /dev/null
-+++ b/vendor/github.com/canonical/kvsql-dqlite/server/config/config.go
-@@ -0,0 +1,107 @@
-+package config
-+
-+import (
-+ "crypto/tls"
-+ "crypto/x509"
-+ "fmt"
-+ "io/ioutil"
-+ "os"
-+ "path/filepath"
-+ "strconv"
-+ "strings"
-+
-+ "github.com/pkg/errors"
-+)
-+
-+// Config holds the server configuraton loaded from disk.
-+type Config struct {
-+ KeyPair tls.Certificate
-+ Pool *x509.CertPool
-+ Init *Init // Initialization parameters, for new servers.
-+ Address string // Server address
-+ Update *Update // Configuration updates
-+ FailureDomain uint64
-+}
-+
-+// Load current the configuration from disk.
-+func Load(dir string) (*Config, error) {
-+ // Migrate the legacy node store.
-+ if err := migrateNodeStore(dir); err != nil {
-+ return nil, err
-+ }
-+
-+ // Load the TLS certificates.
-+ crt := filepath.Join(dir, "cluster.crt")
-+ key := filepath.Join(dir, "cluster.key")
-+
-+ keypair, err := tls.LoadX509KeyPair(crt, key)
-+ if err != nil {
-+ return nil, errors.Wrap(err, "load keypair")
-+ }
-+
-+ data, err := ioutil.ReadFile(crt)
-+ if err != nil {
-+ return nil, errors.Wrap(err, "read certificate")
-+ }
-+
-+ pool := x509.NewCertPool()
-+ if !pool.AppendCertsFromPEM(data) {
-+ return nil, fmt.Errorf("bad certificate")
-+ }
-+
-+ // Check if we're initializing a new node (i.e. there's an init.yaml).
-+ init, err := loadInit(dir)
-+ if err != nil {
-+ return nil, err
-+ }
-+
-+ var update *Update
-+ if init == nil {
-+ update, err = loadUpdate(dir)
-+ if err != nil {
-+ return nil, err
-+ }
-+ }
-+
-+ domain, err := loadFailureDomain(dir)
-+ if err != nil {
-+ return nil, err
-+ }
-+
-+ config := &Config{
-+ KeyPair: keypair,
-+ Pool: pool,
-+ Init: init,
-+ Update: update,
-+ FailureDomain: domain,
-+ }
-+
-+ return config, nil
-+}
-+
-+// Load failure-domain if present, or return 0 otherwise.
-+func loadFailureDomain(dir string) (uint64, error) {
-+ path := filepath.Join(dir, "failure-domain")
-+
-+ if _, err := os.Stat(path); err != nil {
-+ if !os.IsNotExist(err) {
-+ return 0, errors.Wrap(err, "check if failure-domain exists")
-+ }
-+ return 0, nil
-+
-+ }
-+
-+ data, err := ioutil.ReadFile(path)
-+ if err != nil {
-+ return 0, errors.Wrap(err, "read failure-domain")
-+ }
-+
-+ text := strings.Trim(string(data), "\n")
-+
-+ n, err := strconv.Atoi(text)
-+ if err != nil || n < 0 {
-+ return 0, errors.Wrapf(err, "invalid failure domain %q", text)
-+ }
-+
-+ return uint64(n), nil
-+}
-diff --git a/vendor/github.com/canonical/kvsql-dqlite/server/config/dqlite.go b/vendor/github.com/canonical/kvsql-dqlite/server/config/dqlite.go
-new file mode 100644
-index 00000000000..b8ff56678df
---- /dev/null
-+++ b/vendor/github.com/canonical/kvsql-dqlite/server/config/dqlite.go
-@@ -0,0 +1,45 @@
-+package config
-+
-+import (
-+ "context"
-+ "os"
-+ "path/filepath"
-+
-+ "github.com/canonical/go-dqlite/client"
-+ "github.com/pkg/errors"
-+)
-+
-+// Migrate the legacy servers.sql SQLite database containing the addresses
-+// of the servers in the cluster.
-+func migrateNodeStore(dir string) error {
-+ // Possibly migrate from older path.
-+ legacyStorePath := filepath.Join(dir, "servers.sql")
-+ if _, err := os.Stat(legacyStorePath); err != nil {
-+ if !os.IsNotExist(err) {
-+ return errors.Wrap(err, "check if legacy node store exists")
-+ }
-+ return nil
-+ }
-+
-+ legacyStore, err := client.DefaultNodeStore(legacyStorePath)
-+ if err != nil {
-+ return errors.Wrap(err, "open legacy node store")
-+ }
-+ servers, err := legacyStore.Get(context.Background())
-+ if err != nil {
-+ return errors.Wrap(err, "get servers from legacy node store")
-+ }
-+
-+ store, err := client.NewYamlNodeStore(filepath.Join(dir, "cluster.yaml"))
-+ if err != nil {
-+ return errors.Wrap(err, "open node store")
-+ }
-+ if err := store.Set(context.Background(), servers); err != nil {
-+ return errors.Wrap(err, "migrate servers to new node store")
-+ }
-+ if err := os.Remove(legacyStorePath); err != nil {
-+ return errors.Wrap(err, "remove legacy store path")
-+ }
-+
-+ return nil
-+}
-diff --git a/vendor/github.com/canonical/kvsql-dqlite/server/config/init.go b/vendor/github.com/canonical/kvsql-dqlite/server/config/init.go
-new file mode 100644
-index 00000000000..5b137ec50b9
---- /dev/null
-+++ b/vendor/github.com/canonical/kvsql-dqlite/server/config/init.go
-@@ -0,0 +1,72 @@
-+package config
-+
-+import (
-+ "fmt"
-+ "io/ioutil"
-+ "os"
-+ "path/filepath"
-+
-+ "github.com/ghodss/yaml"
-+ "github.com/pkg/errors"
-+)
-+
-+// Init holds all information needed to initialize a server.
-+type Init struct {
-+ Address string
-+ Cluster []string
-+}
-+
-+var initialFilenames = []string{
-+ "init.yaml",
-+ "cluster.key",
-+ "cluster.crt",
-+ "failure-domain",
-+}
-+
-+// Load init.yaml if present, or return nil otherwise.
-+func loadInit(dir string) (*Init, error) {
-+ path := filepath.Join(dir, "init.yaml")
-+
-+ if _, err := os.Stat(path); err != nil {
-+ if !os.IsNotExist(err) {
-+ return nil, errors.Wrap(err, "check if init.yaml exists")
-+ }
-+ return nil, nil
-+
-+ }
-+
-+ // Check that the only files in the directory are the TLS certificate.
-+ files, err := ioutil.ReadDir(dir)
-+ if err != nil {
-+ return nil, errors.Wrap(err, "list data directory")
-+ }
-+ for _, file := range files {
-+ expected := false
-+ for _, filename := range initialFilenames {
-+ if filename == file.Name() {
-+ expected = true
-+ break
-+ }
-+ }
-+ if !expected {
-+ return nil, fmt.Errorf("data directory seems to have existing state: %s", file.Name())
-+ }
-+ }
-+
-+ data, err := ioutil.ReadFile(path)
-+ if err != nil {
-+ return nil, errors.Wrap(err, "read init.yaml")
-+ }
-+
-+ init := &Init{}
-+ if err := yaml.Unmarshal(data, init); err != nil {
-+ return nil, errors.Wrap(err, "parse init.yaml")
-+ }
-+
-+ if init.Address == "" {
-+ return nil, fmt.Errorf("server address is empty")
-+ }
-+
-+ return init, nil
-+
-+}
-diff --git a/vendor/github.com/canonical/kvsql-dqlite/server/config/update.go b/vendor/github.com/canonical/kvsql-dqlite/server/config/update.go
-new file mode 100644
-index 00000000000..898e293a0f8
---- /dev/null
-+++ b/vendor/github.com/canonical/kvsql-dqlite/server/config/update.go
-@@ -0,0 +1,45 @@
-+package config
-+
-+import (
-+ "fmt"
-+ "io/ioutil"
-+ "os"
-+ "path/filepath"
-+
-+ "github.com/ghodss/yaml"
-+ "github.com/pkg/errors"
-+)
-+
-+// Update holds configuration updates.
-+type Update struct {
-+ Address string
-+}
-+
-+// Load update.yaml if present, or return nil otherwise.
-+func loadUpdate(dir string) (*Update, error) {
-+ path := filepath.Join(dir, "update.yaml")
-+
-+ if _, err := os.Stat(path); err != nil {
-+ if !os.IsNotExist(err) {
-+ return nil, errors.Wrap(err, "check if update.yaml exists")
-+ }
-+ return nil, nil
-+
-+ }
-+
-+ data, err := ioutil.ReadFile(path)
-+ if err != nil {
-+ return nil, errors.Wrap(err, "read init.yaml")
-+ }
-+
-+ update := &Update{}
-+ if err := yaml.Unmarshal(data, update); err != nil {
-+ return nil, errors.Wrap(err, "parse update.yaml")
-+ }
-+
-+ if update.Address == "" {
-+ return nil, fmt.Errorf("server address is empty")
-+ }
-+
-+ return update, nil
-+}
-diff --git a/vendor/github.com/canonical/kvsql-dqlite/server/server.go b/vendor/github.com/canonical/kvsql-dqlite/server/server.go
-new file mode 100644
-index 00000000000..78bfa54375d
---- /dev/null
-+++ b/vendor/github.com/canonical/kvsql-dqlite/server/server.go
-@@ -0,0 +1,126 @@
-+package server
-+
-+import (
-+ "context"
-+ "fmt"
-+ "io/ioutil"
-+ "os"
-+ "path/filepath"
-+ "time"
-+
-+ "github.com/canonical/go-dqlite"
-+ "github.com/canonical/go-dqlite/app"
-+ "github.com/canonical/go-dqlite/client"
-+ "github.com/canonical/kvsql-dqlite/server/config"
-+ "github.com/ghodss/yaml"
-+ "github.com/pkg/errors"
-+ "github.com/rancher/kine/pkg/endpoint"
-+)
-+
-+// Server sets up a single dqlite node and serves the cluster management API.
-+type Server struct {
-+ dir string // Data directory
-+ address string // Network address
-+ app *app.App
-+ cancelKine context.CancelFunc
-+}
-+
-+func New(dir string) (*Server, error) {
-+ // Check if we're initializing a new node (i.e. there's an init.yaml).
-+ cfg, err := config.Load(dir)
-+ if err != nil {
-+ return nil, err
-+ }
-+
-+ if cfg.Update != nil {
-+ info := client.NodeInfo{}
-+ path := filepath.Join(dir, "info.yaml")
-+ data, err := ioutil.ReadFile(path)
-+ if err != nil {
-+ return nil, err
-+ }
-+ if err := yaml.Unmarshal(data, &info); err != nil {
-+ return nil, err
-+ }
-+ info.Address = cfg.Update.Address
-+ data, err = yaml.Marshal(info)
-+ if err != nil {
-+ return nil, err
-+ }
-+ if err := ioutil.WriteFile(path, data, 0600); err != nil {
-+ return nil, err
-+ }
-+ nodes := []dqlite.NodeInfo{info}
-+ if err := dqlite.ReconfigureMembership(dir, nodes); err != nil {
-+ return nil, err
-+ }
-+ store, err := client.NewYamlNodeStore(filepath.Join(dir, "cluster.yaml"))
-+ if err != nil {
-+ return nil, err
-+ }
-+ if err := store.Set(context.Background(), nodes); err != nil {
-+ return nil, err
-+ }
-+ if err := os.Remove(filepath.Join(dir, "update.yaml")); err != nil {
-+ return nil, errors.Wrap(err, "remove update.yaml")
-+ }
-+ }
-+
-+ options := []app.Option{
-+ app.WithTLS(app.SimpleTLSConfig(cfg.KeyPair, cfg.Pool)),
-+ app.WithFailureDomain(cfg.FailureDomain),
-+ }
-+
-+ // Possibly initialize our ID, address and initial node store content.
-+ if cfg.Init != nil {
-+ options = append(options, app.WithAddress(cfg.Init.Address), app.WithCluster(cfg.Init.Cluster))
-+ }
-+
-+ app, err := app.New(dir, options...)
-+ if err != nil {
-+ return nil, err
-+ }
-+ if cfg.Init != nil {
-+ if err := os.Remove(filepath.Join(dir, "init.yaml")); err != nil {
-+ return nil, err
-+ }
-+ }
-+
-+ ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
-+ defer cancel()
-+
-+ if err := app.Ready(ctx); err != nil {
-+ return nil, err
-+ }
-+
-+ socket := filepath.Join(dir, "kine.sock")
-+ peers := filepath.Join(dir, "cluster.yaml")
-+ config := endpoint.Config{
-+ Listener: fmt.Sprintf("unix://%s", socket),
-+ Endpoint: fmt.Sprintf("dqlite://k8s?peer-file=%s&driver-name=%s", peers, app.Driver()),
-+ }
-+ kineCtx, cancelKine := context.WithCancel(context.Background())
-+ if _, err := endpoint.Listen(kineCtx, config); err != nil {
-+ return nil, errors.Wrap(err, "kine")
-+ }
-+
-+ s := &Server{
-+ dir: dir,
-+ address: cfg.Address,
-+ app: app,
-+ cancelKine: cancelKine,
-+ }
-+
-+ return s, nil
-+}
-+
-+func (s *Server) Close(ctx context.Context) error {
-+ if s.cancelKine != nil {
-+ s.cancelKine()
-+ }
-+ s.app.Handover(ctx)
-+ if err := s.app.Close(); err != nil {
-+ return errors.Wrap(err, "stop dqlite app")
-+ }
-+ return nil
-+}
-diff --git a/vendor/github.com/canonical/kvsql-dqlite/server/server_test.go b/vendor/github.com/canonical/kvsql-dqlite/server/server_test.go
-new file mode 100644
-index 00000000000..d625aea8d48
---- /dev/null
-+++ b/vendor/github.com/canonical/kvsql-dqlite/server/server_test.go
-@@ -0,0 +1,150 @@
-+package server_test
-+
-+import (
-+ "context"
-+ "fmt"
-+ "io/ioutil"
-+ "os"
-+ "path/filepath"
-+ "testing"
-+ "time"
-+
-+ "github.com/canonical/kvsql-dqlite/server"
-+ "github.com/canonical/kvsql-dqlite/server/config"
-+ "github.com/ghodss/yaml"
-+ "github.com/stretchr/testify/require"
-+ "go.etcd.io/etcd/clientv3"
-+)
-+
-+func TestNew_FirstNode_Init(t *testing.T) {
-+ init := &config.Init{Address: "localhost:9991"}
-+ dir, cleanup := newDirWithInit(t, init)
-+ defer cleanup()
-+
-+ server, err := server.New(dir)
-+ require.NoError(t, err)
-+
-+ require.NoError(t, server.Close(context.Background()))
-+}
-+
-+func TestNew_FirstNode_Restart(t *testing.T) {
-+ init := &config.Init{Address: "localhost:9991"}
-+ dir, cleanup := newDirWithInit(t, init)
-+ defer cleanup()
-+
-+ s, err := server.New(dir)
-+ require.NoError(t, err)
-+
-+ require.NoError(t, s.Close(context.Background()))
-+
-+ s, err = server.New(dir)
-+ require.NoError(t, err)
-+
-+ require.NoError(t, s.Close(context.Background()))
-+}
-+
-+func TestNew_SecondNode_Init(t *testing.T) {
-+ init1 := &config.Init{Address: "localhost:9991"}
-+ dir1, cleanup1 := newDirWithInit(t, init1)
-+ defer cleanup1()
-+
-+ s1, err := server.New(dir1)
-+ require.NoError(t, err)
-+
-+ init2 := &config.Init{Address: "localhost:9992", Cluster: []string{"localhost:9991"}}
-+ dir2, cleanup2 := newDirWithInit(t, init2)
-+ defer cleanup2()
-+
-+ s2, err := server.New(dir2)
-+ require.NoError(t, err)
-+
-+ ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond)
-+ defer cancel()
-+
-+ s1.Close(ctx)
-+ s2.Close(ctx)
-+}
-+
-+func TestNew_FirstNode_Kine(t *testing.T) {
-+ init := &config.Init{Address: "localhost:9991"}
-+ dir, cleanup := newDirWithInit(t, init)
-+ defer cleanup()
-+
-+ server, err := server.New(dir)
-+ require.NoError(t, err)
-+
-+ sock := filepath.Join(dir, "kine.sock")
-+ cfg := clientv3.Config{Endpoints: []string{fmt.Sprintf("unix://%s", sock)}}
-+ client, err := clientv3.New(cfg)
-+ require.NoError(t, err)
-+
-+ _, err = client.Get(context.Background(), "/")
-+ require.NoError(t, err)
-+
-+ require.NoError(t, server.Close(context.Background()))
-+}
-+
-+func TestNew_Update(t *testing.T) {
-+ init := &config.Init{Address: "localhost:9991"}
-+ dir, cleanup := newDirWithInit(t, init)
-+ defer cleanup()
-+
-+ s, err := server.New(dir)
-+ require.NoError(t, err)
-+
-+ require.NoError(t, s.Close(context.Background()))
-+
-+ path := filepath.Join(dir, "update.yaml")
-+ data, err := yaml.Marshal(struct{ Address string }{Address: "localhost:9992"})
-+ require.NoError(t, err)
-+ require.NoError(t, ioutil.WriteFile(path, data, 0644))
-+
-+ s, err = server.New(dir)
-+ require.NoError(t, err)
-+
-+ require.NoError(t, s.Close(context.Background()))
-+}
-+
-+// Return a new temporary directory populated with the test cluster certificate
-+// and an init.yaml file with the given content.
-+func newDirWithInit(t *testing.T, init *config.Init) (string, func()) {
-+ dir, cleanup := newDirWithCert(t)
-+
-+ path := filepath.Join(dir, "init.yaml")
-+ bytes, err := yaml.Marshal(init)
-+ require.NoError(t, err)
-+ require.NoError(t, ioutil.WriteFile(path, bytes, 0644))
-+
-+ return dir, cleanup
-+}
-+
-+// Return a new temporary directory populated with the test cluster certificate.
-+func newDirWithCert(t *testing.T) (string, func()) {
-+ t.Helper()
-+
-+ dir, cleanup := newDir(t)
-+
-+ // Create symlinks to the test certificates.
-+ for _, filename := range []string{"cluster.crt", "cluster.key"} {
-+ link := filepath.Join(dir, filename)
-+ target, err := filepath.Abs(filepath.Join("testdata", filename))
-+ require.NoError(t, err)
-+ require.NoError(t, os.Symlink(target, link))
-+ }
-+
-+ return dir, cleanup
-+}
-+
-+// Return a new temporary directory.
-+func newDir(t *testing.T) (string, func()) {
-+ t.Helper()
-+
-+ dir, err := ioutil.TempDir("", "kvsql-server-test-")
-+ require.NoError(t, err)
-+
-+ cleanup := func() {
-+ require.NoError(t, os.RemoveAll(dir))
-+ }
-+
-+ return dir, cleanup
-+}
-diff --git a/vendor/github.com/canonical/kvsql-dqlite/server/testdata/cluster.crt b/vendor/github.com/canonical/kvsql-dqlite/server/testdata/cluster.crt
-new file mode 100644
-index 00000000000..a2f6b6b6b88
---- /dev/null
-+++ b/vendor/github.com/canonical/kvsql-dqlite/server/testdata/cluster.crt
-@@ -0,0 +1,30 @@
-+-----BEGIN CERTIFICATE-----
-+MIIFEjCCAvqgAwIBAgIUYSqxgQfT5f+IidyCbx68R8qWgqswDQYJKoZIhvcNAQEL
-+BQAwDjEMMAoGA1UEAwwDazhzMB4XDTE5MTAwODExMDc0MloXDTI5MTAwNTExMDc0
-+MlowDjEMMAoGA1UEAwwDazhzMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKC
-+AgEAtifR9tqD0eXGh/duH02eaYWu8cNp373kTft5zuWd5/iCxPURdceNYjbbnA25
-+ez4bfQcz7tGRZMPcxfBkxJqR0Jais6Jg4Lm8SMJHioPkVWULZIWV9Q6DUDTnpB20
-+4NQrLXTc93GvvCdQ529fEnmW2cpPsCiVq+S33ffJg9+9O9bW18qJ8um1tkPH2SLc
-+tsTJabKe1Yd++QduGBesC1OqToXIiZ0ZVCBkipjqKnapx1vPrgvsHd+tBH+ii/Tj
-+a1BPwoV4W5D0rnHDu5qaloDVoYUDpmJw/mawY/qDJ5MdnD5osfYMXC4X92jL/kix
-+IJLpCBFN0ALHEz3txYyoH5pSXrhIPvt0A95z9VDa2UTSkudxlhcfd9BVmCtp0Sl6
-+iK5UANYxDHEjLa3sUq1t80lgOIvfgafEtaBbjjY3eJGJpVROGt57pk7yt5MgfAJP
-+i+Z3trpl5ZmHurPnQTfXzT3Gqp+4ih2a8dHCtDnUAminVp5RYETfuV+UK5a/UH7a
-+PJVOqnljZXQ0Yi2rd1v+7pdWp0zKtZIrkxvcsO3s9sSYV20CAj9oiRNX4XWqOX4G
-+VgmVDhWnl1rrRpXBZ85T+E9MqZMzgez+i03Y1q7nhg27Api9nt0Yra8bF8TFk19E
-+UR3ItF6076TqoMAbnQ3SqDkjJWP5ZjX1qJ/fnvnc5hUdks8CAwEAAaNoMGYwHQYD
-+VR0OBBYEFMP4a6mttRfQ5/HvlVrRak/C9HTdMB8GA1UdIwQYMBaAFMP4a6mttRfQ
-+5/HvlVrRak/C9HTdMA8GA1UdEwEB/wQFMAMBAf8wEwYDVR0RBAwwCoICeDGHBMCo
-+AnUwDQYJKoZIhvcNAQELBQADggIBABgjEzueUDUGHJIXIhm8jVS7G76h4X9MPjUn
-+koNNDPss78tp5f6YCYATuztqYnHJr4qMTv0VdeVxBwIxeNJZWTuIRqutGxt/968S
-+ty4mbX2UykWaka4deNrtKx0lsjflOiuJmNW2MqokjWVDlc3hHF1SdEr9cAyhqF+z
-+qm+MA11kn0V8CMXhwiRTgcgDpwlbe9uwtzmAVBggwIX+jsjxw9VicQbsdxQVi9we
-+asEIIud0xPQ9Dy/V2oI/M0bgkRhYk2Xi6SJ3oInGhP5+SIwQLnv+TPEPNOQQ2LbP
-+1FIWXKtK2Z2YmUF1EPKpdiOL/7DwuFR9Z17P5wg2+ytofpJT/kF+m0ul1sHYB9Vn
-+NA+5FYfKbGktbt8PT8/2Njwy3lauyY5IXn9U9v0Hi/c3zssouPQUuJufRVQeI31T
-+2toAWG1s89fFvaRRpvLZLU+jw4zhzWtSIsiX+3Mj17too3NK1OtKuDDrCv84VlpN
-+i1l87Xytv19QffDbF1r7PitNJeT26GG5OyMYWfcU+GSOLREPwadqW3gZXyYxF6xC
-+zkLBpj81W3nXXnGA8GVavsouY2QTYZAL3LM5L2omWAebLXU2lHLcaCc1p4lK7mLC
-+qy6suSrihITSqCgJ0VkZKO4dFsMPQLAQzra4GfhCsyztF7R7g3tZ7423jKQckI2t
-+ISYPCIqj
-+-----END CERTIFICATE-----
-diff --git a/vendor/github.com/canonical/kvsql-dqlite/server/testdata/cluster.key b/vendor/github.com/canonical/kvsql-dqlite/server/testdata/cluster.key
-new file mode 100644
-index 00000000000..78862e05e6e
---- /dev/null
-+++ b/vendor/github.com/canonical/kvsql-dqlite/server/testdata/cluster.key
-@@ -0,0 +1,52 @@
-+-----BEGIN PRIVATE KEY-----
-+MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQC2J9H22oPR5caH
-+924fTZ5pha7xw2nfveRN+3nO5Z3n+ILE9RF1x41iNtucDbl7Pht9BzPu0ZFkw9zF
-+8GTEmpHQlqKzomDgubxIwkeKg+RVZQtkhZX1DoNQNOekHbTg1CstdNz3ca+8J1Dn
-+b18SeZbZyk+wKJWr5Lfd98mD37071tbXyony6bW2Q8fZIty2xMlpsp7Vh375B24Y
-+F6wLU6pOhciJnRlUIGSKmOoqdqnHW8+uC+wd360Ef6KL9ONrUE/ChXhbkPSuccO7
-+mpqWgNWhhQOmYnD+ZrBj+oMnkx2cPmix9gxcLhf3aMv+SLEgkukIEU3QAscTPe3F
-+jKgfmlJeuEg++3QD3nP1UNrZRNKS53GWFx930FWYK2nRKXqIrlQA1jEMcSMtrexS
-+rW3zSWA4i9+Bp8S1oFuONjd4kYmlVE4a3numTvK3kyB8Ak+L5ne2umXlmYe6s+dB
-+N9fNPcaqn7iKHZrx0cK0OdQCaKdWnlFgRN+5X5Qrlr9Qfto8lU6qeWNldDRiLat3
-+W/7ul1anTMq1kiuTG9yw7ez2xJhXbQICP2iJE1fhdao5fgZWCZUOFaeXWutGlcFn
-+zlP4T0ypkzOB7P6LTdjWrueGDbsCmL2e3RitrxsXxMWTX0RRHci0XrTvpOqgwBud
-+DdKoOSMlY/lmNfWon9+e+dzmFR2SzwIDAQABAoICACIQkFN5ye0W62nB+/YereFN
-+/sl3ofUVrfxnawAmMMGQ1+O9cOZ1yxry517nsig6FSnsjum6h1Ywyri5nB6oSVyY
-+zxSI6VUdZvi0XGyq/ah6nQbPdJu/ew8k1az7YTgqdWiq0if0OskVESw2x7DMzEku
-+bHilHlhnDXuVOzpOPTVTmBoC5olm63SKeHVC78O+SWxa2xlTnYs0uIII60OzAcZt
-+fnM9zd7Nr/+7WQ9X4ahGozAJSr7Lda/BrsQ9p8oJ2uCl1TxKf3egDtqmrwI2Vb0/
-+As+62Sa6TYJk9uIPwRnXEf015lnWxOSf1IZZUcalsq6vjb24x0Hx8uVeyJIgaga7
-+lsuU/a/ev+y4zBClPc9b9sOfELgGhYUdWrAYu7IM7pJhHc//4SWZV3+0bntmvn9B
-+Mr+UyRrCx69xAZbPELWYOeM9RC2CyQzEq75zeWmfh7rG5F6zyESZR+st9aFGCVkB
-+PnWhjqYgEw1fDDA52DBKiQfc+b9SBSREUOz59mduJwqswCgcKOqj95nNFhyRLfXl
-+ZXoSMtf5w994T9giFdKl/VHqUd+ifL7Fu2y9T970A624i7NbSmu34q/5WAu7R3tz
-+FKc+hIjNBAY5I6FmNan3sSeAIkt3OkpLmdW0T8HgAosHsEfgEOy4zVjFAPAejgOn
-+3oS3zFJjwv18EO1nCHQBAoIBAQDsWXmq26e2KtJuSyXJE8hfxkw2mgFqrdQWqd53
-+vuxf7+Vy5jclyIH19vgrfUuPt5pHKvjXMGv7C7E26vmGdaLFvyQF88QdDHbsuvv2
-+opX0jvZ3roYaYMk7wnpi0Rn5pPS5zBOlRFazHDwJVHAFwnxuOXSaaBRbEpd6J/Re
-+DWQFwjWBoP688h3mkz4KAHYJaHVLk3BGTy/ke3m3RDhc9eqfGTa0lkvQPvEzvGVP
-+WOZ4twtJvxUg7Y4ke1I0t4a6mgmKHONWcDPVQQeGl2515H7uTSEnXA2BGZWvXVHj
-+KEq8XfdO1EOzSijdF0gKjJvGPXL5r8mY9OEDcZTxc7294fefAoIBAQDFTN3Mua3H
-+CWXgtO9HTryfmpZJtG/lX1W72xJfRb7iGJdiSSOL1N/4L6Z08alJfmIeOt42qgU4
-+T3uP/Mrk0mii9KgolbHslQzlYhfKV3cYUHkaoTLgmBAvBprtvYaSyCn60pXihpIf
-+dhA1T8u+Sj0OJOeHrTHORZ/yCfb4oAiXRGsyWlzERXd52MWWgfINYFheOJ5eTnkC
-+qYuDNMQLxWri9wAv1rXRizW4W7rvP+Uf2fbYE7ThI6xc669qp7NZegERcPkXUAbl
-+Roeyd6Rr6OROTRUTxxPzENmFnQhf7BrFlfRTfBXhESl63ojD6AfH0u9zmY7HbNlS
-+dV2afa4wrVbRAoIBAFCb87MiV4D5+etEFg8OgDiqGTRGMlWAE+Xm6clNhLDz0P3v
-+Jr/sMucA5INwqGTL6gJ71YMPMMTeqAJjAdeqJ2nqVw7wpOKDbcwvud/Uc/CFEeqf
-+y30TpY6MNdGeH43VhOIZiFnFhQxxhMhp1Asi2e1+CxbeiJLHNIKlPeMWDhcotbcQ
-+gdFPgoMxlOc0YQ20Yszz7EcOmvcCZfDrg42xEeRRZScSW6kazokmqCb32yMwytjM
-+Ixin7OLkbJUuQvVBtB822xKQEh2ZoEd3PmVXGTONetFBxBlT8Ilu+o7PHy4/nil0
-+m+J6WJWPazKt4jMqohwmqEtSuavDAVmYcA5jE2ECggEBAJpdQG+rmn3OKYo8pbUp
-+skRBQ3JZHO7dvwsiJGar/+OPj+mTBZ+iprTcOyTl6igZHJVAQ5mLPlhHP4pxIyES
-+SlGsj2BpdnJ+Tz+slqZ19jTsyUN/eXnlH0xbo518Giz/NHEMj75SwUKiMZ40V1kD
-+Zm+JiihJ5lzXdoN9LmBpfS80lZYowsnA+/A2HKlb89BpX9of/DAWv3DIy+GUyAnZ
-+pH38+IV1PH28qAQcqM4FNLPN6eY2zN8U9QciUvVkTdskrG9D729A6pQ5sXN9TgQr
-+i2yDvIjjygTzWuzB5sp9xJhwcV9cBxY3Qjfioih8gpHhw/yy2NXIoON19g0oZb+p
-+7vECggEAFi2D1pvHZ8FKT4jo12clqW2BgpNq4KR0vq0NZhtV4XvBZh+CLc/IAvTY
-+JvZ5u5hvXkM9GklNLj7ykW6bRuNIlTR4pAypPmdaBH/Re6cI8CFqyNinLqvNqtfI
-+Zoo3b5+AB353mpz2siycd7dF93YfaMncBd1ZA5aa0XmHECmRng8IJ6rSVdOWpO7O
-+8F+REM29zm9qa5KYxvxg0wSX6rVByCSXrPQkMlGxmNjhIAKroWxIze3GKIIns23E
-+bbQ+hnTuB0WVcAr2y5eKNja0dw17r0Hq/zmjn1NU8OgPJrPjAEO85x/hgphnJIca
-+5/fhWKFchx6XC0C1gg4ve6yVyZeQmg==
-+-----END PRIVATE KEY-----
-diff --git a/vendor/github.com/docker/docker/pkg/locker/README.md b/vendor/github.com/docker/docker/pkg/locker/README.md
-new file mode 100644
-index 00000000000..b1227f52c30
---- /dev/null
-+++ b/vendor/github.com/docker/docker/pkg/locker/README.md
-@@ -0,0 +1,64 @@
-+Locker
-+=====
-+
-+locker provides a mechanism for creating finer-grained locking to help
-+free up more global locks to handle other tasks.
-+
-+The implementation looks close to a sync.Mutex, however, the user must provide a
-+reference to use to refer to the underlying lock when locking and unlocking,
-+and unlock may generate an error.
-+
-+If a lock with a given name does not exist when `Lock` is called, one is
-+created.
-+Lock references are automatically cleaned up on `Unlock` if nothing else is
-+waiting for the lock.
-+
-+
-+## Usage
-+
-+```go
-+package important
-+
-+import (
-+ "sync"
-+ "time"
-+
-+ "github.com/docker/docker/pkg/locker"
-+)
-+
-+type important struct {
-+ locks *locker.Locker
-+ data map[string]interface{}
-+ mu sync.Mutex
-+}
-+
-+func (i *important) Get(name string) interface{} {
-+ i.locks.Lock(name)
-+ defer i.locks.Unlock(name)
-+ return i.data[name]
-+}
-+
-+func (i *important) Create(name string, data interface{}) {
-+ i.locks.Lock(name)
-+ defer i.locks.Unlock(name)
-+
-+ i.createImportant(data)
-+
-+ i.mu.Lock()
-+ i.data[name] = data
-+ i.mu.Unlock()
-+}
-+
-+func (i *important) createImportant(data interface{}) {
-+ time.Sleep(10 * time.Second)
-+}
-+```
-+
-+For functions dealing with a given name, always lock at the beginning of the
-+function (or before doing anything with the underlying state), this ensures any
-+other function that is dealing with the same name will block.
-+
-+When needing to modify the underlying data, use the global lock to ensure nothing
-+else is modifying it at the same time.
-+Since name lock is already in place, no reads will occur while the modification
-+is being performed.
-diff --git a/vendor/github.com/docker/docker/pkg/locker/locker.go b/vendor/github.com/docker/docker/pkg/locker/locker.go
-new file mode 100644
-index 00000000000..dbd47fc4655
---- /dev/null
-+++ b/vendor/github.com/docker/docker/pkg/locker/locker.go
-@@ -0,0 +1,112 @@
-+/*
-+Package locker provides a mechanism for creating finer-grained locking to help
-+free up more global locks to handle other tasks.
-+
-+The implementation looks close to a sync.Mutex, however the user must provide a
-+reference to use to refer to the underlying lock when locking and unlocking,
-+and unlock may generate an error.
-+
-+If a lock with a given name does not exist when `Lock` is called, one is
-+created.
-+Lock references are automatically cleaned up on `Unlock` if nothing else is
-+waiting for the lock.
-+*/
-+package locker // import "github.com/docker/docker/pkg/locker"
-+
-+import (
-+ "errors"
-+ "sync"
-+ "sync/atomic"
-+)
-+
-+// ErrNoSuchLock is returned when the requested lock does not exist
-+var ErrNoSuchLock = errors.New("no such lock")
-+
-+// Locker provides a locking mechanism based on the passed in reference name
-+type Locker struct {
-+ mu sync.Mutex
-+ locks map[string]*lockCtr
-+}
-+
-+// lockCtr is used by Locker to represent a lock with a given name.
-+type lockCtr struct {
-+ mu sync.Mutex
-+ // waiters is the number of waiters waiting to acquire the lock
-+ // this is int32 instead of uint32 so we can add `-1` in `dec()`
-+ waiters int32
-+}
-+
-+// inc increments the number of waiters waiting for the lock
-+func (l *lockCtr) inc() {
-+ atomic.AddInt32(&l.waiters, 1)
-+}
-+
-+// dec decrements the number of waiters waiting on the lock
-+func (l *lockCtr) dec() {
-+ atomic.AddInt32(&l.waiters, -1)
-+}
-+
-+// count gets the current number of waiters
-+func (l *lockCtr) count() int32 {
-+ return atomic.LoadInt32(&l.waiters)
-+}
-+
-+// Lock locks the mutex
-+func (l *lockCtr) Lock() {
-+ l.mu.Lock()
-+}
-+
-+// Unlock unlocks the mutex
-+func (l *lockCtr) Unlock() {
-+ l.mu.Unlock()
-+}
-+
-+// New creates a new Locker
-+func New() *Locker {
-+ return &Locker{
-+ locks: make(map[string]*lockCtr),
-+ }
-+}
-+
-+// Lock locks a mutex with the given name. If it doesn't exist, one is created
-+func (l *Locker) Lock(name string) {
-+ l.mu.Lock()
-+ if l.locks == nil {
-+ l.locks = make(map[string]*lockCtr)
-+ }
-+
-+ nameLock, exists := l.locks[name]
-+ if !exists {
-+ nameLock = &lockCtr{}
-+ l.locks[name] = nameLock
-+ }
-+
-+ // increment the nameLock waiters while inside the main mutex
-+ // this makes sure that the lock isn't deleted if `Lock` and `Unlock` are called concurrently
-+ nameLock.inc()
-+ l.mu.Unlock()
-+
-+ // Lock the nameLock outside the main mutex so we don't block other operations
-+ // once locked then we can decrement the number of waiters for this lock
-+ nameLock.Lock()
-+ nameLock.dec()
-+}
-+
-+// Unlock unlocks the mutex with the given name
-+// If the given lock is not being waited on by any other callers, it is deleted
-+func (l *Locker) Unlock(name string) error {
-+ l.mu.Lock()
-+ nameLock, exists := l.locks[name]
-+ if !exists {
-+ l.mu.Unlock()
-+ return ErrNoSuchLock
-+ }
-+
-+ if nameLock.count() == 0 {
-+ delete(l.locks, name)
-+ }
-+ nameLock.Unlock()
-+
-+ l.mu.Unlock()
-+ return nil
-+}
-diff --git a/vendor/github.com/docker/docker/pkg/locker/locker_test.go b/vendor/github.com/docker/docker/pkg/locker/locker_test.go
-new file mode 100644
-index 00000000000..2b0a8a55d6d
---- /dev/null
-+++ b/vendor/github.com/docker/docker/pkg/locker/locker_test.go
-@@ -0,0 +1,161 @@
-+package locker // import "github.com/docker/docker/pkg/locker"
-+
-+import (
-+ "math/rand"
-+ "strconv"
-+ "sync"
-+ "testing"
-+ "time"
-+)
-+
-+func TestLockCounter(t *testing.T) {
-+ l := &lockCtr{}
-+ l.inc()
-+
-+ if l.waiters != 1 {
-+ t.Fatal("counter inc failed")
-+ }
-+
-+ l.dec()
-+ if l.waiters != 0 {
-+ t.Fatal("counter dec failed")
-+ }
-+}
-+
-+func TestLockerLock(t *testing.T) {
-+ l := New()
-+ l.Lock("test")
-+ ctr := l.locks["test"]
-+
-+ if ctr.count() != 0 {
-+ t.Fatalf("expected waiters to be 0, got :%d", ctr.waiters)
-+ }
-+
-+ chDone := make(chan struct{})
-+ go func() {
-+ l.Lock("test")
-+ close(chDone)
-+ }()
-+
-+ chWaiting := make(chan struct{})
-+ go func() {
-+ for range time.Tick(1 * time.Millisecond) {
-+ if ctr.count() == 1 {
-+ close(chWaiting)
-+ break
-+ }
-+ }
-+ }()
-+
-+ select {
-+ case <-chWaiting:
-+ case <-time.After(3 * time.Second):
-+ t.Fatal("timed out waiting for lock waiters to be incremented")
-+ }
-+
-+ select {
-+ case <-chDone:
-+ t.Fatal("lock should not have returned while it was still held")
-+ default:
-+ }
-+
-+ if err := l.Unlock("test"); err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ select {
-+ case <-chDone:
-+ case <-time.After(3 * time.Second):
-+ t.Fatalf("lock should have completed")
-+ }
-+
-+ if ctr.count() != 0 {
-+ t.Fatalf("expected waiters to be 0, got: %d", ctr.count())
-+ }
-+}
-+
-+func TestLockerUnlock(t *testing.T) {
-+ l := New()
-+
-+ l.Lock("test")
-+ l.Unlock("test")
-+
-+ chDone := make(chan struct{})
-+ go func() {
-+ l.Lock("test")
-+ close(chDone)
-+ }()
-+
-+ select {
-+ case <-chDone:
-+ case <-time.After(3 * time.Second):
-+ t.Fatalf("lock should not be blocked")
-+ }
-+}
-+
-+func TestLockerConcurrency(t *testing.T) {
-+ l := New()
-+
-+ var wg sync.WaitGroup
-+ for i := 0; i <= 10000; i++ {
-+ wg.Add(1)
-+ go func() {
-+ l.Lock("test")
-+ // if there is a concurrency issue, will very likely panic here
-+ l.Unlock("test")
-+ wg.Done()
-+ }()
-+ }
-+
-+ chDone := make(chan struct{})
-+ go func() {
-+ wg.Wait()
-+ close(chDone)
-+ }()
-+
-+ select {
-+ case <-chDone:
-+ case <-time.After(10 * time.Second):
-+ t.Fatal("timeout waiting for locks to complete")
-+ }
-+
-+ // Since everything has unlocked this should not exist anymore
-+ if ctr, exists := l.locks["test"]; exists {
-+ t.Fatalf("lock should not exist: %v", ctr)
-+ }
-+}
-+
-+func BenchmarkLocker(b *testing.B) {
-+ l := New()
-+ for i := 0; i < b.N; i++ {
-+ l.Lock("test")
-+ l.Unlock("test")
-+ }
-+}
-+
-+func BenchmarkLockerParallel(b *testing.B) {
-+ l := New()
-+ b.SetParallelism(128)
-+ b.RunParallel(func(pb *testing.PB) {
-+ for pb.Next() {
-+ l.Lock("test")
-+ l.Unlock("test")
-+ }
-+ })
-+}
-+
-+func BenchmarkLockerMoreKeys(b *testing.B) {
-+ l := New()
-+ var keys []string
-+ for i := 0; i < 64; i++ {
-+ keys = append(keys, strconv.Itoa(i))
-+ }
-+ b.SetParallelism(128)
-+ b.RunParallel(func(pb *testing.PB) {
-+ for pb.Next() {
-+ k := keys[rand.Intn(len(keys))]
-+ l.Lock(k)
-+ l.Unlock(k)
-+ }
-+ })
-+}
-diff --git a/vendor/github.com/ghodss/yaml/.travis.yml b/vendor/github.com/ghodss/yaml/.travis.yml
-new file mode 100644
-index 00000000000..0e9d6edc010
---- /dev/null
-+++ b/vendor/github.com/ghodss/yaml/.travis.yml
-@@ -0,0 +1,7 @@
-+language: go
-+go:
-+ - 1.3
-+ - 1.4
-+script:
-+ - go test
-+ - go build
-diff --git a/vendor/github.com/ghodss/yaml/LICENSE b/vendor/github.com/ghodss/yaml/LICENSE
-new file mode 100644
-index 00000000000..7805d36de73
---- /dev/null
-+++ b/vendor/github.com/ghodss/yaml/LICENSE
-@@ -0,0 +1,50 @@
-+The MIT License (MIT)
-+
-+Copyright (c) 2014 Sam Ghods
-+
-+Permission is hereby granted, free of charge, to any person obtaining a copy
-+of this software and associated documentation files (the "Software"), to deal
-+in the Software without restriction, including without limitation the rights
-+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-+copies of the Software, and to permit persons to whom the Software is
-+furnished to do so, subject to the following conditions:
-+
-+The above copyright notice and this permission notice shall be included in all
-+copies or substantial portions of the Software.
-+
-+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-+SOFTWARE.
-+
-+
-+Copyright (c) 2012 The Go Authors. All rights reserved.
-+
-+Redistribution and use in source and binary forms, with or without
-+modification, are permitted provided that the following conditions are
-+met:
-+
-+ * Redistributions of source code must retain the above copyright
-+notice, this list of conditions and the following disclaimer.
-+ * Redistributions in binary form must reproduce the above
-+copyright notice, this list of conditions and the following disclaimer
-+in the documentation and/or other materials provided with the
-+distribution.
-+ * Neither the name of Google Inc. nor the names of its
-+contributors may be used to endorse or promote products derived from
-+this software without specific prior written permission.
-+
-+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-diff --git a/vendor/github.com/ghodss/yaml/README.md b/vendor/github.com/ghodss/yaml/README.md
-new file mode 100644
-index 00000000000..0200f75b4d1
---- /dev/null
-+++ b/vendor/github.com/ghodss/yaml/README.md
-@@ -0,0 +1,121 @@
-+# YAML marshaling and unmarshaling support for Go
-+
-+[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml)
-+
-+## Introduction
-+
-+A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.
-+
-+In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
-+
-+## Compatibility
-+
-+This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility).
-+
-+## Caveats
-+
-+**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example:
-+
-+```
-+BAD:
-+ exampleKey: !!binary gIGC
-+
-+GOOD:
-+ exampleKey: gIGC
-+... and decode the base64 data in your code.
-+```
-+
-+**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are maps will result in an error since this is not supported by JSON. This error will occur in `Unmarshal` as well since you can't unmarshal map keys anyways since struct fields can't be keys.
-+
-+## Installation and usage
-+
-+To install, run:
-+
-+```
-+$ go get github.com/ghodss/yaml
-+```
-+
-+And import using:
-+
-+```
-+import "github.com/ghodss/yaml"
-+```
-+
-+Usage is very similar to the JSON library:
-+
-+```go
-+package main
-+
-+import (
-+ "fmt"
-+
-+ "github.com/ghodss/yaml"
-+)
-+
-+type Person struct {
-+ Name string `json:"name"` // Affects YAML field names too.
-+ Age int `json:"age"`
-+}
-+
-+func main() {
-+ // Marshal a Person struct to YAML.
-+ p := Person{"John", 30}
-+ y, err := yaml.Marshal(p)
-+ if err != nil {
-+ fmt.Printf("err: %v\n", err)
-+ return
-+ }
-+ fmt.Println(string(y))
-+ /* Output:
-+ age: 30
-+ name: John
-+ */
-+
-+ // Unmarshal the YAML back into a Person struct.
-+ var p2 Person
-+ err = yaml.Unmarshal(y, &p2)
-+ if err != nil {
-+ fmt.Printf("err: %v\n", err)
-+ return
-+ }
-+ fmt.Println(p2)
-+ /* Output:
-+ {John 30}
-+ */
-+}
-+```
-+
-+`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available:
-+
-+```go
-+package main
-+
-+import (
-+ "fmt"
-+
-+ "github.com/ghodss/yaml"
-+)
-+
-+func main() {
-+ j := []byte(`{"name": "John", "age": 30}`)
-+ y, err := yaml.JSONToYAML(j)
-+ if err != nil {
-+ fmt.Printf("err: %v\n", err)
-+ return
-+ }
-+ fmt.Println(string(y))
-+ /* Output:
-+ name: John
-+ age: 30
-+ */
-+ j2, err := yaml.YAMLToJSON(y)
-+ if err != nil {
-+ fmt.Printf("err: %v\n", err)
-+ return
-+ }
-+ fmt.Println(string(j2))
-+ /* Output:
-+ {"age":30,"name":"John"}
-+ */
-+}
-+```
-diff --git a/vendor/github.com/ghodss/yaml/fields.go b/vendor/github.com/ghodss/yaml/fields.go
-new file mode 100644
-index 00000000000..58600740266
---- /dev/null
-+++ b/vendor/github.com/ghodss/yaml/fields.go
-@@ -0,0 +1,501 @@
-+// Copyright 2013 The Go Authors. All rights reserved.
-+// Use of this source code is governed by a BSD-style
-+// license that can be found in the LICENSE file.
-+package yaml
-+
-+import (
-+ "bytes"
-+ "encoding"
-+ "encoding/json"
-+ "reflect"
-+ "sort"
-+ "strings"
-+ "sync"
-+ "unicode"
-+ "unicode/utf8"
-+)
-+
-+// indirect walks down v allocating pointers as needed,
-+// until it gets to a non-pointer.
-+// if it encounters an Unmarshaler, indirect stops and returns that.
-+// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
-+func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
-+ // If v is a named type and is addressable,
-+ // start with its address, so that if the type has pointer methods,
-+ // we find them.
-+ if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
-+ v = v.Addr()
-+ }
-+ for {
-+ // Load value from interface, but only if the result will be
-+ // usefully addressable.
-+ if v.Kind() == reflect.Interface && !v.IsNil() {
-+ e := v.Elem()
-+ if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
-+ v = e
-+ continue
-+ }
-+ }
-+
-+ if v.Kind() != reflect.Ptr {
-+ break
-+ }
-+
-+ if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
-+ break
-+ }
-+ if v.IsNil() {
-+ if v.CanSet() {
-+ v.Set(reflect.New(v.Type().Elem()))
-+ } else {
-+ v = reflect.New(v.Type().Elem())
-+ }
-+ }
-+ if v.Type().NumMethod() > 0 {
-+ if u, ok := v.Interface().(json.Unmarshaler); ok {
-+ return u, nil, reflect.Value{}
-+ }
-+ if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
-+ return nil, u, reflect.Value{}
-+ }
-+ }
-+ v = v.Elem()
-+ }
-+ return nil, nil, v
-+}
-+
-+// A field represents a single field found in a struct.
-+type field struct {
-+ name string
-+ nameBytes []byte // []byte(name)
-+ equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
-+
-+ tag bool
-+ index []int
-+ typ reflect.Type
-+ omitEmpty bool
-+ quoted bool
-+}
-+
-+func fillField(f field) field {
-+ f.nameBytes = []byte(f.name)
-+ f.equalFold = foldFunc(f.nameBytes)
-+ return f
-+}
-+
-+// byName sorts field by name, breaking ties with depth,
-+// then breaking ties with "name came from json tag", then
-+// breaking ties with index sequence.
-+type byName []field
-+
-+func (x byName) Len() int { return len(x) }
-+
-+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-+
-+func (x byName) Less(i, j int) bool {
-+ if x[i].name != x[j].name {
-+ return x[i].name < x[j].name
-+ }
-+ if len(x[i].index) != len(x[j].index) {
-+ return len(x[i].index) < len(x[j].index)
-+ }
-+ if x[i].tag != x[j].tag {
-+ return x[i].tag
-+ }
-+ return byIndex(x).Less(i, j)
-+}
-+
-+// byIndex sorts field by index sequence.
-+type byIndex []field
-+
-+func (x byIndex) Len() int { return len(x) }
-+
-+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-+
-+func (x byIndex) Less(i, j int) bool {
-+ for k, xik := range x[i].index {
-+ if k >= len(x[j].index) {
-+ return false
-+ }
-+ if xik != x[j].index[k] {
-+ return xik < x[j].index[k]
-+ }
-+ }
-+ return len(x[i].index) < len(x[j].index)
-+}
-+
-+// typeFields returns a list of fields that JSON should recognize for the given type.
-+// The algorithm is breadth-first search over the set of structs to include - the top struct
-+// and then any reachable anonymous structs.
-+func typeFields(t reflect.Type) []field {
-+ // Anonymous fields to explore at the current level and the next.
-+ current := []field{}
-+ next := []field{{typ: t}}
-+
-+ // Count of queued names for current level and the next.
-+ count := map[reflect.Type]int{}
-+ nextCount := map[reflect.Type]int{}
-+
-+ // Types already visited at an earlier level.
-+ visited := map[reflect.Type]bool{}
-+
-+ // Fields found.
-+ var fields []field
-+
-+ for len(next) > 0 {
-+ current, next = next, current[:0]
-+ count, nextCount = nextCount, map[reflect.Type]int{}
-+
-+ for _, f := range current {
-+ if visited[f.typ] {
-+ continue
-+ }
-+ visited[f.typ] = true
-+
-+ // Scan f.typ for fields to include.
-+ for i := 0; i < f.typ.NumField(); i++ {
-+ sf := f.typ.Field(i)
-+ if sf.PkgPath != "" { // unexported
-+ continue
-+ }
-+ tag := sf.Tag.Get("json")
-+ if tag == "-" {
-+ continue
-+ }
-+ name, opts := parseTag(tag)
-+ if !isValidTag(name) {
-+ name = ""
-+ }
-+ index := make([]int, len(f.index)+1)
-+ copy(index, f.index)
-+ index[len(f.index)] = i
-+
-+ ft := sf.Type
-+ if ft.Name() == "" && ft.Kind() == reflect.Ptr {
-+ // Follow pointer.
-+ ft = ft.Elem()
-+ }
-+
-+ // Record found field and index sequence.
-+ if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
-+ tagged := name != ""
-+ if name == "" {
-+ name = sf.Name
-+ }
-+ fields = append(fields, fillField(field{
-+ name: name,
-+ tag: tagged,
-+ index: index,
-+ typ: ft,
-+ omitEmpty: opts.Contains("omitempty"),
-+ quoted: opts.Contains("string"),
-+ }))
-+ if count[f.typ] > 1 {
-+ // If there were multiple instances, add a second,
-+ // so that the annihilation code will see a duplicate.
-+ // It only cares about the distinction between 1 or 2,
-+ // so don't bother generating any more copies.
-+ fields = append(fields, fields[len(fields)-1])
-+ }
-+ continue
-+ }
-+
-+ // Record new anonymous struct to explore in next round.
-+ nextCount[ft]++
-+ if nextCount[ft] == 1 {
-+ next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
-+ }
-+ }
-+ }
-+ }
-+
-+ sort.Sort(byName(fields))
-+
-+ // Delete all fields that are hidden by the Go rules for embedded fields,
-+ // except that fields with JSON tags are promoted.
-+
-+ // The fields are sorted in primary order of name, secondary order
-+ // of field index length. Loop over names; for each name, delete
-+ // hidden fields by choosing the one dominant field that survives.
-+ out := fields[:0]
-+ for advance, i := 0, 0; i < len(fields); i += advance {
-+ // One iteration per name.
-+ // Find the sequence of fields with the name of this first field.
-+ fi := fields[i]
-+ name := fi.name
-+ for advance = 1; i+advance < len(fields); advance++ {
-+ fj := fields[i+advance]
-+ if fj.name != name {
-+ break
-+ }
-+ }
-+ if advance == 1 { // Only one field with this name
-+ out = append(out, fi)
-+ continue
-+ }
-+ dominant, ok := dominantField(fields[i : i+advance])
-+ if ok {
-+ out = append(out, dominant)
-+ }
-+ }
-+
-+ fields = out
-+ sort.Sort(byIndex(fields))
-+
-+ return fields
-+}
-+
-+// dominantField looks through the fields, all of which are known to
-+// have the same name, to find the single field that dominates the
-+// others using Go's embedding rules, modified by the presence of
-+// JSON tags. If there are multiple top-level fields, the boolean
-+// will be false: This condition is an error in Go and we skip all
-+// the fields.
-+func dominantField(fields []field) (field, bool) {
-+ // The fields are sorted in increasing index-length order. The winner
-+ // must therefore be one with the shortest index length. Drop all
-+ // longer entries, which is easy: just truncate the slice.
-+ length := len(fields[0].index)
-+ tagged := -1 // Index of first tagged field.
-+ for i, f := range fields {
-+ if len(f.index) > length {
-+ fields = fields[:i]
-+ break
-+ }
-+ if f.tag {
-+ if tagged >= 0 {
-+ // Multiple tagged fields at the same level: conflict.
-+ // Return no field.
-+ return field{}, false
-+ }
-+ tagged = i
-+ }
-+ }
-+ if tagged >= 0 {
-+ return fields[tagged], true
-+ }
-+ // All remaining fields have the same length. If there's more than one,
-+ // we have a conflict (two fields named "X" at the same level) and we
-+ // return no field.
-+ if len(fields) > 1 {
-+ return field{}, false
-+ }
-+ return fields[0], true
-+}
-+
-+var fieldCache struct {
-+ sync.RWMutex
-+ m map[reflect.Type][]field
-+}
-+
-+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
-+func cachedTypeFields(t reflect.Type) []field {
-+ fieldCache.RLock()
-+ f := fieldCache.m[t]
-+ fieldCache.RUnlock()
-+ if f != nil {
-+ return f
-+ }
-+
-+ // Compute fields without lock.
-+ // Might duplicate effort but won't hold other computations back.
-+ f = typeFields(t)
-+ if f == nil {
-+ f = []field{}
-+ }
-+
-+ fieldCache.Lock()
-+ if fieldCache.m == nil {
-+ fieldCache.m = map[reflect.Type][]field{}
-+ }
-+ fieldCache.m[t] = f
-+ fieldCache.Unlock()
-+ return f
-+}
-+
-+func isValidTag(s string) bool {
-+ if s == "" {
-+ return false
-+ }
-+ for _, c := range s {
-+ switch {
-+ case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
-+ // Backslash and quote chars are reserved, but
-+ // otherwise any punctuation chars are allowed
-+ // in a tag name.
-+ default:
-+ if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
-+ return false
-+ }
-+ }
-+ }
-+ return true
-+}
-+
-+const (
-+ caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
-+ kelvin = '\u212a'
-+ smallLongEss = '\u017f'
-+)
-+
-+// foldFunc returns one of four different case folding equivalence
-+// functions, from most general (and slow) to fastest:
-+//
-+// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
-+// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
-+// 3) asciiEqualFold, no special, but includes non-letters (including _)
-+// 4) simpleLetterEqualFold, no specials, no non-letters.
-+//
-+// The letters S and K are special because they map to 3 runes, not just 2:
-+// * S maps to s and to U+017F 'ſ' Latin small letter long s
-+// * k maps to K and to U+212A 'K' Kelvin sign
-+// See http://play.golang.org/p/tTxjOc0OGo
-+//
-+// The returned function is specialized for matching against s and
-+// should only be given s. It's not curried for performance reasons.
-+func foldFunc(s []byte) func(s, t []byte) bool {
-+ nonLetter := false
-+ special := false // special letter
-+ for _, b := range s {
-+ if b >= utf8.RuneSelf {
-+ return bytes.EqualFold
-+ }
-+ upper := b & caseMask
-+ if upper < 'A' || upper > 'Z' {
-+ nonLetter = true
-+ } else if upper == 'K' || upper == 'S' {
-+ // See above for why these letters are special.
-+ special = true
-+ }
-+ }
-+ if special {
-+ return equalFoldRight
-+ }
-+ if nonLetter {
-+ return asciiEqualFold
-+ }
-+ return simpleLetterEqualFold
-+}
-+
-+// equalFoldRight is a specialization of bytes.EqualFold when s is
-+// known to be all ASCII (including punctuation), but contains an 's',
-+// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
-+// See comments on foldFunc.
-+func equalFoldRight(s, t []byte) bool {
-+ for _, sb := range s {
-+ if len(t) == 0 {
-+ return false
-+ }
-+ tb := t[0]
-+ if tb < utf8.RuneSelf {
-+ if sb != tb {
-+ sbUpper := sb & caseMask
-+ if 'A' <= sbUpper && sbUpper <= 'Z' {
-+ if sbUpper != tb&caseMask {
-+ return false
-+ }
-+ } else {
-+ return false
-+ }
-+ }
-+ t = t[1:]
-+ continue
-+ }
-+ // sb is ASCII and t is not. t must be either kelvin
-+ // sign or long s; sb must be s, S, k, or K.
-+ tr, size := utf8.DecodeRune(t)
-+ switch sb {
-+ case 's', 'S':
-+ if tr != smallLongEss {
-+ return false
-+ }
-+ case 'k', 'K':
-+ if tr != kelvin {
-+ return false
-+ }
-+ default:
-+ return false
-+ }
-+ t = t[size:]
-+
-+ }
-+ if len(t) > 0 {
-+ return false
-+ }
-+ return true
-+}
-+
-+// asciiEqualFold is a specialization of bytes.EqualFold for use when
-+// s is all ASCII (but may contain non-letters) and contains no
-+// special-folding letters.
-+// See comments on foldFunc.
-+func asciiEqualFold(s, t []byte) bool {
-+ if len(s) != len(t) {
-+ return false
-+ }
-+ for i, sb := range s {
-+ tb := t[i]
-+ if sb == tb {
-+ continue
-+ }
-+ if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
-+ if sb&caseMask != tb&caseMask {
-+ return false
-+ }
-+ } else {
-+ return false
-+ }
-+ }
-+ return true
-+}
-+
-+// simpleLetterEqualFold is a specialization of bytes.EqualFold for
-+// use when s is all ASCII letters (no underscores, etc) and also
-+// doesn't contain 'k', 'K', 's', or 'S'.
-+// See comments on foldFunc.
-+func simpleLetterEqualFold(s, t []byte) bool {
-+ if len(s) != len(t) {
-+ return false
-+ }
-+ for i, b := range s {
-+ if b&caseMask != t[i]&caseMask {
-+ return false
-+ }
-+ }
-+ return true
-+}
-+
-+// tagOptions is the string following a comma in a struct field's "json"
-+// tag, or the empty string. It does not include the leading comma.
-+type tagOptions string
-+
-+// parseTag splits a struct field's json tag into its name and
-+// comma-separated options.
-+func parseTag(tag string) (string, tagOptions) {
-+ if idx := strings.Index(tag, ","); idx != -1 {
-+ return tag[:idx], tagOptions(tag[idx+1:])
-+ }
-+ return tag, tagOptions("")
-+}
-+
-+// Contains reports whether a comma-separated list of options
-+// contains a particular substr flag. substr must be surrounded by a
-+// string boundary or commas.
-+func (o tagOptions) Contains(optionName string) bool {
-+ if len(o) == 0 {
-+ return false
-+ }
-+ s := string(o)
-+ for s != "" {
-+ var next string
-+ i := strings.Index(s, ",")
-+ if i >= 0 {
-+ s, next = s[:i], s[i+1:]
-+ }
-+ if s == optionName {
-+ return true
-+ }
-+ s = next
-+ }
-+ return false
-+}
-diff --git a/vendor/github.com/ghodss/yaml/yaml.go b/vendor/github.com/ghodss/yaml/yaml.go
-new file mode 100644
-index 00000000000..4fb4054a8b7
---- /dev/null
-+++ b/vendor/github.com/ghodss/yaml/yaml.go
-@@ -0,0 +1,277 @@
-+package yaml
-+
-+import (
-+ "bytes"
-+ "encoding/json"
-+ "fmt"
-+ "reflect"
-+ "strconv"
-+
-+ "gopkg.in/yaml.v2"
-+)
-+
-+// Marshals the object into JSON then converts JSON to YAML and returns the
-+// YAML.
-+func Marshal(o interface{}) ([]byte, error) {
-+ j, err := json.Marshal(o)
-+ if err != nil {
-+ return nil, fmt.Errorf("error marshaling into JSON: %v", err)
-+ }
-+
-+ y, err := JSONToYAML(j)
-+ if err != nil {
-+ return nil, fmt.Errorf("error converting JSON to YAML: %v", err)
-+ }
-+
-+ return y, nil
-+}
-+
-+// Converts YAML to JSON then uses JSON to unmarshal into an object.
-+func Unmarshal(y []byte, o interface{}) error {
-+ vo := reflect.ValueOf(o)
-+ j, err := yamlToJSON(y, &vo)
-+ if err != nil {
-+ return fmt.Errorf("error converting YAML to JSON: %v", err)
-+ }
-+
-+ err = json.Unmarshal(j, o)
-+ if err != nil {
-+ return fmt.Errorf("error unmarshaling JSON: %v", err)
-+ }
-+
-+ return nil
-+}
-+
-+// Convert JSON to YAML.
-+func JSONToYAML(j []byte) ([]byte, error) {
-+ // Convert the JSON to an object.
-+ var jsonObj interface{}
-+ // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
-+ // Go JSON library doesn't try to pick the right number type (int, float,
-+ // etc.) when unmarshalling to interface{}, it just picks float64
-+ // universally. go-yaml does go through the effort of picking the right
-+ // number type, so we can preserve number type throughout this process.
-+ err := yaml.Unmarshal(j, &jsonObj)
-+ if err != nil {
-+ return nil, err
-+ }
-+
-+ // Marshal this object into YAML.
-+ return yaml.Marshal(jsonObj)
-+}
-+
-+// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through
-+// this method should be a no-op.
-+//
-+// Things YAML can do that are not supported by JSON:
-+// * In YAML you can have binary and null keys in your maps. These are invalid
-+// in JSON. (int and float keys are converted to strings.)
-+// * Binary data in YAML with the !!binary tag is not supported. If you want to
-+// use binary data with this library, encode the data as base64 as usual but do
-+// not use the !!binary tag in your YAML. This will ensure the original base64
-+// encoded data makes it all the way through to the JSON.
-+func YAMLToJSON(y []byte) ([]byte, error) {
-+ return yamlToJSON(y, nil)
-+}
-+
-+func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) {
-+ // Convert the YAML to an object.
-+ var yamlObj interface{}
-+ err := yaml.Unmarshal(y, &yamlObj)
-+ if err != nil {
-+ return nil, err
-+ }
-+
-+ // YAML objects are not completely compatible with JSON objects (e.g. you
-+ // can have non-string keys in YAML). So, convert the YAML-compatible object
-+ // to a JSON-compatible object, failing with an error if irrecoverable
-+ // incompatibilties happen along the way.
-+ jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget)
-+ if err != nil {
-+ return nil, err
-+ }
-+
-+ // Convert this object to JSON and return the data.
-+ return json.Marshal(jsonObj)
-+}
-+
-+func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) {
-+ var err error
-+
-+ // Resolve jsonTarget to a concrete value (i.e. not a pointer or an
-+ // interface). We pass decodingNull as false because we're not actually
-+ // decoding into the value, we're just checking if the ultimate target is a
-+ // string.
-+ if jsonTarget != nil {
-+ ju, tu, pv := indirect(*jsonTarget, false)
-+ // We have a JSON or Text Umarshaler at this level, so we can't be trying
-+ // to decode into a string.
-+ if ju != nil || tu != nil {
-+ jsonTarget = nil
-+ } else {
-+ jsonTarget = &pv
-+ }
-+ }
-+
-+ // If yamlObj is a number or a boolean, check if jsonTarget is a string -
-+ // if so, coerce. Else return normal.
-+ // If yamlObj is a map or array, find the field that each key is
-+ // unmarshaling to, and when you recurse pass the reflect.Value for that
-+ // field back into this function.
-+ switch typedYAMLObj := yamlObj.(type) {
-+ case map[interface{}]interface{}:
-+ // JSON does not support arbitrary keys in a map, so we must convert
-+ // these keys to strings.
-+ //
-+ // From my reading of go-yaml v2 (specifically the resolve function),
-+ // keys can only have the types string, int, int64, float64, binary
-+ // (unsupported), or null (unsupported).
-+ strMap := make(map[string]interface{})
-+ for k, v := range typedYAMLObj {
-+ // Resolve the key to a string first.
-+ var keyString string
-+ switch typedKey := k.(type) {
-+ case string:
-+ keyString = typedKey
-+ case int:
-+ keyString = strconv.Itoa(typedKey)
-+ case int64:
-+ // go-yaml will only return an int64 as a key if the system
-+ // architecture is 32-bit and the key's value is between 32-bit
-+ // and 64-bit. Otherwise the key type will simply be int.
-+ keyString = strconv.FormatInt(typedKey, 10)
-+ case float64:
-+ // Stolen from go-yaml to use the same conversion to string as
-+ // the go-yaml library uses to convert float to string when
-+ // Marshaling.
-+ s := strconv.FormatFloat(typedKey, 'g', -1, 32)
-+ switch s {
-+ case "+Inf":
-+ s = ".inf"
-+ case "-Inf":
-+ s = "-.inf"
-+ case "NaN":
-+ s = ".nan"
-+ }
-+ keyString = s
-+ case bool:
-+ if typedKey {
-+ keyString = "true"
-+ } else {
-+ keyString = "false"
-+ }
-+ default:
-+ return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v",
-+ reflect.TypeOf(k), k, v)
-+ }
-+
-+ // jsonTarget should be a struct or a map. If it's a struct, find
-+ // the field it's going to map to and pass its reflect.Value. If
-+ // it's a map, find the element type of the map and pass the
-+ // reflect.Value created from that type. If it's neither, just pass
-+ // nil - JSON conversion will error for us if it's a real issue.
-+ if jsonTarget != nil {
-+ t := *jsonTarget
-+ if t.Kind() == reflect.Struct {
-+ keyBytes := []byte(keyString)
-+ // Find the field that the JSON library would use.
-+ var f *field
-+ fields := cachedTypeFields(t.Type())
-+ for i := range fields {
-+ ff := &fields[i]
-+ if bytes.Equal(ff.nameBytes, keyBytes) {
-+ f = ff
-+ break
-+ }
-+ // Do case-insensitive comparison.
-+ if f == nil && ff.equalFold(ff.nameBytes, keyBytes) {
-+ f = ff
-+ }
-+ }
-+ if f != nil {
-+ // Find the reflect.Value of the most preferential
-+ // struct field.
-+ jtf := t.Field(f.index[0])
-+ strMap[keyString], err = convertToJSONableObject(v, &jtf)
-+ if err != nil {
-+ return nil, err
-+ }
-+ continue
-+ }
-+ } else if t.Kind() == reflect.Map {
-+ // Create a zero value of the map's element type to use as
-+ // the JSON target.
-+ jtv := reflect.Zero(t.Type().Elem())
-+ strMap[keyString], err = convertToJSONableObject(v, &jtv)
-+ if err != nil {
-+ return nil, err
-+ }
-+ continue
-+ }
-+ }
-+ strMap[keyString], err = convertToJSONableObject(v, nil)
-+ if err != nil {
-+ return nil, err
-+ }
-+ }
-+ return strMap, nil
-+ case []interface{}:
-+ // We need to recurse into arrays in case there are any
-+ // map[interface{}]interface{}'s inside and to convert any
-+ // numbers to strings.
-+
-+ // If jsonTarget is a slice (which it really should be), find the
-+ // thing it's going to map to. If it's not a slice, just pass nil
-+ // - JSON conversion will error for us if it's a real issue.
-+ var jsonSliceElemValue *reflect.Value
-+ if jsonTarget != nil {
-+ t := *jsonTarget
-+ if t.Kind() == reflect.Slice {
-+ // By default slices point to nil, but we need a reflect.Value
-+ // pointing to a value of the slice type, so we create one here.
-+ ev := reflect.Indirect(reflect.New(t.Type().Elem()))
-+ jsonSliceElemValue = &ev
-+ }
-+ }
-+
-+ // Make and use a new array.
-+ arr := make([]interface{}, len(typedYAMLObj))
-+ for i, v := range typedYAMLObj {
-+ arr[i], err = convertToJSONableObject(v, jsonSliceElemValue)
-+ if err != nil {
-+ return nil, err
-+ }
-+ }
-+ return arr, nil
-+ default:
-+ // If the target type is a string and the YAML type is a number,
-+ // convert the YAML type to a string.
-+ if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String {
-+ // Based on my reading of go-yaml, it may return int, int64,
-+ // float64, or uint64.
-+ var s string
-+ switch typedVal := typedYAMLObj.(type) {
-+ case int:
-+ s = strconv.FormatInt(int64(typedVal), 10)
-+ case int64:
-+ s = strconv.FormatInt(typedVal, 10)
-+ case float64:
-+ s = strconv.FormatFloat(typedVal, 'g', -1, 32)
-+ case uint64:
-+ s = strconv.FormatUint(typedVal, 10)
-+ case bool:
-+ if typedVal {
-+ s = "true"
-+ } else {
-+ s = "false"
-+ }
-+ }
-+ if len(s) > 0 {
-+ yamlObj = interface{}(s)
-+ }
-+ }
-+ return yamlObj, nil
-+ }
-+
-+ return nil, nil
-+}
-diff --git a/vendor/github.com/ghodss/yaml/yaml_test.go b/vendor/github.com/ghodss/yaml/yaml_test.go
-new file mode 100644
-index 00000000000..505af453011
---- /dev/null
-+++ b/vendor/github.com/ghodss/yaml/yaml_test.go
-@@ -0,0 +1,287 @@
-+package yaml
-+
-+import (
-+ "fmt"
-+ "math"
-+ "reflect"
-+ "strconv"
-+ "testing"
-+)
-+
-+type MarshalTest struct {
-+ A string
-+ B int64
-+ // Would like to test float64, but it's not supported in go-yaml.
-+ // (See https://github.com/go-yaml/yaml/issues/83.)
-+ C float32
-+}
-+
-+func TestMarshal(t *testing.T) {
-+ f32String := strconv.FormatFloat(math.MaxFloat32, 'g', -1, 32)
-+ s := MarshalTest{"a", math.MaxInt64, math.MaxFloat32}
-+ e := []byte(fmt.Sprintf("A: a\nB: %d\nC: %s\n", math.MaxInt64, f32String))
-+
-+ y, err := Marshal(s)
-+ if err != nil {
-+ t.Errorf("error marshaling YAML: %v", err)
-+ }
-+
-+ if !reflect.DeepEqual(y, e) {
-+ t.Errorf("marshal YAML was unsuccessful, expected: %#v, got: %#v",
-+ string(e), string(y))
-+ }
-+}
-+
-+type UnmarshalString struct {
-+ A string
-+ True string
-+}
-+
-+type UnmarshalStringMap struct {
-+ A map[string]string
-+}
-+
-+type UnmarshalNestedString struct {
-+ A NestedString
-+}
-+
-+type NestedString struct {
-+ A string
-+}
-+
-+type UnmarshalSlice struct {
-+ A []NestedSlice
-+}
-+
-+type NestedSlice struct {
-+ B string
-+ C *string
-+}
-+
-+func TestUnmarshal(t *testing.T) {
-+ y := []byte("a: 1")
-+ s1 := UnmarshalString{}
-+ e1 := UnmarshalString{A: "1"}
-+ unmarshal(t, y, &s1, &e1)
-+
-+ y = []byte("a: true")
-+ s1 = UnmarshalString{}
-+ e1 = UnmarshalString{A: "true"}
-+ unmarshal(t, y, &s1, &e1)
-+
-+ y = []byte("true: 1")
-+ s1 = UnmarshalString{}
-+ e1 = UnmarshalString{True: "1"}
-+ unmarshal(t, y, &s1, &e1)
-+
-+ y = []byte("a:\n a: 1")
-+ s2 := UnmarshalNestedString{}
-+ e2 := UnmarshalNestedString{NestedString{"1"}}
-+ unmarshal(t, y, &s2, &e2)
-+
-+ y = []byte("a:\n - b: abc\n c: def\n - b: 123\n c: 456\n")
-+ s3 := UnmarshalSlice{}
-+ e3 := UnmarshalSlice{[]NestedSlice{NestedSlice{"abc", strPtr("def")}, NestedSlice{"123", strPtr("456")}}}
-+ unmarshal(t, y, &s3, &e3)
-+
-+ y = []byte("a:\n b: 1")
-+ s4 := UnmarshalStringMap{}
-+ e4 := UnmarshalStringMap{map[string]string{"b": "1"}}
-+ unmarshal(t, y, &s4, &e4)
-+
-+ y = []byte(`
-+a:
-+ name: TestA
-+b:
-+ name: TestB
-+`)
-+ type NamedThing struct {
-+ Name string `json:"name"`
-+ }
-+ s5 := map[string]*NamedThing{}
-+ e5 := map[string]*NamedThing{
-+ "a": &NamedThing{Name: "TestA"},
-+ "b": &NamedThing{Name: "TestB"},
-+ }
-+ unmarshal(t, y, &s5, &e5)
-+}
-+
-+func unmarshal(t *testing.T, y []byte, s, e interface{}) {
-+ err := Unmarshal(y, s)
-+ if err != nil {
-+ t.Errorf("error unmarshaling YAML: %v", err)
-+ }
-+
-+ if !reflect.DeepEqual(s, e) {
-+ t.Errorf("unmarshal YAML was unsuccessful, expected: %+#v, got: %+#v",
-+ e, s)
-+ }
-+}
-+
-+type Case struct {
-+ input string
-+ output string
-+ // By default we test that reversing the output == input. But if there is a
-+ // difference in the reversed output, you can optionally specify it here.
-+ reverse *string
-+}
-+
-+type RunType int
-+
-+const (
-+ RunTypeJSONToYAML RunType = iota
-+ RunTypeYAMLToJSON
-+)
-+
-+func TestJSONToYAML(t *testing.T) {
-+ cases := []Case{
-+ {
-+ `{"t":"a"}`,
-+ "t: a\n",
-+ nil,
-+ }, {
-+ `{"t":null}`,
-+ "t: null\n",
-+ nil,
-+ },
-+ }
-+
-+ runCases(t, RunTypeJSONToYAML, cases)
-+}
-+
-+func TestYAMLToJSON(t *testing.T) {
-+ cases := []Case{
-+ {
-+ "t: a\n",
-+ `{"t":"a"}`,
-+ nil,
-+ }, {
-+ "t: \n",
-+ `{"t":null}`,
-+ strPtr("t: null\n"),
-+ }, {
-+ "t: null\n",
-+ `{"t":null}`,
-+ nil,
-+ }, {
-+ "1: a\n",
-+ `{"1":"a"}`,
-+ strPtr("\"1\": a\n"),
-+ }, {
-+ "1000000000000000000000000000000000000: a\n",
-+ `{"1e+36":"a"}`,
-+ strPtr("\"1e+36\": a\n"),
-+ }, {
-+ "1e+36: a\n",
-+ `{"1e+36":"a"}`,
-+ strPtr("\"1e+36\": a\n"),
-+ }, {
-+ "\"1e+36\": a\n",
-+ `{"1e+36":"a"}`,
-+ nil,
-+ }, {
-+ "\"1.2\": a\n",
-+ `{"1.2":"a"}`,
-+ nil,
-+ }, {
-+ "- t: a\n",
-+ `[{"t":"a"}]`,
-+ nil,
-+ }, {
-+ "- t: a\n" +
-+ "- t:\n" +
-+ " b: 1\n" +
-+ " c: 2\n",
-+ `[{"t":"a"},{"t":{"b":1,"c":2}}]`,
-+ nil,
-+ }, {
-+ `[{t: a}, {t: {b: 1, c: 2}}]`,
-+ `[{"t":"a"},{"t":{"b":1,"c":2}}]`,
-+ strPtr("- t: a\n" +
-+ "- t:\n" +
-+ " b: 1\n" +
-+ " c: 2\n"),
-+ }, {
-+ "- t: \n",
-+ `[{"t":null}]`,
-+ strPtr("- t: null\n"),
-+ }, {
-+ "- t: null\n",
-+ `[{"t":null}]`,
-+ nil,
-+ },
-+ }
-+
-+ // Cases that should produce errors.
-+ _ = []Case{
-+ {
-+ "~: a",
-+ `{"null":"a"}`,
-+ nil,
-+ }, {
-+ "a: !!binary gIGC\n",
-+ "{\"a\":\"\x80\x81\x82\"}",
-+ nil,
-+ },
-+ }
-+
-+ runCases(t, RunTypeYAMLToJSON, cases)
-+}
-+
-+func runCases(t *testing.T, runType RunType, cases []Case) {
-+ var f func([]byte) ([]byte, error)
-+ var invF func([]byte) ([]byte, error)
-+ var msg string
-+ var invMsg string
-+ if runType == RunTypeJSONToYAML {
-+ f = JSONToYAML
-+ invF = YAMLToJSON
-+ msg = "JSON to YAML"
-+ invMsg = "YAML back to JSON"
-+ } else {
-+ f = YAMLToJSON
-+ invF = JSONToYAML
-+ msg = "YAML to JSON"
-+ invMsg = "JSON back to YAML"
-+ }
-+
-+ for _, c := range cases {
-+ // Convert the string.
-+ t.Logf("converting %s\n", c.input)
-+ output, err := f([]byte(c.input))
-+ if err != nil {
-+ t.Errorf("Failed to convert %s, input: `%s`, err: %v", msg, c.input, err)
-+ }
-+
-+ // Check it against the expected output.
-+ if string(output) != c.output {
-+ t.Errorf("Failed to convert %s, input: `%s`, expected `%s`, got `%s`",
-+ msg, c.input, c.output, string(output))
-+ }
-+
-+ // Set the string that we will compare the reversed output to.
-+ reverse := c.input
-+ // If a special reverse string was specified, use that instead.
-+ if c.reverse != nil {
-+ reverse = *c.reverse
-+ }
-+
-+ // Reverse the output.
-+ input, err := invF(output)
-+ if err != nil {
-+ t.Errorf("Failed to convert %s, input: `%s`, err: %v", invMsg, string(output), err)
-+ }
-+
-+ // Check the reverse is equal to the input (or to *c.reverse).
-+ if string(input) != reverse {
-+ t.Errorf("Failed to convert %s, input: `%s`, expected `%s`, got `%s`",
-+ invMsg, string(output), reverse, string(input))
-+ }
-+ }
-+
-+}
-+
-+// To be able to easily fill in the *Case.reverse string above.
-+func strPtr(s string) *string {
-+ return &s
-+}
-diff --git a/vendor/github.com/go-sql-driver/mysql/.github/CONTRIBUTING.md b/vendor/github.com/go-sql-driver/mysql/.github/CONTRIBUTING.md
-new file mode 100644
-index 00000000000..8fe16bcb497
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/.github/CONTRIBUTING.md
-@@ -0,0 +1,23 @@
-+# Contributing Guidelines
-+
-+## Reporting Issues
-+
-+Before creating a new Issue, please check first if a similar Issue [already exists](https://github.com/go-sql-driver/mysql/issues?state=open) or was [recently closed](https://github.com/go-sql-driver/mysql/issues?direction=desc&page=1&sort=updated&state=closed).
-+
-+## Contributing Code
-+
-+By contributing to this project, you share your code under the Mozilla Public License 2, as specified in the LICENSE file.
-+Don't forget to add yourself to the AUTHORS file.
-+
-+### Code Review
-+
-+Everyone is invited to review and comment on pull requests.
-+If it looks fine to you, comment with "LGTM" (Looks good to me).
-+
-+If changes are required, notice the reviewers with "PTAL" (Please take another look) after committing the fixes.
-+
-+Before merging the Pull Request, at least one [team member](https://github.com/go-sql-driver?tab=members) must have commented with "LGTM".
-+
-+## Development Ideas
-+
-+If you are looking for ideas for code contributions, please check our [Development Ideas](https://github.com/go-sql-driver/mysql/wiki/Development-Ideas) Wiki page.
-diff --git a/vendor/github.com/go-sql-driver/mysql/.github/ISSUE_TEMPLATE.md b/vendor/github.com/go-sql-driver/mysql/.github/ISSUE_TEMPLATE.md
-new file mode 100644
-index 00000000000..d9771f1ddc4
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/.github/ISSUE_TEMPLATE.md
-@@ -0,0 +1,21 @@
-+### Issue description
-+Tell us what should happen and what happens instead
-+
-+### Example code
-+```go
-+If possible, please enter some example code here to reproduce the issue.
-+```
-+
-+### Error log
-+```
-+If you have an error log, please paste it here.
-+```
-+
-+### Configuration
-+*Driver version (or git SHA):*
-+
-+*Go version:* run `go version` in your console
-+
-+*Server version:* E.g. MySQL 5.6, MariaDB 10.0.20
-+
-+*Server OS:* E.g. Debian 8.1 (Jessie), Windows 10
-diff --git a/vendor/github.com/go-sql-driver/mysql/.github/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/go-sql-driver/mysql/.github/PULL_REQUEST_TEMPLATE.md
-new file mode 100644
-index 00000000000..6f5c7ebeb73
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/.github/PULL_REQUEST_TEMPLATE.md
-@@ -0,0 +1,9 @@
-+### Description
-+Please explain the changes you made here.
-+
-+### Checklist
-+- [ ] Code compiles correctly
-+- [ ] Created tests which fail without the change (if possible)
-+- [ ] All tests passing
-+- [ ] Extended the README / documentation, if necessary
-+- [ ] Added myself / the copyright holder to the AUTHORS file
-diff --git a/vendor/github.com/go-sql-driver/mysql/.gitignore b/vendor/github.com/go-sql-driver/mysql/.gitignore
-new file mode 100644
-index 00000000000..2de28da1663
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/.gitignore
-@@ -0,0 +1,9 @@
-+.DS_Store
-+.DS_Store?
-+._*
-+.Spotlight-V100
-+.Trashes
-+Icon?
-+ehthumbs.db
-+Thumbs.db
-+.idea
-diff --git a/vendor/github.com/go-sql-driver/mysql/.travis.yml b/vendor/github.com/go-sql-driver/mysql/.travis.yml
-new file mode 100644
-index 00000000000..9d313920747
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/.travis.yml
-@@ -0,0 +1,129 @@
-+sudo: false
-+language: go
-+go:
-+ - 1.9.x
-+ - 1.10.x
-+ - 1.11.x
-+ - 1.12.x
-+ - master
-+
-+before_install:
-+ - go get golang.org/x/tools/cmd/cover
-+ - go get github.com/mattn/goveralls
-+
-+before_script:
-+ - echo -e "[server]\ninnodb_log_file_size=256MB\ninnodb_buffer_pool_size=512MB\nmax_allowed_packet=16MB" | sudo tee -a /etc/mysql/my.cnf
-+ - sudo service mysql restart
-+ - .travis/wait_mysql.sh
-+ - mysql -e 'create database gotest;'
-+
-+matrix:
-+ include:
-+ - env: DB=MYSQL8
-+ sudo: required
-+ dist: trusty
-+ go: 1.10.x
-+ services:
-+ - docker
-+ before_install:
-+ - go get golang.org/x/tools/cmd/cover
-+ - go get github.com/mattn/goveralls
-+ - docker pull mysql:8.0
-+ - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
-+ mysql:8.0 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1
-+ - cp .travis/docker.cnf ~/.my.cnf
-+ - .travis/wait_mysql.sh
-+ before_script:
-+ - export MYSQL_TEST_USER=gotest
-+ - export MYSQL_TEST_PASS=secret
-+ - export MYSQL_TEST_ADDR=127.0.0.1:3307
-+ - export MYSQL_TEST_CONCURRENT=1
-+
-+ - env: DB=MYSQL57
-+ sudo: required
-+ dist: trusty
-+ go: 1.10.x
-+ services:
-+ - docker
-+ before_install:
-+ - go get golang.org/x/tools/cmd/cover
-+ - go get github.com/mattn/goveralls
-+ - docker pull mysql:5.7
-+ - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
-+ mysql:5.7 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1
-+ - cp .travis/docker.cnf ~/.my.cnf
-+ - .travis/wait_mysql.sh
-+ before_script:
-+ - export MYSQL_TEST_USER=gotest
-+ - export MYSQL_TEST_PASS=secret
-+ - export MYSQL_TEST_ADDR=127.0.0.1:3307
-+ - export MYSQL_TEST_CONCURRENT=1
-+
-+ - env: DB=MARIA55
-+ sudo: required
-+ dist: trusty
-+ go: 1.10.x
-+ services:
-+ - docker
-+ before_install:
-+ - go get golang.org/x/tools/cmd/cover
-+ - go get github.com/mattn/goveralls
-+ - docker pull mariadb:5.5
-+ - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
-+ mariadb:5.5 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1
-+ - cp .travis/docker.cnf ~/.my.cnf
-+ - .travis/wait_mysql.sh
-+ before_script:
-+ - export MYSQL_TEST_USER=gotest
-+ - export MYSQL_TEST_PASS=secret
-+ - export MYSQL_TEST_ADDR=127.0.0.1:3307
-+ - export MYSQL_TEST_CONCURRENT=1
-+
-+ - env: DB=MARIA10_1
-+ sudo: required
-+ dist: trusty
-+ go: 1.10.x
-+ services:
-+ - docker
-+ before_install:
-+ - go get golang.org/x/tools/cmd/cover
-+ - go get github.com/mattn/goveralls
-+ - docker pull mariadb:10.1
-+ - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
-+ mariadb:10.1 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1
-+ - cp .travis/docker.cnf ~/.my.cnf
-+ - .travis/wait_mysql.sh
-+ before_script:
-+ - export MYSQL_TEST_USER=gotest
-+ - export MYSQL_TEST_PASS=secret
-+ - export MYSQL_TEST_ADDR=127.0.0.1:3307
-+ - export MYSQL_TEST_CONCURRENT=1
-+
-+ - os: osx
-+ osx_image: xcode10.1
-+ addons:
-+ homebrew:
-+ packages:
-+ - mysql
-+ update: true
-+ go: 1.12.x
-+ before_install:
-+ - go get golang.org/x/tools/cmd/cover
-+ - go get github.com/mattn/goveralls
-+ before_script:
-+ - echo -e "[server]\ninnodb_log_file_size=256MB\ninnodb_buffer_pool_size=512MB\nmax_allowed_packet=16MB\nlocal_infile=1" >> /usr/local/etc/my.cnf
-+ - mysql.server start
-+ - mysql -uroot -e 'CREATE USER gotest IDENTIFIED BY "secret"'
-+ - mysql -uroot -e 'GRANT ALL ON *.* TO gotest'
-+ - mysql -uroot -e 'create database gotest;'
-+ - export MYSQL_TEST_USER=gotest
-+ - export MYSQL_TEST_PASS=secret
-+ - export MYSQL_TEST_ADDR=127.0.0.1:3306
-+ - export MYSQL_TEST_CONCURRENT=1
-+
-+script:
-+ - go test -v -covermode=count -coverprofile=coverage.out
-+ - go vet ./...
-+ - .travis/gofmt.sh
-+after_script:
-+ - $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci
-diff --git a/vendor/github.com/go-sql-driver/mysql/.travis/docker.cnf b/vendor/github.com/go-sql-driver/mysql/.travis/docker.cnf
-new file mode 100644
-index 00000000000..e57754e5afc
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/.travis/docker.cnf
-@@ -0,0 +1,5 @@
-+[client]
-+user = gotest
-+password = secret
-+host = 127.0.0.1
-+port = 3307
-diff --git a/vendor/github.com/go-sql-driver/mysql/.travis/gofmt.sh b/vendor/github.com/go-sql-driver/mysql/.travis/gofmt.sh
-new file mode 100755
-index 00000000000..9bf0d16847b
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/.travis/gofmt.sh
-@@ -0,0 +1,7 @@
-+#!/bin/bash
-+set -ev
-+
-+# Only check for go1.10+ since the gofmt style changed
-+if [[ $(go version) =~ go1\.([0-9]+) ]] && ((${BASH_REMATCH[1]} >= 10)); then
-+ test -z "$(gofmt -d -s . | tee /dev/stderr)"
-+fi
-diff --git a/vendor/github.com/go-sql-driver/mysql/.travis/wait_mysql.sh b/vendor/github.com/go-sql-driver/mysql/.travis/wait_mysql.sh
-new file mode 100755
-index 00000000000..e87993e5723
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/.travis/wait_mysql.sh
-@@ -0,0 +1,8 @@
-+#!/bin/sh
-+while :
-+do
-+ if mysql -e 'select version()' 2>&1 | grep 'version()\|ERROR 2059 (HY000):'; then
-+ break
-+ fi
-+ sleep 3
-+done
-diff --git a/vendor/github.com/go-sql-driver/mysql/AUTHORS b/vendor/github.com/go-sql-driver/mysql/AUTHORS
-new file mode 100644
-index 00000000000..9765b53481c
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/AUTHORS
-@@ -0,0 +1,103 @@
-+# This is the official list of Go-MySQL-Driver authors for copyright purposes.
-+
-+# If you are submitting a patch, please add your name or the name of the
-+# organization which holds the copyright to this list in alphabetical order.
-+
-+# Names should be added to this file as
-+# Name
-+# The email address is not required for organizations.
-+# Please keep the list sorted.
-+
-+
-+# Individual Persons
-+
-+Aaron Hopkins
-+Achille Roussel
-+Alexey Palazhchenko
-+Andrew Reid
-+Arne Hormann
-+Asta Xie
-+Bulat Gaifullin
-+Carlos Nieto
-+Chris Moos
-+Craig Wilson
-+Daniel Montoya
-+Daniel Nichter
-+Daniël van Eeden
-+Dave Protasowski
-+DisposaBoy
-+Egor Smolyakov
-+Erwan Martin
-+Evan Shaw
-+Frederick Mayle
-+Gustavo Kristic
-+Hajime Nakagami
-+Hanno Braun
-+Henri Yandell
-+Hirotaka Yamamoto
-+Huyiguang
-+ICHINOSE Shogo
-+Ilia Cimpoes
-+INADA Naoki
-+Jacek Szwec
-+James Harr
-+Jeff Hodges
-+Jeffrey Charles
-+Jerome Meyer
-+Jian Zhen
-+Joshua Prunier
-+Julien Lefevre
-+Julien Schmidt
-+Justin Li
-+Justin Nuß
-+Kamil Dziedzic
-+Kevin Malachowski
-+Kieron Woodhouse
-+Lennart Rudolph
-+Leonardo YongUk Kim
-+Linh Tran Tuan
-+Lion Yang
-+Luca Looz
-+Lucas Liu
-+Luke Scott
-+Maciej Zimnoch
-+Michael Woolnough
-+Nicola Peduzzi
-+Olivier Mengué
-+oscarzhao
-+Paul Bonser
-+Peter Schultz
-+Rebecca Chin
-+Reed Allman
-+Richard Wilkes
-+Robert Russell
-+Runrioter Wung
-+Shuode Li
-+Simon J Mudd
-+Soroush Pour
-+Stan Putrya
-+Stanley Gunawan
-+Steven Hartland
-+Thomas Wodarek
-+Tim Ruffles
-+Tom Jenkinson
-+Vladimir Kovpak
-+Xiangyu Hu
-+Xiaobing Jiang
-+Xiuming Chen
-+Zhenye Xie
-+
-+# Organizations
-+
-+Barracuda Networks, Inc.
-+Counting Ltd.
-+DigitalOcean Inc.
-+Facebook Inc.
-+GitHub Inc.
-+Google Inc.
-+InfoSum Ltd.
-+Keybase Inc.
-+Multiplay Ltd.
-+Percona LLC
-+Pivotal Inc.
-+Stripe Inc.
-diff --git a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
-new file mode 100644
-index 00000000000..2d87d74c971
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
-@@ -0,0 +1,167 @@
-+## Version 1.4 (2018-06-03)
-+
-+Changes:
-+
-+ - Documentation fixes (#530, #535, #567)
-+ - Refactoring (#575, #579, #580, #581, #603, #615, #704)
-+ - Cache column names (#444)
-+ - Sort the DSN parameters in DSNs generated from a config (#637)
-+ - Allow native password authentication by default (#644)
-+ - Use the default port if it is missing in the DSN (#668)
-+ - Removed the `strict` mode (#676)
-+ - Do not query `max_allowed_packet` by default (#680)
-+ - Dropped support Go 1.6 and lower (#696)
-+ - Updated `ConvertValue()` to match the database/sql/driver implementation (#760)
-+ - Document the usage of `0000-00-00T00:00:00` as the time.Time zero value (#783)
-+ - Improved the compatibility of the authentication system (#807)
-+
-+New Features:
-+
-+ - Multi-Results support (#537)
-+ - `rejectReadOnly` DSN option (#604)
-+ - `context.Context` support (#608, #612, #627, #761)
-+ - Transaction isolation level support (#619, #744)
-+ - Read-Only transactions support (#618, #634)
-+ - `NewConfig` function which initializes a config with default values (#679)
-+ - Implemented the `ColumnType` interfaces (#667, #724)
-+ - Support for custom string types in `ConvertValue` (#623)
-+ - Implemented `NamedValueChecker`, improving support for uint64 with high bit set (#690, #709, #710)
-+ - `caching_sha2_password` authentication plugin support (#794, #800, #801, #802)
-+ - Implemented `driver.SessionResetter` (#779)
-+ - `sha256_password` authentication plugin support (#808)
-+
-+Bugfixes:
-+
-+ - Use the DSN hostname as TLS default ServerName if `tls=true` (#564, #718)
-+ - Fixed LOAD LOCAL DATA INFILE for empty files (#590)
-+ - Removed columns definition cache since it sometimes cached invalid data (#592)
-+ - Don't mutate registered TLS configs (#600)
-+ - Make RegisterTLSConfig concurrency-safe (#613)
-+ - Handle missing auth data in the handshake packet correctly (#646)
-+ - Do not retry queries when data was written to avoid data corruption (#302, #736)
-+ - Cache the connection pointer for error handling before invalidating it (#678)
-+ - Fixed imports for appengine/cloudsql (#700)
-+ - Fix sending STMT_LONG_DATA for 0 byte data (#734)
-+ - Set correct capacity for []bytes read from length-encoded strings (#766)
-+ - Make RegisterDial concurrency-safe (#773)
-+
-+
-+## Version 1.3 (2016-12-01)
-+
-+Changes:
-+
-+ - Go 1.1 is no longer supported
-+ - Use decimals fields in MySQL to format time types (#249)
-+ - Buffer optimizations (#269)
-+ - TLS ServerName defaults to the host (#283)
-+ - Refactoring (#400, #410, #437)
-+ - Adjusted documentation for second generation CloudSQL (#485)
-+ - Documented DSN system var quoting rules (#502)
-+ - Made statement.Close() calls idempotent to avoid errors in Go 1.6+ (#512)
-+
-+New Features:
-+
-+ - Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249)
-+ - Support for returning table alias on Columns() (#289, #359, #382)
-+ - Placeholder interpolation, can be actived with the DSN parameter `interpolateParams=true` (#309, #318, #490)
-+ - Support for uint64 parameters with high bit set (#332, #345)
-+ - Cleartext authentication plugin support (#327)
-+ - Exported ParseDSN function and the Config struct (#403, #419, #429)
-+ - Read / Write timeouts (#401)
-+ - Support for JSON field type (#414)
-+ - Support for multi-statements and multi-results (#411, #431)
-+ - DSN parameter to set the driver-side max_allowed_packet value manually (#489)
-+ - Native password authentication plugin support (#494, #524)
-+
-+Bugfixes:
-+
-+ - Fixed handling of queries without columns and rows (#255)
-+ - Fixed a panic when SetKeepAlive() failed (#298)
-+ - Handle ERR packets while reading rows (#321)
-+ - Fixed reading NULL length-encoded integers in MySQL 5.6+ (#349)
-+ - Fixed absolute paths support in LOAD LOCAL DATA INFILE (#356)
-+ - Actually zero out bytes in handshake response (#378)
-+ - Fixed race condition in registering LOAD DATA INFILE handler (#383)
-+ - Fixed tests with MySQL 5.7.9+ (#380)
-+ - QueryUnescape TLS config names (#397)
-+ - Fixed "broken pipe" error by writing to closed socket (#390)
-+ - Fixed LOAD LOCAL DATA INFILE buffering (#424)
-+ - Fixed parsing of floats into float64 when placeholders are used (#434)
-+ - Fixed DSN tests with Go 1.7+ (#459)
-+ - Handle ERR packets while waiting for EOF (#473)
-+ - Invalidate connection on error while discarding additional results (#513)
-+ - Allow terminating packets of length 0 (#516)
-+
-+
-+## Version 1.2 (2014-06-03)
-+
-+Changes:
-+
-+ - We switched back to a "rolling release". `go get` installs the current master branch again
-+ - Version v1 of the driver will not be maintained anymore. Go 1.0 is no longer supported by this driver
-+ - Exported errors to allow easy checking from application code
-+ - Enabled TCP Keepalives on TCP connections
-+ - Optimized INFILE handling (better buffer size calculation, lazy init, ...)
-+ - The DSN parser also checks for a missing separating slash
-+ - Faster binary date / datetime to string formatting
-+ - Also exported the MySQLWarning type
-+ - mysqlConn.Close returns the first error encountered instead of ignoring all errors
-+ - writePacket() automatically writes the packet size to the header
-+ - readPacket() uses an iterative approach instead of the recursive approach to merge splitted packets
-+
-+New Features:
-+
-+ - `RegisterDial` allows the usage of a custom dial function to establish the network connection
-+ - Setting the connection collation is possible with the `collation` DSN parameter. This parameter should be preferred over the `charset` parameter
-+ - Logging of critical errors is configurable with `SetLogger`
-+ - Google CloudSQL support
-+
-+Bugfixes:
-+
-+ - Allow more than 32 parameters in prepared statements
-+ - Various old_password fixes
-+ - Fixed TestConcurrent test to pass Go's race detection
-+ - Fixed appendLengthEncodedInteger for large numbers
-+ - Renamed readLengthEnodedString to readLengthEncodedString and skipLengthEnodedString to skipLengthEncodedString (fixed typo)
-+
-+
-+## Version 1.1 (2013-11-02)
-+
-+Changes:
-+
-+ - Go-MySQL-Driver now requires Go 1.1
-+ - Connections now use the collation `utf8_general_ci` by default. Adding `&charset=UTF8` to the DSN should not be necessary anymore
-+ - Made closing rows and connections error tolerant. This allows for example deferring rows.Close() without checking for errors
-+ - `[]byte(nil)` is now treated as a NULL value. Before, it was treated like an empty string / `[]byte("")`
-+ - DSN parameter values must now be url.QueryEscape'ed. This allows text values to contain special characters, such as '&'.
-+ - Use the IO buffer also for writing. This results in zero allocations (by the driver) for most queries
-+ - Optimized the buffer for reading
-+ - stmt.Query now caches column metadata
-+ - New Logo
-+ - Changed the copyright header to include all contributors
-+ - Improved the LOAD INFILE documentation
-+ - The driver struct is now exported to make the driver directly accessible
-+ - Refactored the driver tests
-+ - Added more benchmarks and moved all to a separate file
-+ - Other small refactoring
-+
-+New Features:
-+
-+ - Added *old_passwords* support: Required in some cases, but must be enabled by adding `allowOldPasswords=true` to the DSN since it is insecure
-+ - Added a `clientFoundRows` parameter: Return the number of matching rows instead of the number of rows changed on UPDATEs
-+ - Added TLS/SSL support: Use a TLS/SSL encrypted connection to the server. Custom TLS configs can be registered and used
-+
-+Bugfixes:
-+
-+ - Fixed MySQL 4.1 support: MySQL 4.1 sends packets with lengths which differ from the specification
-+ - Convert to DB timezone when inserting `time.Time`
-+ - Splitted packets (more than 16MB) are now merged correctly
-+ - Fixed false positive `io.EOF` errors when the data was fully read
-+ - Avoid panics on reuse of closed connections
-+ - Fixed empty string producing false nil values
-+ - Fixed sign byte for positive TIME fields
-+
-+
-+## Version 1.0 (2013-05-14)
-+
-+Initial Release
-diff --git a/vendor/github.com/go-sql-driver/mysql/LICENSE b/vendor/github.com/go-sql-driver/mysql/LICENSE
-new file mode 100644
-index 00000000000..a612ad9813b
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/LICENSE
-@@ -0,0 +1,373 @@
-+Mozilla Public License Version 2.0
-+==================================
-+
-+1. Definitions
-+--------------
-+
-+1.1. "Contributor"
-+ means each individual or legal entity that creates, contributes to
-+ the creation of, or owns Covered Software.
-+
-+1.2. "Contributor Version"
-+ means the combination of the Contributions of others (if any) used
-+ by a Contributor and that particular Contributor's Contribution.
-+
-+1.3. "Contribution"
-+ means Covered Software of a particular Contributor.
-+
-+1.4. "Covered Software"
-+ means Source Code Form to which the initial Contributor has attached
-+ the notice in Exhibit A, the Executable Form of such Source Code
-+ Form, and Modifications of such Source Code Form, in each case
-+ including portions thereof.
-+
-+1.5. "Incompatible With Secondary Licenses"
-+ means
-+
-+ (a) that the initial Contributor has attached the notice described
-+ in Exhibit B to the Covered Software; or
-+
-+ (b) that the Covered Software was made available under the terms of
-+ version 1.1 or earlier of the License, but not also under the
-+ terms of a Secondary License.
-+
-+1.6. "Executable Form"
-+ means any form of the work other than Source Code Form.
-+
-+1.7. "Larger Work"
-+ means a work that combines Covered Software with other material, in
-+ a separate file or files, that is not Covered Software.
-+
-+1.8. "License"
-+ means this document.
-+
-+1.9. "Licensable"
-+ means having the right to grant, to the maximum extent possible,
-+ whether at the time of the initial grant or subsequently, any and
-+ all of the rights conveyed by this License.
-+
-+1.10. "Modifications"
-+ means any of the following:
-+
-+ (a) any file in Source Code Form that results from an addition to,
-+ deletion from, or modification of the contents of Covered
-+ Software; or
-+
-+ (b) any new file in Source Code Form that contains any Covered
-+ Software.
-+
-+1.11. "Patent Claims" of a Contributor
-+ means any patent claim(s), including without limitation, method,
-+ process, and apparatus claims, in any patent Licensable by such
-+ Contributor that would be infringed, but for the grant of the
-+ License, by the making, using, selling, offering for sale, having
-+ made, import, or transfer of either its Contributions or its
-+ Contributor Version.
-+
-+1.12. "Secondary License"
-+ means either the GNU General Public License, Version 2.0, the GNU
-+ Lesser General Public License, Version 2.1, the GNU Affero General
-+ Public License, Version 3.0, or any later versions of those
-+ licenses.
-+
-+1.13. "Source Code Form"
-+ means the form of the work preferred for making modifications.
-+
-+1.14. "You" (or "Your")
-+ means an individual or a legal entity exercising rights under this
-+ License. For legal entities, "You" includes any entity that
-+ controls, is controlled by, or is under common control with You. For
-+ purposes of this definition, "control" means (a) the power, direct
-+ or indirect, to cause the direction or management of such entity,
-+ whether by contract or otherwise, or (b) ownership of more than
-+ fifty percent (50%) of the outstanding shares or beneficial
-+ ownership of such entity.
-+
-+2. License Grants and Conditions
-+--------------------------------
-+
-+2.1. Grants
-+
-+Each Contributor hereby grants You a world-wide, royalty-free,
-+non-exclusive license:
-+
-+(a) under intellectual property rights (other than patent or trademark)
-+ Licensable by such Contributor to use, reproduce, make available,
-+ modify, display, perform, distribute, and otherwise exploit its
-+ Contributions, either on an unmodified basis, with Modifications, or
-+ as part of a Larger Work; and
-+
-+(b) under Patent Claims of such Contributor to make, use, sell, offer
-+ for sale, have made, import, and otherwise transfer either its
-+ Contributions or its Contributor Version.
-+
-+2.2. Effective Date
-+
-+The licenses granted in Section 2.1 with respect to any Contribution
-+become effective for each Contribution on the date the Contributor first
-+distributes such Contribution.
-+
-+2.3. Limitations on Grant Scope
-+
-+The licenses granted in this Section 2 are the only rights granted under
-+this License. No additional rights or licenses will be implied from the
-+distribution or licensing of Covered Software under this License.
-+Notwithstanding Section 2.1(b) above, no patent license is granted by a
-+Contributor:
-+
-+(a) for any code that a Contributor has removed from Covered Software;
-+ or
-+
-+(b) for infringements caused by: (i) Your and any other third party's
-+ modifications of Covered Software, or (ii) the combination of its
-+ Contributions with other software (except as part of its Contributor
-+ Version); or
-+
-+(c) under Patent Claims infringed by Covered Software in the absence of
-+ its Contributions.
-+
-+This License does not grant any rights in the trademarks, service marks,
-+or logos of any Contributor (except as may be necessary to comply with
-+the notice requirements in Section 3.4).
-+
-+2.4. Subsequent Licenses
-+
-+No Contributor makes additional grants as a result of Your choice to
-+distribute the Covered Software under a subsequent version of this
-+License (see Section 10.2) or under the terms of a Secondary License (if
-+permitted under the terms of Section 3.3).
-+
-+2.5. Representation
-+
-+Each Contributor represents that the Contributor believes its
-+Contributions are its original creation(s) or it has sufficient rights
-+to grant the rights to its Contributions conveyed by this License.
-+
-+2.6. Fair Use
-+
-+This License is not intended to limit any rights You have under
-+applicable copyright doctrines of fair use, fair dealing, or other
-+equivalents.
-+
-+2.7. Conditions
-+
-+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
-+in Section 2.1.
-+
-+3. Responsibilities
-+-------------------
-+
-+3.1. Distribution of Source Form
-+
-+All distribution of Covered Software in Source Code Form, including any
-+Modifications that You create or to which You contribute, must be under
-+the terms of this License. You must inform recipients that the Source
-+Code Form of the Covered Software is governed by the terms of this
-+License, and how they can obtain a copy of this License. You may not
-+attempt to alter or restrict the recipients' rights in the Source Code
-+Form.
-+
-+3.2. Distribution of Executable Form
-+
-+If You distribute Covered Software in Executable Form then:
-+
-+(a) such Covered Software must also be made available in Source Code
-+ Form, as described in Section 3.1, and You must inform recipients of
-+ the Executable Form how they can obtain a copy of such Source Code
-+ Form by reasonable means in a timely manner, at a charge no more
-+ than the cost of distribution to the recipient; and
-+
-+(b) You may distribute such Executable Form under the terms of this
-+ License, or sublicense it under different terms, provided that the
-+ license for the Executable Form does not attempt to limit or alter
-+ the recipients' rights in the Source Code Form under this License.
-+
-+3.3. Distribution of a Larger Work
-+
-+You may create and distribute a Larger Work under terms of Your choice,
-+provided that You also comply with the requirements of this License for
-+the Covered Software. If the Larger Work is a combination of Covered
-+Software with a work governed by one or more Secondary Licenses, and the
-+Covered Software is not Incompatible With Secondary Licenses, this
-+License permits You to additionally distribute such Covered Software
-+under the terms of such Secondary License(s), so that the recipient of
-+the Larger Work may, at their option, further distribute the Covered
-+Software under the terms of either this License or such Secondary
-+License(s).
-+
-+3.4. Notices
-+
-+You may not remove or alter the substance of any license notices
-+(including copyright notices, patent notices, disclaimers of warranty,
-+or limitations of liability) contained within the Source Code Form of
-+the Covered Software, except that You may alter any license notices to
-+the extent required to remedy known factual inaccuracies.
-+
-+3.5. Application of Additional Terms
-+
-+You may choose to offer, and to charge a fee for, warranty, support,
-+indemnity or liability obligations to one or more recipients of Covered
-+Software. However, You may do so only on Your own behalf, and not on
-+behalf of any Contributor. You must make it absolutely clear that any
-+such warranty, support, indemnity, or liability obligation is offered by
-+You alone, and You hereby agree to indemnify every Contributor for any
-+liability incurred by such Contributor as a result of warranty, support,
-+indemnity or liability terms You offer. You may include additional
-+disclaimers of warranty and limitations of liability specific to any
-+jurisdiction.
-+
-+4. Inability to Comply Due to Statute or Regulation
-+---------------------------------------------------
-+
-+If it is impossible for You to comply with any of the terms of this
-+License with respect to some or all of the Covered Software due to
-+statute, judicial order, or regulation then You must: (a) comply with
-+the terms of this License to the maximum extent possible; and (b)
-+describe the limitations and the code they affect. Such description must
-+be placed in a text file included with all distributions of the Covered
-+Software under this License. Except to the extent prohibited by statute
-+or regulation, such description must be sufficiently detailed for a
-+recipient of ordinary skill to be able to understand it.
-+
-+5. Termination
-+--------------
-+
-+5.1. The rights granted under this License will terminate automatically
-+if You fail to comply with any of its terms. However, if You become
-+compliant, then the rights granted under this License from a particular
-+Contributor are reinstated (a) provisionally, unless and until such
-+Contributor explicitly and finally terminates Your grants, and (b) on an
-+ongoing basis, if such Contributor fails to notify You of the
-+non-compliance by some reasonable means prior to 60 days after You have
-+come back into compliance. Moreover, Your grants from a particular
-+Contributor are reinstated on an ongoing basis if such Contributor
-+notifies You of the non-compliance by some reasonable means, this is the
-+first time You have received notice of non-compliance with this License
-+from such Contributor, and You become compliant prior to 30 days after
-+Your receipt of the notice.
-+
-+5.2. If You initiate litigation against any entity by asserting a patent
-+infringement claim (excluding declaratory judgment actions,
-+counter-claims, and cross-claims) alleging that a Contributor Version
-+directly or indirectly infringes any patent, then the rights granted to
-+You by any and all Contributors for the Covered Software under Section
-+2.1 of this License shall terminate.
-+
-+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
-+end user license agreements (excluding distributors and resellers) which
-+have been validly granted by You or Your distributors under this License
-+prior to termination shall survive termination.
-+
-+************************************************************************
-+* *
-+* 6. Disclaimer of Warranty *
-+* ------------------------- *
-+* *
-+* Covered Software is provided under this License on an "as is" *
-+* basis, without warranty of any kind, either expressed, implied, or *
-+* statutory, including, without limitation, warranties that the *
-+* Covered Software is free of defects, merchantable, fit for a *
-+* particular purpose or non-infringing. The entire risk as to the *
-+* quality and performance of the Covered Software is with You. *
-+* Should any Covered Software prove defective in any respect, You *
-+* (not any Contributor) assume the cost of any necessary servicing, *
-+* repair, or correction. This disclaimer of warranty constitutes an *
-+* essential part of this License. No use of any Covered Software is *
-+* authorized under this License except under this disclaimer. *
-+* *
-+************************************************************************
-+
-+************************************************************************
-+* *
-+* 7. Limitation of Liability *
-+* -------------------------- *
-+* *
-+* Under no circumstances and under no legal theory, whether tort *
-+* (including negligence), contract, or otherwise, shall any *
-+* Contributor, or anyone who distributes Covered Software as *
-+* permitted above, be liable to You for any direct, indirect, *
-+* special, incidental, or consequential damages of any character *
-+* including, without limitation, damages for lost profits, loss of *
-+* goodwill, work stoppage, computer failure or malfunction, or any *
-+* and all other commercial damages or losses, even if such party *
-+* shall have been informed of the possibility of such damages. This *
-+* limitation of liability shall not apply to liability for death or *
-+* personal injury resulting from such party's negligence to the *
-+* extent applicable law prohibits such limitation. Some *
-+* jurisdictions do not allow the exclusion or limitation of *
-+* incidental or consequential damages, so this exclusion and *
-+* limitation may not apply to You. *
-+* *
-+************************************************************************
-+
-+8. Litigation
-+-------------
-+
-+Any litigation relating to this License may be brought only in the
-+courts of a jurisdiction where the defendant maintains its principal
-+place of business and such litigation shall be governed by laws of that
-+jurisdiction, without reference to its conflict-of-law provisions.
-+Nothing in this Section shall prevent a party's ability to bring
-+cross-claims or counter-claims.
-+
-+9. Miscellaneous
-+----------------
-+
-+This License represents the complete agreement concerning the subject
-+matter hereof. If any provision of this License is held to be
-+unenforceable, such provision shall be reformed only to the extent
-+necessary to make it enforceable. Any law or regulation which provides
-+that the language of a contract shall be construed against the drafter
-+shall not be used to construe this License against a Contributor.
-+
-+10. Versions of the License
-+---------------------------
-+
-+10.1. New Versions
-+
-+Mozilla Foundation is the license steward. Except as provided in Section
-+10.3, no one other than the license steward has the right to modify or
-+publish new versions of this License. Each version will be given a
-+distinguishing version number.
-+
-+10.2. Effect of New Versions
-+
-+You may distribute the Covered Software under the terms of the version
-+of the License under which You originally received the Covered Software,
-+or under the terms of any subsequent version published by the license
-+steward.
-+
-+10.3. Modified Versions
-+
-+If you create software not governed by this License, and you want to
-+create a new license for such software, you may create and use a
-+modified version of this License if you rename the license and remove
-+any references to the name of the license steward (except to note that
-+such modified license differs from this License).
-+
-+10.4. Distributing Source Code Form that is Incompatible With Secondary
-+Licenses
-+
-+If You choose to distribute Source Code Form that is Incompatible With
-+Secondary Licenses under the terms of this version of the License, the
-+notice described in Exhibit B of this License must be attached.
-+
-+Exhibit A - Source Code Form License Notice
-+-------------------------------------------
-+
-+ This Source Code Form is subject to the terms of the Mozilla Public
-+ License, v. 2.0. If a copy of the MPL was not distributed with this
-+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+If it is not possible or desirable to put the notice in a particular
-+file, then You may include the notice in a location (such as a LICENSE
-+file in a relevant directory) where a recipient would be likely to look
-+for such a notice.
-+
-+You may add additional accurate notices of copyright ownership.
-+
-+Exhibit B - "Incompatible With Secondary Licenses" Notice
-+---------------------------------------------------------
-+
-+ This Source Code Form is "Incompatible With Secondary Licenses", as
-+ defined by the Mozilla Public License, v. 2.0.
-diff --git a/vendor/github.com/go-sql-driver/mysql/README.md b/vendor/github.com/go-sql-driver/mysql/README.md
-new file mode 100644
-index 00000000000..9a09cabdcaa
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/README.md
-@@ -0,0 +1,489 @@
-+# Go-MySQL-Driver
-+
-+A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) package
-+
-+![Go-MySQL-Driver logo](https://raw.github.com/wiki/go-sql-driver/mysql/gomysql_m.png "Golang Gopher holding the MySQL Dolphin")
-+
-+---------------------------------------
-+ * [Features](#features)
-+ * [Requirements](#requirements)
-+ * [Installation](#installation)
-+ * [Usage](#usage)
-+ * [DSN (Data Source Name)](#dsn-data-source-name)
-+ * [Password](#password)
-+ * [Protocol](#protocol)
-+ * [Address](#address)
-+ * [Parameters](#parameters)
-+ * [Examples](#examples)
-+ * [Connection pool and timeouts](#connection-pool-and-timeouts)
-+ * [context.Context Support](#contextcontext-support)
-+ * [ColumnType Support](#columntype-support)
-+ * [LOAD DATA LOCAL INFILE support](#load-data-local-infile-support)
-+ * [time.Time support](#timetime-support)
-+ * [Unicode support](#unicode-support)
-+ * [Testing / Development](#testing--development)
-+ * [License](#license)
-+
-+---------------------------------------
-+
-+## Features
-+ * Lightweight and [fast](https://github.com/go-sql-driver/sql-benchmark "golang MySQL-Driver performance")
-+ * Native Go implementation. No C-bindings, just pure Go
-+ * Connections over TCP/IPv4, TCP/IPv6, Unix domain sockets or [custom protocols](https://godoc.org/github.com/go-sql-driver/mysql#DialFunc)
-+ * Automatic handling of broken connections
-+ * Automatic Connection Pooling *(by database/sql package)*
-+ * Supports queries larger than 16MB
-+ * Full [`sql.RawBytes`](https://golang.org/pkg/database/sql/#RawBytes) support.
-+ * Intelligent `LONG DATA` handling in prepared statements
-+ * Secure `LOAD DATA LOCAL INFILE` support with file Whitelisting and `io.Reader` support
-+ * Optional `time.Time` parsing
-+ * Optional placeholder interpolation
-+
-+## Requirements
-+ * Go 1.9 or higher. We aim to support the 3 latest versions of Go.
-+ * MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+)
-+
-+---------------------------------------
-+
-+## Installation
-+Simple install the package to your [$GOPATH](https://github.com/golang/go/wiki/GOPATH "GOPATH") with the [go tool](https://golang.org/cmd/go/ "go command") from shell:
-+```bash
-+$ go get -u github.com/go-sql-driver/mysql
-+```
-+Make sure [Git is installed](https://git-scm.com/downloads) on your machine and in your system's `PATH`.
-+
-+## Usage
-+_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver and can use the full [`database/sql`](https://golang.org/pkg/database/sql/) API then.
-+
-+Use `mysql` as `driverName` and a valid [DSN](#dsn-data-source-name) as `dataSourceName`:
-+```go
-+import "database/sql"
-+import _ "github.com/go-sql-driver/mysql"
-+
-+db, err := sql.Open("mysql", "user:password@/dbname")
-+```
-+
-+[Examples are available in our Wiki](https://github.com/go-sql-driver/mysql/wiki/Examples "Go-MySQL-Driver Examples").
-+
-+
-+### DSN (Data Source Name)
-+
-+The Data Source Name has a common format, like e.g. [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php) uses it, but without type-prefix (optional parts marked by squared brackets):
-+```
-+[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...¶mN=valueN]
-+```
-+
-+A DSN in its fullest form:
-+```
-+username:password@protocol(address)/dbname?param=value
-+```
-+
-+Except for the databasename, all values are optional. So the minimal DSN is:
-+```
-+/dbname
-+```
-+
-+If you do not want to preselect a database, leave `dbname` empty:
-+```
-+/
-+```
-+This has the same effect as an empty DSN string:
-+```
-+
-+```
-+
-+Alternatively, [Config.FormatDSN](https://godoc.org/github.com/go-sql-driver/mysql#Config.FormatDSN) can be used to create a DSN string by filling a struct.
-+
-+#### Password
-+Passwords can consist of any character. Escaping is **not** necessary.
-+
-+#### Protocol
-+See [net.Dial](https://golang.org/pkg/net/#Dial) for more information which networks are available.
-+In general you should use an Unix domain socket if available and TCP otherwise for best performance.
-+
-+#### Address
-+For TCP and UDP networks, addresses have the form `host[:port]`.
-+If `port` is omitted, the default port will be used.
-+If `host` is a literal IPv6 address, it must be enclosed in square brackets.
-+The functions [net.JoinHostPort](https://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](https://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form.
-+
-+For Unix domain sockets the address is the absolute path to the MySQL-Server-socket, e.g. `/var/run/mysqld/mysqld.sock` or `/tmp/mysql.sock`.
-+
-+#### Parameters
-+*Parameters are case-sensitive!*
-+
-+Notice that any of `true`, `TRUE`, `True` or `1` is accepted to stand for a true boolean value. Not surprisingly, false can be specified as any of: `false`, `FALSE`, `False` or `0`.
-+
-+##### `allowAllFiles`
-+
-+```
-+Type: bool
-+Valid Values: true, false
-+Default: false
-+```
-+
-+`allowAllFiles=true` disables the file Whitelist for `LOAD DATA LOCAL INFILE` and allows *all* files.
-+[*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)
-+
-+##### `allowCleartextPasswords`
-+
-+```
-+Type: bool
-+Valid Values: true, false
-+Default: false
-+```
-+
-+`allowCleartextPasswords=true` allows using the [cleartext client side plugin](http://dev.mysql.com/doc/en/cleartext-authentication-plugin.html) if required by an account, such as one defined with the [PAM authentication plugin](http://dev.mysql.com/doc/en/pam-authentication-plugin.html). Sending passwords in clear text may be a security problem in some configurations. To avoid problems if there is any possibility that the password would be intercepted, clients should connect to MySQL Server using a method that protects the password. Possibilities include [TLS / SSL](#tls), IPsec, or a private network.
-+
-+##### `allowNativePasswords`
-+
-+```
-+Type: bool
-+Valid Values: true, false
-+Default: true
-+```
-+`allowNativePasswords=false` disallows the usage of MySQL native password method.
-+
-+##### `allowOldPasswords`
-+
-+```
-+Type: bool
-+Valid Values: true, false
-+Default: false
-+```
-+`allowOldPasswords=true` allows the usage of the insecure old password method. This should be avoided, but is necessary in some cases. See also [the old_passwords wiki page](https://github.com/go-sql-driver/mysql/wiki/old_passwords).
-+
-+##### `charset`
-+
-+```
-+Type: string
-+Valid Values:
-+Default: none
-+```
-+
-+Sets the charset used for client-server interaction (`"SET NAMES "`). If multiple charsets are set (separated by a comma), the following charset is used if setting the charset failes. This enables for example support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`).
-+
-+Usage of the `charset` parameter is discouraged because it issues additional queries to the server.
-+Unless you need the fallback behavior, please use `collation` instead.
-+
-+##### `collation`
-+
-+```
-+Type: string
-+Valid Values:
-+Default: utf8mb4_general_ci
-+```
-+
-+Sets the collation used for client-server interaction on connection. In contrast to `charset`, `collation` does not issue additional queries. If the specified collation is unavailable on the target server, the connection will fail.
-+
-+A list of valid charsets for a server is retrievable with `SHOW COLLATION`.
-+
-+The default collation (`utf8mb4_general_ci`) is supported from MySQL 5.5. You should use an older collation (e.g. `utf8_general_ci`) for older MySQL.
-+
-+Collations for charset "ucs2", "utf16", "utf16le", and "utf32" can not be used ([ref](https://dev.mysql.com/doc/refman/5.7/en/charset-connection.html#charset-connection-impermissible-client-charset)).
-+
-+
-+##### `clientFoundRows`
-+
-+```
-+Type: bool
-+Valid Values: true, false
-+Default: false
-+```
-+
-+`clientFoundRows=true` causes an UPDATE to return the number of matching rows instead of the number of rows changed.
-+
-+##### `columnsWithAlias`
-+
-+```
-+Type: bool
-+Valid Values: true, false
-+Default: false
-+```
-+
-+When `columnsWithAlias` is true, calls to `sql.Rows.Columns()` will return the table alias and the column name separated by a dot. For example:
-+
-+```
-+SELECT u.id FROM users as u
-+```
-+
-+will return `u.id` instead of just `id` if `columnsWithAlias=true`.
-+
-+##### `interpolateParams`
-+
-+```
-+Type: bool
-+Valid Values: true, false
-+Default: false
-+```
-+
-+If `interpolateParams` is true, placeholders (`?`) in calls to `db.Query()` and `db.Exec()` are interpolated into a single query string with given parameters. This reduces the number of roundtrips, since the driver has to prepare a statement, execute it with given parameters and close the statement again with `interpolateParams=false`.
-+
-+*This can not be used together with the multibyte encodings BIG5, CP932, GB2312, GBK or SJIS. These are blacklisted as they may [introduce a SQL injection vulnerability](http://stackoverflow.com/a/12118602/3430118)!*
-+
-+##### `loc`
-+
-+```
-+Type: string
-+Valid Values:
-+Default: UTC
-+```
-+
-+Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](https://golang.org/pkg/time/#LoadLocation) for details.
-+
-+Note that this sets the location for time.Time values but does not change MySQL's [time_zone setting](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html). For that see the [time_zone system variable](#system-variables), which can also be set as a DSN parameter.
-+
-+Please keep in mind, that param values must be [url.QueryEscape](https://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`.
-+
-+##### `maxAllowedPacket`
-+```
-+Type: decimal number
-+Default: 4194304
-+```
-+
-+Max packet size allowed in bytes. The default value is 4 MiB and should be adjusted to match the server settings. `maxAllowedPacket=0` can be used to automatically fetch the `max_allowed_packet` variable from server *on every connection*.
-+
-+##### `multiStatements`
-+
-+```
-+Type: bool
-+Valid Values: true, false
-+Default: false
-+```
-+
-+Allow multiple statements in one query. While this allows batch queries, it also greatly increases the risk of SQL injections. Only the result of the first query is returned, all other results are silently discarded.
-+
-+When `multiStatements` is used, `?` parameters must only be used in the first statement.
-+
-+##### `parseTime`
-+
-+```
-+Type: bool
-+Valid Values: true, false
-+Default: false
-+```
-+
-+`parseTime=true` changes the output type of `DATE` and `DATETIME` values to `time.Time` instead of `[]byte` / `string`
-+The date or datetime like `0000-00-00 00:00:00` is converted into zero value of `time.Time`.
-+
-+
-+##### `readTimeout`
-+
-+```
-+Type: duration
-+Default: 0
-+```
-+
-+I/O read timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
-+
-+##### `rejectReadOnly`
-+
-+```
-+Type: bool
-+Valid Values: true, false
-+Default: false
-+```
-+
-+
-+`rejectReadOnly=true` causes the driver to reject read-only connections. This
-+is for a possible race condition during an automatic failover, where the mysql
-+client gets connected to a read-only replica after the failover.
-+
-+Note that this should be a fairly rare case, as an automatic failover normally
-+happens when the primary is down, and the race condition shouldn't happen
-+unless it comes back up online as soon as the failover is kicked off. On the
-+other hand, when this happens, a MySQL application can get stuck on a
-+read-only connection until restarted. It is however fairly easy to reproduce,
-+for example, using a manual failover on AWS Aurora's MySQL-compatible cluster.
-+
-+If you are not relying on read-only transactions to reject writes that aren't
-+supposed to happen, setting this on some MySQL providers (such as AWS Aurora)
-+is safer for failovers.
-+
-+Note that ERROR 1290 can be returned for a `read-only` server and this option will
-+cause a retry for that error. However the same error number is used for some
-+other cases. You should ensure your application will never cause an ERROR 1290
-+except for `read-only` mode when enabling this option.
-+
-+
-+##### `serverPubKey`
-+
-+```
-+Type: string
-+Valid Values:
-+Default: none
-+```
-+
-+Server public keys can be registered with [`mysql.RegisterServerPubKey`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterServerPubKey), which can then be used by the assigned name in the DSN.
-+Public keys are used to transmit encrypted data, e.g. for authentication.
-+If the server's public key is known, it should be set manually to avoid expensive and potentially insecure transmissions of the public key from the server to the client each time it is required.
-+
-+
-+##### `timeout`
-+
-+```
-+Type: duration
-+Default: OS default
-+```
-+
-+Timeout for establishing connections, aka dial timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
-+
-+
-+##### `tls`
-+
-+```
-+Type: bool / string
-+Valid Values: true, false, skip-verify, preferred,
-+Default: false
-+```
-+
-+`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side) or use `preferred` to use TLS only when advertised by the server. This is similar to `skip-verify`, but additionally allows a fallback to a connection which is not encrypted. Neither `skip-verify` nor `preferred` add any reliable security. You can use a custom TLS config after registering it with [`mysql.RegisterTLSConfig`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig).
-+
-+
-+##### `writeTimeout`
-+
-+```
-+Type: duration
-+Default: 0
-+```
-+
-+I/O write timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
-+
-+
-+##### System Variables
-+
-+Any other parameters are interpreted as system variables:
-+ * `=`: `SET =`
-+ * `=`: `SET =`
-+ * `=%27%27`: `SET =''`
-+
-+Rules:
-+* The values for string variables must be quoted with `'`.
-+* The values must also be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed!
-+ (which implies values of string variables must be wrapped with `%27`).
-+
-+Examples:
-+ * `autocommit=1`: `SET autocommit=1`
-+ * [`time_zone=%27Europe%2FParis%27`](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html): `SET time_zone='Europe/Paris'`
-+ * [`tx_isolation=%27REPEATABLE-READ%27`](https://dev.mysql.com/doc/refman/5.5/en/server-system-variables.html#sysvar_tx_isolation): `SET tx_isolation='REPEATABLE-READ'`
-+
-+
-+#### Examples
-+```
-+user@unix(/path/to/socket)/dbname
-+```
-+
-+```
-+root:pw@unix(/tmp/mysql.sock)/myDatabase?loc=Local
-+```
-+
-+```
-+user:password@tcp(localhost:5555)/dbname?tls=skip-verify&autocommit=true
-+```
-+
-+Treat warnings as errors by setting the system variable [`sql_mode`](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html):
-+```
-+user:password@/dbname?sql_mode=TRADITIONAL
-+```
-+
-+TCP via IPv6:
-+```
-+user:password@tcp([de:ad:be:ef::ca:fe]:80)/dbname?timeout=90s&collation=utf8mb4_unicode_ci
-+```
-+
-+TCP on a remote host, e.g. Amazon RDS:
-+```
-+id:password@tcp(your-amazonaws-uri.com:3306)/dbname
-+```
-+
-+Google Cloud SQL on App Engine:
-+```
-+user:password@unix(/cloudsql/project-id:region-name:instance-name)/dbname
-+```
-+
-+TCP using default port (3306) on localhost:
-+```
-+user:password@tcp/dbname?charset=utf8mb4,utf8&sys_var=esc%40ped
-+```
-+
-+Use the default protocol (tcp) and host (localhost:3306):
-+```
-+user:password@/dbname
-+```
-+
-+No Database preselected:
-+```
-+user:password@/
-+```
-+
-+
-+### Connection pool and timeouts
-+The connection pool is managed by Go's database/sql package. For details on how to configure the size of the pool and how long connections stay in the pool see `*DB.SetMaxOpenConns`, `*DB.SetMaxIdleConns`, and `*DB.SetConnMaxLifetime` in the [database/sql documentation](https://golang.org/pkg/database/sql/). The read, write, and dial timeouts for each individual connection are configured with the DSN parameters [`readTimeout`](#readtimeout), [`writeTimeout`](#writetimeout), and [`timeout`](#timeout), respectively.
-+
-+## `ColumnType` Support
-+This driver supports the [`ColumnType` interface](https://golang.org/pkg/database/sql/#ColumnType) introduced in Go 1.8, with the exception of [`ColumnType.Length()`](https://golang.org/pkg/database/sql/#ColumnType.Length), which is currently not supported.
-+
-+## `context.Context` Support
-+Go 1.8 added `database/sql` support for `context.Context`. This driver supports query timeouts and cancellation via contexts.
-+See [context support in the database/sql package](https://golang.org/doc/go1.8#database_sql) for more details.
-+
-+
-+### `LOAD DATA LOCAL INFILE` support
-+For this feature you need direct access to the package. Therefore you must change the import path (no `_`):
-+```go
-+import "github.com/go-sql-driver/mysql"
-+```
-+
-+Files must be whitelisted by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the Whitelist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)).
-+
-+To use a `io.Reader` a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)` which returns a `io.Reader` or `io.ReadCloser`. The Reader is available with the filepath `Reader::` then. Choose different names for different handlers and `DeregisterReaderHandler` when you don't need it anymore.
-+
-+See the [godoc of Go-MySQL-Driver](https://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details.
-+
-+
-+### `time.Time` support
-+The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte` which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your program.
-+
-+However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical equivalent in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](https://golang.org/pkg/time/#Location) with the `loc` DSN parameter.
-+
-+**Caution:** As of Go 1.1, this makes `time.Time` the only variable type you can scan `DATE` and `DATETIME` values into. This breaks for example [`sql.RawBytes` support](https://github.com/go-sql-driver/mysql/wiki/Examples#rawbytes).
-+
-+Alternatively you can use the [`NullTime`](https://godoc.org/github.com/go-sql-driver/mysql#NullTime) type as the scan destination, which works with both `time.Time` and `string` / `[]byte`.
-+
-+
-+### Unicode support
-+Since version 1.1 Go-MySQL-Driver automatically uses the collation `utf8_general_ci` by default.
-+
-+Other collations / charsets can be set using the [`collation`](#collation) DSN parameter.
-+
-+Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAMES utf8`) to the DSN to enable proper UTF-8 support. This is not necessary anymore. The [`collation`](#collation) parameter should be preferred to set another collation / charset than the default.
-+
-+See http://dev.mysql.com/doc/refman/5.7/en/charset-unicode.html for more details on MySQL's Unicode support.
-+
-+## Testing / Development
-+To run the driver tests you may need to adjust the configuration. See the [Testing Wiki-Page](https://github.com/go-sql-driver/mysql/wiki/Testing "Testing") for details.
-+
-+Go-MySQL-Driver is not feature-complete yet. Your help is very appreciated.
-+If you want to contribute, you can work on an [open issue](https://github.com/go-sql-driver/mysql/issues?state=open) or review a [pull request](https://github.com/go-sql-driver/mysql/pulls).
-+
-+See the [Contribution Guidelines](https://github.com/go-sql-driver/mysql/blob/master/CONTRIBUTING.md) for details.
-+
-+---------------------------------------
-+
-+## License
-+Go-MySQL-Driver is licensed under the [Mozilla Public License Version 2.0](https://raw.github.com/go-sql-driver/mysql/master/LICENSE)
-+
-+Mozilla summarizes the license scope as follows:
-+> MPL: The copyleft applies to any files containing MPLed code.
-+
-+
-+That means:
-+ * You can **use** the **unchanged** source code both in private and commercially.
-+ * When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0).
-+ * You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged**.
-+
-+Please read the [MPL 2.0 FAQ](https://www.mozilla.org/en-US/MPL/2.0/FAQ/) if you have further questions regarding the license.
-+
-+You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE).
-+
-+![Go Gopher and MySQL Dolphin](https://raw.github.com/wiki/go-sql-driver/mysql/go-mysql-driver_m.jpg "Golang Gopher transporting the MySQL Dolphin in a wheelbarrow")
-diff --git a/vendor/github.com/go-sql-driver/mysql/auth.go b/vendor/github.com/go-sql-driver/mysql/auth.go
-new file mode 100644
-index 00000000000..fec7040d4a2
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/auth.go
-@@ -0,0 +1,422 @@
-+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-+//
-+// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved.
-+//
-+// This Source Code Form is subject to the terms of the Mozilla Public
-+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-+// You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+package mysql
-+
-+import (
-+ "crypto/rand"
-+ "crypto/rsa"
-+ "crypto/sha1"
-+ "crypto/sha256"
-+ "crypto/x509"
-+ "encoding/pem"
-+ "sync"
-+)
-+
-+// server pub keys registry
-+var (
-+ serverPubKeyLock sync.RWMutex
-+ serverPubKeyRegistry map[string]*rsa.PublicKey
-+)
-+
-+// RegisterServerPubKey registers a server RSA public key which can be used to
-+// send data in a secure manner to the server without receiving the public key
-+// in a potentially insecure way from the server first.
-+// Registered keys can afterwards be used adding serverPubKey= to the DSN.
-+//
-+// Note: The provided rsa.PublicKey instance is exclusively owned by the driver
-+// after registering it and may not be modified.
-+//
-+// data, err := ioutil.ReadFile("mykey.pem")
-+// if err != nil {
-+// log.Fatal(err)
-+// }
-+//
-+// block, _ := pem.Decode(data)
-+// if block == nil || block.Type != "PUBLIC KEY" {
-+// log.Fatal("failed to decode PEM block containing public key")
-+// }
-+//
-+// pub, err := x509.ParsePKIXPublicKey(block.Bytes)
-+// if err != nil {
-+// log.Fatal(err)
-+// }
-+//
-+// if rsaPubKey, ok := pub.(*rsa.PublicKey); ok {
-+// mysql.RegisterServerPubKey("mykey", rsaPubKey)
-+// } else {
-+// log.Fatal("not a RSA public key")
-+// }
-+//
-+func RegisterServerPubKey(name string, pubKey *rsa.PublicKey) {
-+ serverPubKeyLock.Lock()
-+ if serverPubKeyRegistry == nil {
-+ serverPubKeyRegistry = make(map[string]*rsa.PublicKey)
-+ }
-+
-+ serverPubKeyRegistry[name] = pubKey
-+ serverPubKeyLock.Unlock()
-+}
-+
-+// DeregisterServerPubKey removes the public key registered with the given name.
-+func DeregisterServerPubKey(name string) {
-+ serverPubKeyLock.Lock()
-+ if serverPubKeyRegistry != nil {
-+ delete(serverPubKeyRegistry, name)
-+ }
-+ serverPubKeyLock.Unlock()
-+}
-+
-+func getServerPubKey(name string) (pubKey *rsa.PublicKey) {
-+ serverPubKeyLock.RLock()
-+ if v, ok := serverPubKeyRegistry[name]; ok {
-+ pubKey = v
-+ }
-+ serverPubKeyLock.RUnlock()
-+ return
-+}
-+
-+// Hash password using pre 4.1 (old password) method
-+// https://github.com/atcurtis/mariadb/blob/master/mysys/my_rnd.c
-+type myRnd struct {
-+ seed1, seed2 uint32
-+}
-+
-+const myRndMaxVal = 0x3FFFFFFF
-+
-+// Pseudo random number generator
-+func newMyRnd(seed1, seed2 uint32) *myRnd {
-+ return &myRnd{
-+ seed1: seed1 % myRndMaxVal,
-+ seed2: seed2 % myRndMaxVal,
-+ }
-+}
-+
-+// Tested to be equivalent to MariaDB's floating point variant
-+// http://play.golang.org/p/QHvhd4qved
-+// http://play.golang.org/p/RG0q4ElWDx
-+func (r *myRnd) NextByte() byte {
-+ r.seed1 = (r.seed1*3 + r.seed2) % myRndMaxVal
-+ r.seed2 = (r.seed1 + r.seed2 + 33) % myRndMaxVal
-+
-+ return byte(uint64(r.seed1) * 31 / myRndMaxVal)
-+}
-+
-+// Generate binary hash from byte string using insecure pre 4.1 method
-+func pwHash(password []byte) (result [2]uint32) {
-+ var add uint32 = 7
-+ var tmp uint32
-+
-+ result[0] = 1345345333
-+ result[1] = 0x12345671
-+
-+ for _, c := range password {
-+ // skip spaces and tabs in password
-+ if c == ' ' || c == '\t' {
-+ continue
-+ }
-+
-+ tmp = uint32(c)
-+ result[0] ^= (((result[0] & 63) + add) * tmp) + (result[0] << 8)
-+ result[1] += (result[1] << 8) ^ result[0]
-+ add += tmp
-+ }
-+
-+ // Remove sign bit (1<<31)-1)
-+ result[0] &= 0x7FFFFFFF
-+ result[1] &= 0x7FFFFFFF
-+
-+ return
-+}
-+
-+// Hash password using insecure pre 4.1 method
-+func scrambleOldPassword(scramble []byte, password string) []byte {
-+ if len(password) == 0 {
-+ return nil
-+ }
-+
-+ scramble = scramble[:8]
-+
-+ hashPw := pwHash([]byte(password))
-+ hashSc := pwHash(scramble)
-+
-+ r := newMyRnd(hashPw[0]^hashSc[0], hashPw[1]^hashSc[1])
-+
-+ var out [8]byte
-+ for i := range out {
-+ out[i] = r.NextByte() + 64
-+ }
-+
-+ mask := r.NextByte()
-+ for i := range out {
-+ out[i] ^= mask
-+ }
-+
-+ return out[:]
-+}
-+
-+// Hash password using 4.1+ method (SHA1)
-+func scramblePassword(scramble []byte, password string) []byte {
-+ if len(password) == 0 {
-+ return nil
-+ }
-+
-+ // stage1Hash = SHA1(password)
-+ crypt := sha1.New()
-+ crypt.Write([]byte(password))
-+ stage1 := crypt.Sum(nil)
-+
-+ // scrambleHash = SHA1(scramble + SHA1(stage1Hash))
-+ // inner Hash
-+ crypt.Reset()
-+ crypt.Write(stage1)
-+ hash := crypt.Sum(nil)
-+
-+ // outer Hash
-+ crypt.Reset()
-+ crypt.Write(scramble)
-+ crypt.Write(hash)
-+ scramble = crypt.Sum(nil)
-+
-+ // token = scrambleHash XOR stage1Hash
-+ for i := range scramble {
-+ scramble[i] ^= stage1[i]
-+ }
-+ return scramble
-+}
-+
-+// Hash password using MySQL 8+ method (SHA256)
-+func scrambleSHA256Password(scramble []byte, password string) []byte {
-+ if len(password) == 0 {
-+ return nil
-+ }
-+
-+ // XOR(SHA256(password), SHA256(SHA256(SHA256(password)), scramble))
-+
-+ crypt := sha256.New()
-+ crypt.Write([]byte(password))
-+ message1 := crypt.Sum(nil)
-+
-+ crypt.Reset()
-+ crypt.Write(message1)
-+ message1Hash := crypt.Sum(nil)
-+
-+ crypt.Reset()
-+ crypt.Write(message1Hash)
-+ crypt.Write(scramble)
-+ message2 := crypt.Sum(nil)
-+
-+ for i := range message1 {
-+ message1[i] ^= message2[i]
-+ }
-+
-+ return message1
-+}
-+
-+func encryptPassword(password string, seed []byte, pub *rsa.PublicKey) ([]byte, error) {
-+ plain := make([]byte, len(password)+1)
-+ copy(plain, password)
-+ for i := range plain {
-+ j := i % len(seed)
-+ plain[i] ^= seed[j]
-+ }
-+ sha1 := sha1.New()
-+ return rsa.EncryptOAEP(sha1, rand.Reader, pub, plain, nil)
-+}
-+
-+func (mc *mysqlConn) sendEncryptedPassword(seed []byte, pub *rsa.PublicKey) error {
-+ enc, err := encryptPassword(mc.cfg.Passwd, seed, pub)
-+ if err != nil {
-+ return err
-+ }
-+ return mc.writeAuthSwitchPacket(enc)
-+}
-+
-+func (mc *mysqlConn) auth(authData []byte, plugin string) ([]byte, error) {
-+ switch plugin {
-+ case "caching_sha2_password":
-+ authResp := scrambleSHA256Password(authData, mc.cfg.Passwd)
-+ return authResp, nil
-+
-+ case "mysql_old_password":
-+ if !mc.cfg.AllowOldPasswords {
-+ return nil, ErrOldPassword
-+ }
-+ // Note: there are edge cases where this should work but doesn't;
-+ // this is currently "wontfix":
-+ // https://github.com/go-sql-driver/mysql/issues/184
-+ authResp := append(scrambleOldPassword(authData[:8], mc.cfg.Passwd), 0)
-+ return authResp, nil
-+
-+ case "mysql_clear_password":
-+ if !mc.cfg.AllowCleartextPasswords {
-+ return nil, ErrCleartextPassword
-+ }
-+ // http://dev.mysql.com/doc/refman/5.7/en/cleartext-authentication-plugin.html
-+ // http://dev.mysql.com/doc/refman/5.7/en/pam-authentication-plugin.html
-+ return append([]byte(mc.cfg.Passwd), 0), nil
-+
-+ case "mysql_native_password":
-+ if !mc.cfg.AllowNativePasswords {
-+ return nil, ErrNativePassword
-+ }
-+ // https://dev.mysql.com/doc/internals/en/secure-password-authentication.html
-+ // Native password authentication only need and will need 20-byte challenge.
-+ authResp := scramblePassword(authData[:20], mc.cfg.Passwd)
-+ return authResp, nil
-+
-+ case "sha256_password":
-+ if len(mc.cfg.Passwd) == 0 {
-+ return []byte{0}, nil
-+ }
-+ if mc.cfg.tls != nil || mc.cfg.Net == "unix" {
-+ // write cleartext auth packet
-+ return append([]byte(mc.cfg.Passwd), 0), nil
-+ }
-+
-+ pubKey := mc.cfg.pubKey
-+ if pubKey == nil {
-+ // request public key from server
-+ return []byte{1}, nil
-+ }
-+
-+ // encrypted password
-+ enc, err := encryptPassword(mc.cfg.Passwd, authData, pubKey)
-+ return enc, err
-+
-+ default:
-+ errLog.Print("unknown auth plugin:", plugin)
-+ return nil, ErrUnknownPlugin
-+ }
-+}
-+
-+func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
-+ // Read Result Packet
-+ authData, newPlugin, err := mc.readAuthResult()
-+ if err != nil {
-+ return err
-+ }
-+
-+ // handle auth plugin switch, if requested
-+ if newPlugin != "" {
-+ // If CLIENT_PLUGIN_AUTH capability is not supported, no new cipher is
-+ // sent and we have to keep using the cipher sent in the init packet.
-+ if authData == nil {
-+ authData = oldAuthData
-+ } else {
-+ // copy data from read buffer to owned slice
-+ copy(oldAuthData, authData)
-+ }
-+
-+ plugin = newPlugin
-+
-+ authResp, err := mc.auth(authData, plugin)
-+ if err != nil {
-+ return err
-+ }
-+ if err = mc.writeAuthSwitchPacket(authResp); err != nil {
-+ return err
-+ }
-+
-+ // Read Result Packet
-+ authData, newPlugin, err = mc.readAuthResult()
-+ if err != nil {
-+ return err
-+ }
-+
-+ // Do not allow to change the auth plugin more than once
-+ if newPlugin != "" {
-+ return ErrMalformPkt
-+ }
-+ }
-+
-+ switch plugin {
-+
-+ // https://insidemysql.com/preparing-your-community-connector-for-mysql-8-part-2-sha256/
-+ case "caching_sha2_password":
-+ switch len(authData) {
-+ case 0:
-+ return nil // auth successful
-+ case 1:
-+ switch authData[0] {
-+ case cachingSha2PasswordFastAuthSuccess:
-+ if err = mc.readResultOK(); err == nil {
-+ return nil // auth successful
-+ }
-+
-+ case cachingSha2PasswordPerformFullAuthentication:
-+ if mc.cfg.tls != nil || mc.cfg.Net == "unix" {
-+ // write cleartext auth packet
-+ err = mc.writeAuthSwitchPacket(append([]byte(mc.cfg.Passwd), 0))
-+ if err != nil {
-+ return err
-+ }
-+ } else {
-+ pubKey := mc.cfg.pubKey
-+ if pubKey == nil {
-+ // request public key from server
-+ data, err := mc.buf.takeSmallBuffer(4 + 1)
-+ if err != nil {
-+ return err
-+ }
-+ data[4] = cachingSha2PasswordRequestPublicKey
-+ mc.writePacket(data)
-+
-+ // parse public key
-+ if data, err = mc.readPacket(); err != nil {
-+ return err
-+ }
-+
-+ block, _ := pem.Decode(data[1:])
-+ pkix, err := x509.ParsePKIXPublicKey(block.Bytes)
-+ if err != nil {
-+ return err
-+ }
-+ pubKey = pkix.(*rsa.PublicKey)
-+ }
-+
-+ // send encrypted password
-+ err = mc.sendEncryptedPassword(oldAuthData, pubKey)
-+ if err != nil {
-+ return err
-+ }
-+ }
-+ return mc.readResultOK()
-+
-+ default:
-+ return ErrMalformPkt
-+ }
-+ default:
-+ return ErrMalformPkt
-+ }
-+
-+ case "sha256_password":
-+ switch len(authData) {
-+ case 0:
-+ return nil // auth successful
-+ default:
-+ block, _ := pem.Decode(authData)
-+ pub, err := x509.ParsePKIXPublicKey(block.Bytes)
-+ if err != nil {
-+ return err
-+ }
-+
-+ // send encrypted password
-+ err = mc.sendEncryptedPassword(oldAuthData, pub.(*rsa.PublicKey))
-+ if err != nil {
-+ return err
-+ }
-+ return mc.readResultOK()
-+ }
-+
-+ default:
-+ return nil // auth successful
-+ }
-+
-+ return err
-+}
-diff --git a/vendor/github.com/go-sql-driver/mysql/auth_test.go b/vendor/github.com/go-sql-driver/mysql/auth_test.go
-new file mode 100644
-index 00000000000..1920ef39f12
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/auth_test.go
-@@ -0,0 +1,1330 @@
-+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-+//
-+// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved.
-+//
-+// This Source Code Form is subject to the terms of the Mozilla Public
-+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-+// You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+package mysql
-+
-+import (
-+ "bytes"
-+ "crypto/rsa"
-+ "crypto/tls"
-+ "crypto/x509"
-+ "encoding/pem"
-+ "fmt"
-+ "testing"
-+)
-+
-+var testPubKey = []byte("-----BEGIN PUBLIC KEY-----\n" +
-+ "MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAol0Z8G8U+25Btxk/g/fm\n" +
-+ "UAW/wEKjQCTjkibDE4B+qkuWeiumg6miIRhtilU6m9BFmLQSy1ltYQuu4k17A4tQ\n" +
-+ "rIPpOQYZges/qsDFkZh3wyK5jL5WEFVdOasf6wsfszExnPmcZS4axxoYJfiuilrN\n" +
-+ "hnwinBAqfi3S0sw5MpSI4Zl1AbOrHG4zDI62Gti2PKiMGyYDZTS9xPrBLbN95Kby\n" +
-+ "FFclQLEzA9RJcS1nHFsWtRgHjGPhhjCQxEm9NQ1nePFhCfBfApyfH1VM2VCOQum6\n" +
-+ "Ci9bMuHWjTjckC84mzF99kOxOWVU7mwS6gnJqBzpuz8t3zq8/iQ2y7QrmZV+jTJP\n" +
-+ "WQIDAQAB\n" +
-+ "-----END PUBLIC KEY-----\n")
-+
-+var testPubKeyRSA *rsa.PublicKey
-+
-+func init() {
-+ block, _ := pem.Decode(testPubKey)
-+ pub, err := x509.ParsePKIXPublicKey(block.Bytes)
-+ if err != nil {
-+ panic(err)
-+ }
-+ testPubKeyRSA = pub.(*rsa.PublicKey)
-+}
-+
-+func TestScrambleOldPass(t *testing.T) {
-+ scramble := []byte{9, 8, 7, 6, 5, 4, 3, 2}
-+ vectors := []struct {
-+ pass string
-+ out string
-+ }{
-+ {" pass", "47575c5a435b4251"},
-+ {"pass ", "47575c5a435b4251"},
-+ {"123\t456", "575c47505b5b5559"},
-+ {"C0mpl!ca ted#PASS123", "5d5d554849584a45"},
-+ }
-+ for _, tuple := range vectors {
-+ ours := scrambleOldPassword(scramble, tuple.pass)
-+ if tuple.out != fmt.Sprintf("%x", ours) {
-+ t.Errorf("Failed old password %q", tuple.pass)
-+ }
-+ }
-+}
-+
-+func TestScrambleSHA256Pass(t *testing.T) {
-+ scramble := []byte{10, 47, 74, 111, 75, 73, 34, 48, 88, 76, 114, 74, 37, 13, 3, 80, 82, 2, 23, 21}
-+ vectors := []struct {
-+ pass string
-+ out string
-+ }{
-+ {"secret", "f490e76f66d9d86665ce54d98c78d0acfe2fb0b08b423da807144873d30b312c"},
-+ {"secret2", "abc3934a012cf342e876071c8ee202de51785b430258a7a0138bc79c4d800bc6"},
-+ }
-+ for _, tuple := range vectors {
-+ ours := scrambleSHA256Password(scramble, tuple.pass)
-+ if tuple.out != fmt.Sprintf("%x", ours) {
-+ t.Errorf("Failed SHA256 password %q", tuple.pass)
-+ }
-+ }
-+}
-+
-+func TestAuthFastCachingSHA256PasswordCached(t *testing.T) {
-+ conn, mc := newRWMockConn(1)
-+ mc.cfg.User = "root"
-+ mc.cfg.Passwd = "secret"
-+
-+ authData := []byte{90, 105, 74, 126, 30, 48, 37, 56, 3, 23, 115, 127, 69,
-+ 22, 41, 84, 32, 123, 43, 118}
-+ plugin := "caching_sha2_password"
-+
-+ // Send Client Authentication Packet
-+ authResp, err := mc.auth(authData, plugin)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ err = mc.writeHandshakeResponsePacket(authResp, plugin)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ // check written auth response
-+ authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
-+ authRespEnd := authRespStart + 1 + len(authResp)
-+ writtenAuthRespLen := conn.written[authRespStart]
-+ writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
-+ expectedAuthResp := []byte{102, 32, 5, 35, 143, 161, 140, 241, 171, 232, 56,
-+ 139, 43, 14, 107, 196, 249, 170, 147, 60, 220, 204, 120, 178, 214, 15,
-+ 184, 150, 26, 61, 57, 235}
-+ if writtenAuthRespLen != 32 || !bytes.Equal(writtenAuthResp, expectedAuthResp) {
-+ t.Fatalf("unexpected written auth response (%d bytes): %v", writtenAuthRespLen, writtenAuthResp)
-+ }
-+ conn.written = nil
-+
-+ // auth response
-+ conn.data = []byte{
-+ 2, 0, 0, 2, 1, 3, // Fast Auth Success
-+ 7, 0, 0, 3, 0, 0, 0, 2, 0, 0, 0, // OK
-+ }
-+ conn.maxReads = 1
-+
-+ // Handle response to auth packet
-+ if err := mc.handleAuthResult(authData, plugin); err != nil {
-+ t.Errorf("got error: %v", err)
-+ }
-+}
-+
-+func TestAuthFastCachingSHA256PasswordEmpty(t *testing.T) {
-+ conn, mc := newRWMockConn(1)
-+ mc.cfg.User = "root"
-+ mc.cfg.Passwd = ""
-+
-+ authData := []byte{90, 105, 74, 126, 30, 48, 37, 56, 3, 23, 115, 127, 69,
-+ 22, 41, 84, 32, 123, 43, 118}
-+ plugin := "caching_sha2_password"
-+
-+ // Send Client Authentication Packet
-+ authResp, err := mc.auth(authData, plugin)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ err = mc.writeHandshakeResponsePacket(authResp, plugin)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ // check written auth response
-+ authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
-+ authRespEnd := authRespStart + 1 + len(authResp)
-+ writtenAuthRespLen := conn.written[authRespStart]
-+ writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
-+ if writtenAuthRespLen != 0 {
-+ t.Fatalf("unexpected written auth response (%d bytes): %v",
-+ writtenAuthRespLen, writtenAuthResp)
-+ }
-+ conn.written = nil
-+
-+ // auth response
-+ conn.data = []byte{
-+ 7, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, // OK
-+ }
-+ conn.maxReads = 1
-+
-+ // Handle response to auth packet
-+ if err := mc.handleAuthResult(authData, plugin); err != nil {
-+ t.Errorf("got error: %v", err)
-+ }
-+}
-+
-+func TestAuthFastCachingSHA256PasswordFullRSA(t *testing.T) {
-+ conn, mc := newRWMockConn(1)
-+ mc.cfg.User = "root"
-+ mc.cfg.Passwd = "secret"
-+
-+ authData := []byte{6, 81, 96, 114, 14, 42, 50, 30, 76, 47, 1, 95, 126, 81,
-+ 62, 94, 83, 80, 52, 85}
-+ plugin := "caching_sha2_password"
-+
-+ // Send Client Authentication Packet
-+ authResp, err := mc.auth(authData, plugin)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ err = mc.writeHandshakeResponsePacket(authResp, plugin)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ // check written auth response
-+ authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
-+ authRespEnd := authRespStart + 1 + len(authResp)
-+ writtenAuthRespLen := conn.written[authRespStart]
-+ writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
-+ expectedAuthResp := []byte{171, 201, 138, 146, 89, 159, 11, 170, 0, 67, 165,
-+ 49, 175, 94, 218, 68, 177, 109, 110, 86, 34, 33, 44, 190, 67, 240, 70,
-+ 110, 40, 139, 124, 41}
-+ if writtenAuthRespLen != 32 || !bytes.Equal(writtenAuthResp, expectedAuthResp) {
-+ t.Fatalf("unexpected written auth response (%d bytes): %v", writtenAuthRespLen, writtenAuthResp)
-+ }
-+ conn.written = nil
-+
-+ // auth response
-+ conn.data = []byte{
-+ 2, 0, 0, 2, 1, 4, // Perform Full Authentication
-+ }
-+ conn.queuedReplies = [][]byte{
-+ // pub key response
-+ append([]byte{byte(1 + len(testPubKey)), 1, 0, 4, 1}, testPubKey...),
-+
-+ // OK
-+ {7, 0, 0, 6, 0, 0, 0, 2, 0, 0, 0},
-+ }
-+ conn.maxReads = 3
-+
-+ // Handle response to auth packet
-+ if err := mc.handleAuthResult(authData, plugin); err != nil {
-+ t.Errorf("got error: %v", err)
-+ }
-+
-+ if !bytes.HasPrefix(conn.written, []byte{1, 0, 0, 3, 2, 0, 1, 0, 5}) {
-+ t.Errorf("unexpected written data: %v", conn.written)
-+ }
-+}
-+
-+func TestAuthFastCachingSHA256PasswordFullRSAWithKey(t *testing.T) {
-+ conn, mc := newRWMockConn(1)
-+ mc.cfg.User = "root"
-+ mc.cfg.Passwd = "secret"
-+ mc.cfg.pubKey = testPubKeyRSA
-+
-+ authData := []byte{6, 81, 96, 114, 14, 42, 50, 30, 76, 47, 1, 95, 126, 81,
-+ 62, 94, 83, 80, 52, 85}
-+ plugin := "caching_sha2_password"
-+
-+ // Send Client Authentication Packet
-+ authResp, err := mc.auth(authData, plugin)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ err = mc.writeHandshakeResponsePacket(authResp, plugin)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ // check written auth response
-+ authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
-+ authRespEnd := authRespStart + 1 + len(authResp)
-+ writtenAuthRespLen := conn.written[authRespStart]
-+ writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
-+ expectedAuthResp := []byte{171, 201, 138, 146, 89, 159, 11, 170, 0, 67, 165,
-+ 49, 175, 94, 218, 68, 177, 109, 110, 86, 34, 33, 44, 190, 67, 240, 70,
-+ 110, 40, 139, 124, 41}
-+ if writtenAuthRespLen != 32 || !bytes.Equal(writtenAuthResp, expectedAuthResp) {
-+ t.Fatalf("unexpected written auth response (%d bytes): %v", writtenAuthRespLen, writtenAuthResp)
-+ }
-+ conn.written = nil
-+
-+ // auth response
-+ conn.data = []byte{
-+ 2, 0, 0, 2, 1, 4, // Perform Full Authentication
-+ }
-+ conn.queuedReplies = [][]byte{
-+ // OK
-+ {7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0},
-+ }
-+ conn.maxReads = 2
-+
-+ // Handle response to auth packet
-+ if err := mc.handleAuthResult(authData, plugin); err != nil {
-+ t.Errorf("got error: %v", err)
-+ }
-+
-+ if !bytes.HasPrefix(conn.written, []byte{0, 1, 0, 3}) {
-+ t.Errorf("unexpected written data: %v", conn.written)
-+ }
-+}
-+
-+func TestAuthFastCachingSHA256PasswordFullSecure(t *testing.T) {
-+ conn, mc := newRWMockConn(1)
-+ mc.cfg.User = "root"
-+ mc.cfg.Passwd = "secret"
-+
-+ authData := []byte{6, 81, 96, 114, 14, 42, 50, 30, 76, 47, 1, 95, 126, 81,
-+ 62, 94, 83, 80, 52, 85}
-+ plugin := "caching_sha2_password"
-+
-+ // Send Client Authentication Packet
-+ authResp, err := mc.auth(authData, plugin)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ err = mc.writeHandshakeResponsePacket(authResp, plugin)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ // Hack to make the caching_sha2_password plugin believe that the connection
-+ // is secure
-+ mc.cfg.tls = &tls.Config{InsecureSkipVerify: true}
-+
-+ // check written auth response
-+ authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
-+ authRespEnd := authRespStart + 1 + len(authResp)
-+ writtenAuthRespLen := conn.written[authRespStart]
-+ writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
-+ expectedAuthResp := []byte{171, 201, 138, 146, 89, 159, 11, 170, 0, 67, 165,
-+ 49, 175, 94, 218, 68, 177, 109, 110, 86, 34, 33, 44, 190, 67, 240, 70,
-+ 110, 40, 139, 124, 41}
-+ if writtenAuthRespLen != 32 || !bytes.Equal(writtenAuthResp, expectedAuthResp) {
-+ t.Fatalf("unexpected written auth response (%d bytes): %v", writtenAuthRespLen, writtenAuthResp)
-+ }
-+ conn.written = nil
-+
-+ // auth response
-+ conn.data = []byte{
-+ 2, 0, 0, 2, 1, 4, // Perform Full Authentication
-+ }
-+ conn.queuedReplies = [][]byte{
-+ // OK
-+ {7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0},
-+ }
-+ conn.maxReads = 3
-+
-+ // Handle response to auth packet
-+ if err := mc.handleAuthResult(authData, plugin); err != nil {
-+ t.Errorf("got error: %v", err)
-+ }
-+
-+ if !bytes.Equal(conn.written, []byte{7, 0, 0, 3, 115, 101, 99, 114, 101, 116, 0}) {
-+ t.Errorf("unexpected written data: %v", conn.written)
-+ }
-+}
-+
-+func TestAuthFastCleartextPasswordNotAllowed(t *testing.T) {
-+ _, mc := newRWMockConn(1)
-+ mc.cfg.User = "root"
-+ mc.cfg.Passwd = "secret"
-+
-+ authData := []byte{70, 114, 92, 94, 1, 38, 11, 116, 63, 114, 23, 101, 126,
-+ 103, 26, 95, 81, 17, 24, 21}
-+ plugin := "mysql_clear_password"
-+
-+ // Send Client Authentication Packet
-+ _, err := mc.auth(authData, plugin)
-+ if err != ErrCleartextPassword {
-+ t.Errorf("expected ErrCleartextPassword, got %v", err)
-+ }
-+}
-+
-+func TestAuthFastCleartextPassword(t *testing.T) {
-+ conn, mc := newRWMockConn(1)
-+ mc.cfg.User = "root"
-+ mc.cfg.Passwd = "secret"
-+ mc.cfg.AllowCleartextPasswords = true
-+
-+ authData := []byte{70, 114, 92, 94, 1, 38, 11, 116, 63, 114, 23, 101, 126,
-+ 103, 26, 95, 81, 17, 24, 21}
-+ plugin := "mysql_clear_password"
-+
-+ // Send Client Authentication Packet
-+ authResp, err := mc.auth(authData, plugin)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ err = mc.writeHandshakeResponsePacket(authResp, plugin)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ // check written auth response
-+ authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
-+ authRespEnd := authRespStart + 1 + len(authResp)
-+ writtenAuthRespLen := conn.written[authRespStart]
-+ writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
-+ expectedAuthResp := []byte{115, 101, 99, 114, 101, 116, 0}
-+ if writtenAuthRespLen != 7 || !bytes.Equal(writtenAuthResp, expectedAuthResp) {
-+ t.Fatalf("unexpected written auth response (%d bytes): %v", writtenAuthRespLen, writtenAuthResp)
-+ }
-+ conn.written = nil
-+
-+ // auth response
-+ conn.data = []byte{
-+ 7, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, // OK
-+ }
-+ conn.maxReads = 1
-+
-+ // Handle response to auth packet
-+ if err := mc.handleAuthResult(authData, plugin); err != nil {
-+ t.Errorf("got error: %v", err)
-+ }
-+}
-+
-+func TestAuthFastCleartextPasswordEmpty(t *testing.T) {
-+ conn, mc := newRWMockConn(1)
-+ mc.cfg.User = "root"
-+ mc.cfg.Passwd = ""
-+ mc.cfg.AllowCleartextPasswords = true
-+
-+ authData := []byte{70, 114, 92, 94, 1, 38, 11, 116, 63, 114, 23, 101, 126,
-+ 103, 26, 95, 81, 17, 24, 21}
-+ plugin := "mysql_clear_password"
-+
-+ // Send Client Authentication Packet
-+ authResp, err := mc.auth(authData, plugin)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ err = mc.writeHandshakeResponsePacket(authResp, plugin)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ // check written auth response
-+ authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
-+ authRespEnd := authRespStart + 1 + len(authResp)
-+ writtenAuthRespLen := conn.written[authRespStart]
-+ writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
-+ expectedAuthResp := []byte{0}
-+ if writtenAuthRespLen != 1 || !bytes.Equal(writtenAuthResp, expectedAuthResp) {
-+ t.Fatalf("unexpected written auth response (%d bytes): %v", writtenAuthRespLen, writtenAuthResp)
-+ }
-+ conn.written = nil
-+
-+ // auth response
-+ conn.data = []byte{
-+ 7, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, // OK
-+ }
-+ conn.maxReads = 1
-+
-+ // Handle response to auth packet
-+ if err := mc.handleAuthResult(authData, plugin); err != nil {
-+ t.Errorf("got error: %v", err)
-+ }
-+}
-+
-+func TestAuthFastNativePasswordNotAllowed(t *testing.T) {
-+ _, mc := newRWMockConn(1)
-+ mc.cfg.User = "root"
-+ mc.cfg.Passwd = "secret"
-+ mc.cfg.AllowNativePasswords = false
-+
-+ authData := []byte{70, 114, 92, 94, 1, 38, 11, 116, 63, 114, 23, 101, 126,
-+ 103, 26, 95, 81, 17, 24, 21}
-+ plugin := "mysql_native_password"
-+
-+ // Send Client Authentication Packet
-+ _, err := mc.auth(authData, plugin)
-+ if err != ErrNativePassword {
-+ t.Errorf("expected ErrNativePassword, got %v", err)
-+ }
-+}
-+
-+func TestAuthFastNativePassword(t *testing.T) {
-+ conn, mc := newRWMockConn(1)
-+ mc.cfg.User = "root"
-+ mc.cfg.Passwd = "secret"
-+
-+ authData := []byte{70, 114, 92, 94, 1, 38, 11, 116, 63, 114, 23, 101, 126,
-+ 103, 26, 95, 81, 17, 24, 21}
-+ plugin := "mysql_native_password"
-+
-+ // Send Client Authentication Packet
-+ authResp, err := mc.auth(authData, plugin)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ err = mc.writeHandshakeResponsePacket(authResp, plugin)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ // check written auth response
-+ authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
-+ authRespEnd := authRespStart + 1 + len(authResp)
-+ writtenAuthRespLen := conn.written[authRespStart]
-+ writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
-+ expectedAuthResp := []byte{53, 177, 140, 159, 251, 189, 127, 53, 109, 252,
-+ 172, 50, 211, 192, 240, 164, 26, 48, 207, 45}
-+ if writtenAuthRespLen != 20 || !bytes.Equal(writtenAuthResp, expectedAuthResp) {
-+ t.Fatalf("unexpected written auth response (%d bytes): %v", writtenAuthRespLen, writtenAuthResp)
-+ }
-+ conn.written = nil
-+
-+ // auth response
-+ conn.data = []byte{
-+ 7, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, // OK
-+ }
-+ conn.maxReads = 1
-+
-+ // Handle response to auth packet
-+ if err := mc.handleAuthResult(authData, plugin); err != nil {
-+ t.Errorf("got error: %v", err)
-+ }
-+}
-+
-+func TestAuthFastNativePasswordEmpty(t *testing.T) {
-+ conn, mc := newRWMockConn(1)
-+ mc.cfg.User = "root"
-+ mc.cfg.Passwd = ""
-+
-+ authData := []byte{70, 114, 92, 94, 1, 38, 11, 116, 63, 114, 23, 101, 126,
-+ 103, 26, 95, 81, 17, 24, 21}
-+ plugin := "mysql_native_password"
-+
-+ // Send Client Authentication Packet
-+ authResp, err := mc.auth(authData, plugin)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ err = mc.writeHandshakeResponsePacket(authResp, plugin)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ // check written auth response
-+ authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
-+ authRespEnd := authRespStart + 1 + len(authResp)
-+ writtenAuthRespLen := conn.written[authRespStart]
-+ writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
-+ if writtenAuthRespLen != 0 {
-+ t.Fatalf("unexpected written auth response (%d bytes): %v",
-+ writtenAuthRespLen, writtenAuthResp)
-+ }
-+ conn.written = nil
-+
-+ // auth response
-+ conn.data = []byte{
-+ 7, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, // OK
-+ }
-+ conn.maxReads = 1
-+
-+ // Handle response to auth packet
-+ if err := mc.handleAuthResult(authData, plugin); err != nil {
-+ t.Errorf("got error: %v", err)
-+ }
-+}
-+
-+func TestAuthFastSHA256PasswordEmpty(t *testing.T) {
-+ conn, mc := newRWMockConn(1)
-+ mc.cfg.User = "root"
-+ mc.cfg.Passwd = ""
-+
-+ authData := []byte{6, 81, 96, 114, 14, 42, 50, 30, 76, 47, 1, 95, 126, 81,
-+ 62, 94, 83, 80, 52, 85}
-+ plugin := "sha256_password"
-+
-+ // Send Client Authentication Packet
-+ authResp, err := mc.auth(authData, plugin)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ err = mc.writeHandshakeResponsePacket(authResp, plugin)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ // check written auth response
-+ authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
-+ authRespEnd := authRespStart + 1 + len(authResp)
-+ writtenAuthRespLen := conn.written[authRespStart]
-+ writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
-+ expectedAuthResp := []byte{0}
-+ if writtenAuthRespLen != 1 || !bytes.Equal(writtenAuthResp, expectedAuthResp) {
-+ t.Fatalf("unexpected written auth response (%d bytes): %v", writtenAuthRespLen, writtenAuthResp)
-+ }
-+ conn.written = nil
-+
-+ // auth response (pub key response)
-+ conn.data = append([]byte{byte(1 + len(testPubKey)), 1, 0, 2, 1}, testPubKey...)
-+ conn.queuedReplies = [][]byte{
-+ // OK
-+ {7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0},
-+ }
-+ conn.maxReads = 2
-+
-+ // Handle response to auth packet
-+ if err := mc.handleAuthResult(authData, plugin); err != nil {
-+ t.Errorf("got error: %v", err)
-+ }
-+
-+ if !bytes.HasPrefix(conn.written, []byte{0, 1, 0, 3}) {
-+ t.Errorf("unexpected written data: %v", conn.written)
-+ }
-+}
-+
-+func TestAuthFastSHA256PasswordRSA(t *testing.T) {
-+ conn, mc := newRWMockConn(1)
-+ mc.cfg.User = "root"
-+ mc.cfg.Passwd = "secret"
-+
-+ authData := []byte{6, 81, 96, 114, 14, 42, 50, 30, 76, 47, 1, 95, 126, 81,
-+ 62, 94, 83, 80, 52, 85}
-+ plugin := "sha256_password"
-+
-+ // Send Client Authentication Packet
-+ authResp, err := mc.auth(authData, plugin)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ err = mc.writeHandshakeResponsePacket(authResp, plugin)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ // check written auth response
-+ authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
-+ authRespEnd := authRespStart + 1 + len(authResp)
-+ writtenAuthRespLen := conn.written[authRespStart]
-+ writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
-+ expectedAuthResp := []byte{1}
-+ if writtenAuthRespLen != 1 || !bytes.Equal(writtenAuthResp, expectedAuthResp) {
-+ t.Fatalf("unexpected written auth response (%d bytes): %v", writtenAuthRespLen, writtenAuthResp)
-+ }
-+ conn.written = nil
-+
-+ // auth response (pub key response)
-+ conn.data = append([]byte{byte(1 + len(testPubKey)), 1, 0, 2, 1}, testPubKey...)
-+ conn.queuedReplies = [][]byte{
-+ // OK
-+ {7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0},
-+ }
-+ conn.maxReads = 2
-+
-+ // Handle response to auth packet
-+ if err := mc.handleAuthResult(authData, plugin); err != nil {
-+ t.Errorf("got error: %v", err)
-+ }
-+
-+ if !bytes.HasPrefix(conn.written, []byte{0, 1, 0, 3}) {
-+ t.Errorf("unexpected written data: %v", conn.written)
-+ }
-+}
-+
-+func TestAuthFastSHA256PasswordRSAWithKey(t *testing.T) {
-+ conn, mc := newRWMockConn(1)
-+ mc.cfg.User = "root"
-+ mc.cfg.Passwd = "secret"
-+ mc.cfg.pubKey = testPubKeyRSA
-+
-+ authData := []byte{6, 81, 96, 114, 14, 42, 50, 30, 76, 47, 1, 95, 126, 81,
-+ 62, 94, 83, 80, 52, 85}
-+ plugin := "sha256_password"
-+
-+ // Send Client Authentication Packet
-+ authResp, err := mc.auth(authData, plugin)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ err = mc.writeHandshakeResponsePacket(authResp, plugin)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ // auth response (OK)
-+ conn.data = []byte{7, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0}
-+ conn.maxReads = 1
-+
-+ // Handle response to auth packet
-+ if err := mc.handleAuthResult(authData, plugin); err != nil {
-+ t.Errorf("got error: %v", err)
-+ }
-+}
-+
-+func TestAuthFastSHA256PasswordSecure(t *testing.T) {
-+ conn, mc := newRWMockConn(1)
-+ mc.cfg.User = "root"
-+ mc.cfg.Passwd = "secret"
-+
-+ // hack to make the caching_sha2_password plugin believe that the connection
-+ // is secure
-+ mc.cfg.tls = &tls.Config{InsecureSkipVerify: true}
-+
-+ authData := []byte{6, 81, 96, 114, 14, 42, 50, 30, 76, 47, 1, 95, 126, 81,
-+ 62, 94, 83, 80, 52, 85}
-+ plugin := "sha256_password"
-+
-+ // send Client Authentication Packet
-+ authResp, err := mc.auth(authData, plugin)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ // unset TLS config to prevent the actual establishment of a TLS wrapper
-+ mc.cfg.tls = nil
-+
-+ err = mc.writeHandshakeResponsePacket(authResp, plugin)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ // check written auth response
-+ authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
-+ authRespEnd := authRespStart + 1 + len(authResp)
-+ writtenAuthRespLen := conn.written[authRespStart]
-+ writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
-+ expectedAuthResp := []byte{115, 101, 99, 114, 101, 116, 0}
-+ if writtenAuthRespLen != 7 || !bytes.Equal(writtenAuthResp, expectedAuthResp) {
-+ t.Fatalf("unexpected written auth response (%d bytes): %v", writtenAuthRespLen, writtenAuthResp)
-+ }
-+ conn.written = nil
-+
-+ // auth response (OK)
-+ conn.data = []byte{7, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0}
-+ conn.maxReads = 1
-+
-+ // Handle response to auth packet
-+ if err := mc.handleAuthResult(authData, plugin); err != nil {
-+ t.Errorf("got error: %v", err)
-+ }
-+
-+ if !bytes.Equal(conn.written, []byte{}) {
-+ t.Errorf("unexpected written data: %v", conn.written)
-+ }
-+}
-+
-+func TestAuthSwitchCachingSHA256PasswordCached(t *testing.T) {
-+ conn, mc := newRWMockConn(2)
-+ mc.cfg.Passwd = "secret"
-+
-+ // auth switch request
-+ conn.data = []byte{44, 0, 0, 2, 254, 99, 97, 99, 104, 105, 110, 103, 95,
-+ 115, 104, 97, 50, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 101,
-+ 11, 26, 18, 94, 97, 22, 72, 2, 46, 70, 106, 29, 55, 45, 94, 76, 90, 84,
-+ 50, 0}
-+
-+ // auth response
-+ conn.queuedReplies = [][]byte{
-+ {7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0}, // OK
-+ }
-+ conn.maxReads = 3
-+
-+ authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
-+ 47, 43, 9, 41, 112, 67, 110}
-+ plugin := "mysql_native_password"
-+
-+ if err := mc.handleAuthResult(authData, plugin); err != nil {
-+ t.Errorf("got error: %v", err)
-+ }
-+
-+ expectedReply := []byte{
-+ // 1. Packet: Hash
-+ 32, 0, 0, 3, 129, 93, 132, 95, 114, 48, 79, 215, 128, 62, 193, 118, 128,
-+ 54, 75, 208, 159, 252, 227, 215, 129, 15, 242, 97, 19, 159, 31, 20, 58,
-+ 153, 9, 130,
-+ }
-+ if !bytes.Equal(conn.written, expectedReply) {
-+ t.Errorf("got unexpected data: %v", conn.written)
-+ }
-+}
-+
-+func TestAuthSwitchCachingSHA256PasswordEmpty(t *testing.T) {
-+ conn, mc := newRWMockConn(2)
-+ mc.cfg.Passwd = ""
-+
-+ // auth switch request
-+ conn.data = []byte{44, 0, 0, 2, 254, 99, 97, 99, 104, 105, 110, 103, 95,
-+ 115, 104, 97, 50, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 101,
-+ 11, 26, 18, 94, 97, 22, 72, 2, 46, 70, 106, 29, 55, 45, 94, 76, 90, 84,
-+ 50, 0}
-+
-+ // auth response
-+ conn.queuedReplies = [][]byte{{7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0}}
-+ conn.maxReads = 2
-+
-+ authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
-+ 47, 43, 9, 41, 112, 67, 110}
-+ plugin := "mysql_native_password"
-+
-+ if err := mc.handleAuthResult(authData, plugin); err != nil {
-+ t.Errorf("got error: %v", err)
-+ }
-+
-+ expectedReply := []byte{0, 0, 0, 3}
-+ if !bytes.Equal(conn.written, expectedReply) {
-+ t.Errorf("got unexpected data: %v", conn.written)
-+ }
-+}
-+
-+func TestAuthSwitchCachingSHA256PasswordFullRSA(t *testing.T) {
-+ conn, mc := newRWMockConn(2)
-+ mc.cfg.Passwd = "secret"
-+
-+ // auth switch request
-+ conn.data = []byte{44, 0, 0, 2, 254, 99, 97, 99, 104, 105, 110, 103, 95,
-+ 115, 104, 97, 50, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 101,
-+ 11, 26, 18, 94, 97, 22, 72, 2, 46, 70, 106, 29, 55, 45, 94, 76, 90, 84,
-+ 50, 0}
-+
-+ conn.queuedReplies = [][]byte{
-+ // Perform Full Authentication
-+ {2, 0, 0, 4, 1, 4},
-+
-+ // Pub Key Response
-+ append([]byte{byte(1 + len(testPubKey)), 1, 0, 6, 1}, testPubKey...),
-+
-+ // OK
-+ {7, 0, 0, 8, 0, 0, 0, 2, 0, 0, 0},
-+ }
-+ conn.maxReads = 4
-+
-+ authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
-+ 47, 43, 9, 41, 112, 67, 110}
-+ plugin := "mysql_native_password"
-+
-+ if err := mc.handleAuthResult(authData, plugin); err != nil {
-+ t.Errorf("got error: %v", err)
-+ }
-+
-+ expectedReplyPrefix := []byte{
-+ // 1. Packet: Hash
-+ 32, 0, 0, 3, 129, 93, 132, 95, 114, 48, 79, 215, 128, 62, 193, 118, 128,
-+ 54, 75, 208, 159, 252, 227, 215, 129, 15, 242, 97, 19, 159, 31, 20, 58,
-+ 153, 9, 130,
-+
-+ // 2. Packet: Pub Key Request
-+ 1, 0, 0, 5, 2,
-+
-+ // 3. Packet: Encrypted Password
-+ 0, 1, 0, 7, // [changing bytes]
-+ }
-+ if !bytes.HasPrefix(conn.written, expectedReplyPrefix) {
-+ t.Errorf("got unexpected data: %v", conn.written)
-+ }
-+}
-+
-+func TestAuthSwitchCachingSHA256PasswordFullRSAWithKey(t *testing.T) {
-+ conn, mc := newRWMockConn(2)
-+ mc.cfg.Passwd = "secret"
-+ mc.cfg.pubKey = testPubKeyRSA
-+
-+ // auth switch request
-+ conn.data = []byte{44, 0, 0, 2, 254, 99, 97, 99, 104, 105, 110, 103, 95,
-+ 115, 104, 97, 50, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 101,
-+ 11, 26, 18, 94, 97, 22, 72, 2, 46, 70, 106, 29, 55, 45, 94, 76, 90, 84,
-+ 50, 0}
-+
-+ conn.queuedReplies = [][]byte{
-+ // Perform Full Authentication
-+ {2, 0, 0, 4, 1, 4},
-+
-+ // OK
-+ {7, 0, 0, 6, 0, 0, 0, 2, 0, 0, 0},
-+ }
-+ conn.maxReads = 3
-+
-+ authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
-+ 47, 43, 9, 41, 112, 67, 110}
-+ plugin := "mysql_native_password"
-+
-+ if err := mc.handleAuthResult(authData, plugin); err != nil {
-+ t.Errorf("got error: %v", err)
-+ }
-+
-+ expectedReplyPrefix := []byte{
-+ // 1. Packet: Hash
-+ 32, 0, 0, 3, 129, 93, 132, 95, 114, 48, 79, 215, 128, 62, 193, 118, 128,
-+ 54, 75, 208, 159, 252, 227, 215, 129, 15, 242, 97, 19, 159, 31, 20, 58,
-+ 153, 9, 130,
-+
-+ // 2. Packet: Encrypted Password
-+ 0, 1, 0, 5, // [changing bytes]
-+ }
-+ if !bytes.HasPrefix(conn.written, expectedReplyPrefix) {
-+ t.Errorf("got unexpected data: %v", conn.written)
-+ }
-+}
-+
-+func TestAuthSwitchCachingSHA256PasswordFullSecure(t *testing.T) {
-+ conn, mc := newRWMockConn(2)
-+ mc.cfg.Passwd = "secret"
-+
-+ // Hack to make the caching_sha2_password plugin believe that the connection
-+ // is secure
-+ mc.cfg.tls = &tls.Config{InsecureSkipVerify: true}
-+
-+ // auth switch request
-+ conn.data = []byte{44, 0, 0, 2, 254, 99, 97, 99, 104, 105, 110, 103, 95,
-+ 115, 104, 97, 50, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 101,
-+ 11, 26, 18, 94, 97, 22, 72, 2, 46, 70, 106, 29, 55, 45, 94, 76, 90, 84,
-+ 50, 0}
-+
-+ // auth response
-+ conn.queuedReplies = [][]byte{
-+ {2, 0, 0, 4, 1, 4}, // Perform Full Authentication
-+ {7, 0, 0, 6, 0, 0, 0, 2, 0, 0, 0}, // OK
-+ }
-+ conn.maxReads = 3
-+
-+ authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
-+ 47, 43, 9, 41, 112, 67, 110}
-+ plugin := "mysql_native_password"
-+
-+ if err := mc.handleAuthResult(authData, plugin); err != nil {
-+ t.Errorf("got error: %v", err)
-+ }
-+
-+ expectedReply := []byte{
-+ // 1. Packet: Hash
-+ 32, 0, 0, 3, 129, 93, 132, 95, 114, 48, 79, 215, 128, 62, 193, 118, 128,
-+ 54, 75, 208, 159, 252, 227, 215, 129, 15, 242, 97, 19, 159, 31, 20, 58,
-+ 153, 9, 130,
-+
-+ // 2. Packet: Cleartext password
-+ 7, 0, 0, 5, 115, 101, 99, 114, 101, 116, 0,
-+ }
-+ if !bytes.Equal(conn.written, expectedReply) {
-+ t.Errorf("got unexpected data: %v", conn.written)
-+ }
-+}
-+
-+func TestAuthSwitchCleartextPasswordNotAllowed(t *testing.T) {
-+ conn, mc := newRWMockConn(2)
-+
-+ conn.data = []byte{22, 0, 0, 2, 254, 109, 121, 115, 113, 108, 95, 99, 108,
-+ 101, 97, 114, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0}
-+ conn.maxReads = 1
-+ authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
-+ 47, 43, 9, 41, 112, 67, 110}
-+ plugin := "mysql_native_password"
-+ err := mc.handleAuthResult(authData, plugin)
-+ if err != ErrCleartextPassword {
-+ t.Errorf("expected ErrCleartextPassword, got %v", err)
-+ }
-+}
-+
-+func TestAuthSwitchCleartextPassword(t *testing.T) {
-+ conn, mc := newRWMockConn(2)
-+ mc.cfg.AllowCleartextPasswords = true
-+ mc.cfg.Passwd = "secret"
-+
-+ // auth switch request
-+ conn.data = []byte{22, 0, 0, 2, 254, 109, 121, 115, 113, 108, 95, 99, 108,
-+ 101, 97, 114, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0}
-+
-+ // auth response
-+ conn.queuedReplies = [][]byte{{7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0}}
-+ conn.maxReads = 2
-+
-+ authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
-+ 47, 43, 9, 41, 112, 67, 110}
-+ plugin := "mysql_native_password"
-+
-+ if err := mc.handleAuthResult(authData, plugin); err != nil {
-+ t.Errorf("got error: %v", err)
-+ }
-+
-+ expectedReply := []byte{7, 0, 0, 3, 115, 101, 99, 114, 101, 116, 0}
-+ if !bytes.Equal(conn.written, expectedReply) {
-+ t.Errorf("got unexpected data: %v", conn.written)
-+ }
-+}
-+
-+func TestAuthSwitchCleartextPasswordEmpty(t *testing.T) {
-+ conn, mc := newRWMockConn(2)
-+ mc.cfg.AllowCleartextPasswords = true
-+ mc.cfg.Passwd = ""
-+
-+ // auth switch request
-+ conn.data = []byte{22, 0, 0, 2, 254, 109, 121, 115, 113, 108, 95, 99, 108,
-+ 101, 97, 114, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0}
-+
-+ // auth response
-+ conn.queuedReplies = [][]byte{{7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0}}
-+ conn.maxReads = 2
-+
-+ authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
-+ 47, 43, 9, 41, 112, 67, 110}
-+ plugin := "mysql_native_password"
-+
-+ if err := mc.handleAuthResult(authData, plugin); err != nil {
-+ t.Errorf("got error: %v", err)
-+ }
-+
-+ expectedReply := []byte{1, 0, 0, 3, 0}
-+ if !bytes.Equal(conn.written, expectedReply) {
-+ t.Errorf("got unexpected data: %v", conn.written)
-+ }
-+}
-+
-+func TestAuthSwitchNativePasswordNotAllowed(t *testing.T) {
-+ conn, mc := newRWMockConn(2)
-+ mc.cfg.AllowNativePasswords = false
-+
-+ conn.data = []byte{44, 0, 0, 2, 254, 109, 121, 115, 113, 108, 95, 110, 97,
-+ 116, 105, 118, 101, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 96,
-+ 71, 63, 8, 1, 58, 75, 12, 69, 95, 66, 60, 117, 31, 48, 31, 89, 39, 55,
-+ 31, 0}
-+ conn.maxReads = 1
-+ authData := []byte{96, 71, 63, 8, 1, 58, 75, 12, 69, 95, 66, 60, 117, 31,
-+ 48, 31, 89, 39, 55, 31}
-+ plugin := "caching_sha2_password"
-+ err := mc.handleAuthResult(authData, plugin)
-+ if err != ErrNativePassword {
-+ t.Errorf("expected ErrNativePassword, got %v", err)
-+ }
-+}
-+
-+func TestAuthSwitchNativePassword(t *testing.T) {
-+ conn, mc := newRWMockConn(2)
-+ mc.cfg.AllowNativePasswords = true
-+ mc.cfg.Passwd = "secret"
-+
-+ // auth switch request
-+ conn.data = []byte{44, 0, 0, 2, 254, 109, 121, 115, 113, 108, 95, 110, 97,
-+ 116, 105, 118, 101, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 96,
-+ 71, 63, 8, 1, 58, 75, 12, 69, 95, 66, 60, 117, 31, 48, 31, 89, 39, 55,
-+ 31, 0}
-+
-+ // auth response
-+ conn.queuedReplies = [][]byte{{7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0}}
-+ conn.maxReads = 2
-+
-+ authData := []byte{96, 71, 63, 8, 1, 58, 75, 12, 69, 95, 66, 60, 117, 31,
-+ 48, 31, 89, 39, 55, 31}
-+ plugin := "caching_sha2_password"
-+
-+ if err := mc.handleAuthResult(authData, plugin); err != nil {
-+ t.Errorf("got error: %v", err)
-+ }
-+
-+ expectedReply := []byte{20, 0, 0, 3, 202, 41, 195, 164, 34, 226, 49, 103,
-+ 21, 211, 167, 199, 227, 116, 8, 48, 57, 71, 149, 146}
-+ if !bytes.Equal(conn.written, expectedReply) {
-+ t.Errorf("got unexpected data: %v", conn.written)
-+ }
-+}
-+
-+func TestAuthSwitchNativePasswordEmpty(t *testing.T) {
-+ conn, mc := newRWMockConn(2)
-+ mc.cfg.AllowNativePasswords = true
-+ mc.cfg.Passwd = ""
-+
-+ // auth switch request
-+ conn.data = []byte{44, 0, 0, 2, 254, 109, 121, 115, 113, 108, 95, 110, 97,
-+ 116, 105, 118, 101, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 96,
-+ 71, 63, 8, 1, 58, 75, 12, 69, 95, 66, 60, 117, 31, 48, 31, 89, 39, 55,
-+ 31, 0}
-+
-+ // auth response
-+ conn.queuedReplies = [][]byte{{7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0}}
-+ conn.maxReads = 2
-+
-+ authData := []byte{96, 71, 63, 8, 1, 58, 75, 12, 69, 95, 66, 60, 117, 31,
-+ 48, 31, 89, 39, 55, 31}
-+ plugin := "caching_sha2_password"
-+
-+ if err := mc.handleAuthResult(authData, plugin); err != nil {
-+ t.Errorf("got error: %v", err)
-+ }
-+
-+ expectedReply := []byte{0, 0, 0, 3}
-+ if !bytes.Equal(conn.written, expectedReply) {
-+ t.Errorf("got unexpected data: %v", conn.written)
-+ }
-+}
-+
-+func TestAuthSwitchOldPasswordNotAllowed(t *testing.T) {
-+ conn, mc := newRWMockConn(2)
-+
-+ conn.data = []byte{41, 0, 0, 2, 254, 109, 121, 115, 113, 108, 95, 111, 108,
-+ 100, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 95, 84, 103, 43, 61,
-+ 49, 123, 61, 91, 50, 40, 113, 35, 84, 96, 101, 92, 123, 121, 107, 0}
-+ conn.maxReads = 1
-+ authData := []byte{95, 84, 103, 43, 61, 49, 123, 61, 91, 50, 40, 113, 35,
-+ 84, 96, 101, 92, 123, 121, 107}
-+ plugin := "mysql_native_password"
-+ err := mc.handleAuthResult(authData, plugin)
-+ if err != ErrOldPassword {
-+ t.Errorf("expected ErrOldPassword, got %v", err)
-+ }
-+}
-+
-+// Same to TestAuthSwitchOldPasswordNotAllowed, but use OldAuthSwitch request.
-+func TestOldAuthSwitchNotAllowed(t *testing.T) {
-+ conn, mc := newRWMockConn(2)
-+
-+ // OldAuthSwitch request
-+ conn.data = []byte{1, 0, 0, 2, 0xfe}
-+ conn.maxReads = 1
-+ authData := []byte{95, 84, 103, 43, 61, 49, 123, 61, 91, 50, 40, 113, 35,
-+ 84, 96, 101, 92, 123, 121, 107}
-+ plugin := "mysql_native_password"
-+ err := mc.handleAuthResult(authData, plugin)
-+ if err != ErrOldPassword {
-+ t.Errorf("expected ErrOldPassword, got %v", err)
-+ }
-+}
-+
-+func TestAuthSwitchOldPassword(t *testing.T) {
-+ conn, mc := newRWMockConn(2)
-+ mc.cfg.AllowOldPasswords = true
-+ mc.cfg.Passwd = "secret"
-+
-+ // auth switch request
-+ conn.data = []byte{41, 0, 0, 2, 254, 109, 121, 115, 113, 108, 95, 111, 108,
-+ 100, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 95, 84, 103, 43, 61,
-+ 49, 123, 61, 91, 50, 40, 113, 35, 84, 96, 101, 92, 123, 121, 107, 0}
-+
-+ // auth response
-+ conn.queuedReplies = [][]byte{{8, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0, 0}}
-+ conn.maxReads = 2
-+
-+ authData := []byte{95, 84, 103, 43, 61, 49, 123, 61, 91, 50, 40, 113, 35,
-+ 84, 96, 101, 92, 123, 121, 107}
-+ plugin := "mysql_native_password"
-+
-+ if err := mc.handleAuthResult(authData, plugin); err != nil {
-+ t.Errorf("got error: %v", err)
-+ }
-+
-+ expectedReply := []byte{9, 0, 0, 3, 86, 83, 83, 79, 74, 78, 65, 66, 0}
-+ if !bytes.Equal(conn.written, expectedReply) {
-+ t.Errorf("got unexpected data: %v", conn.written)
-+ }
-+}
-+
-+// Same to TestAuthSwitchOldPassword, but use OldAuthSwitch request.
-+func TestOldAuthSwitch(t *testing.T) {
-+ conn, mc := newRWMockConn(2)
-+ mc.cfg.AllowOldPasswords = true
-+ mc.cfg.Passwd = "secret"
-+
-+ // OldAuthSwitch request
-+ conn.data = []byte{1, 0, 0, 2, 0xfe}
-+
-+ // auth response
-+ conn.queuedReplies = [][]byte{{8, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0, 0}}
-+ conn.maxReads = 2
-+
-+ authData := []byte{95, 84, 103, 43, 61, 49, 123, 61, 91, 50, 40, 113, 35,
-+ 84, 96, 101, 92, 123, 121, 107}
-+ plugin := "mysql_native_password"
-+
-+ if err := mc.handleAuthResult(authData, plugin); err != nil {
-+ t.Errorf("got error: %v", err)
-+ }
-+
-+ expectedReply := []byte{9, 0, 0, 3, 86, 83, 83, 79, 74, 78, 65, 66, 0}
-+ if !bytes.Equal(conn.written, expectedReply) {
-+ t.Errorf("got unexpected data: %v", conn.written)
-+ }
-+}
-+func TestAuthSwitchOldPasswordEmpty(t *testing.T) {
-+ conn, mc := newRWMockConn(2)
-+ mc.cfg.AllowOldPasswords = true
-+ mc.cfg.Passwd = ""
-+
-+ // auth switch request
-+ conn.data = []byte{41, 0, 0, 2, 254, 109, 121, 115, 113, 108, 95, 111, 108,
-+ 100, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 95, 84, 103, 43, 61,
-+ 49, 123, 61, 91, 50, 40, 113, 35, 84, 96, 101, 92, 123, 121, 107, 0}
-+
-+ // auth response
-+ conn.queuedReplies = [][]byte{{8, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0, 0}}
-+ conn.maxReads = 2
-+
-+ authData := []byte{95, 84, 103, 43, 61, 49, 123, 61, 91, 50, 40, 113, 35,
-+ 84, 96, 101, 92, 123, 121, 107}
-+ plugin := "mysql_native_password"
-+
-+ if err := mc.handleAuthResult(authData, plugin); err != nil {
-+ t.Errorf("got error: %v", err)
-+ }
-+
-+ expectedReply := []byte{1, 0, 0, 3, 0}
-+ if !bytes.Equal(conn.written, expectedReply) {
-+ t.Errorf("got unexpected data: %v", conn.written)
-+ }
-+}
-+
-+// Same to TestAuthSwitchOldPasswordEmpty, but use OldAuthSwitch request.
-+func TestOldAuthSwitchPasswordEmpty(t *testing.T) {
-+ conn, mc := newRWMockConn(2)
-+ mc.cfg.AllowOldPasswords = true
-+ mc.cfg.Passwd = ""
-+
-+ // OldAuthSwitch request.
-+ conn.data = []byte{1, 0, 0, 2, 0xfe}
-+
-+ // auth response
-+ conn.queuedReplies = [][]byte{{8, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0, 0}}
-+ conn.maxReads = 2
-+
-+ authData := []byte{95, 84, 103, 43, 61, 49, 123, 61, 91, 50, 40, 113, 35,
-+ 84, 96, 101, 92, 123, 121, 107}
-+ plugin := "mysql_native_password"
-+
-+ if err := mc.handleAuthResult(authData, plugin); err != nil {
-+ t.Errorf("got error: %v", err)
-+ }
-+
-+ expectedReply := []byte{1, 0, 0, 3, 0}
-+ if !bytes.Equal(conn.written, expectedReply) {
-+ t.Errorf("got unexpected data: %v", conn.written)
-+ }
-+}
-+
-+func TestAuthSwitchSHA256PasswordEmpty(t *testing.T) {
-+ conn, mc := newRWMockConn(2)
-+ mc.cfg.Passwd = ""
-+
-+ // auth switch request
-+ conn.data = []byte{38, 0, 0, 2, 254, 115, 104, 97, 50, 53, 54, 95, 112, 97,
-+ 115, 115, 119, 111, 114, 100, 0, 78, 82, 62, 40, 100, 1, 59, 31, 44, 69,
-+ 33, 112, 8, 81, 51, 96, 65, 82, 16, 114, 0}
-+
-+ conn.queuedReplies = [][]byte{
-+ // OK
-+ {7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0},
-+ }
-+ conn.maxReads = 3
-+
-+ authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
-+ 47, 43, 9, 41, 112, 67, 110}
-+ plugin := "mysql_native_password"
-+
-+ if err := mc.handleAuthResult(authData, plugin); err != nil {
-+ t.Errorf("got error: %v", err)
-+ }
-+
-+ expectedReplyPrefix := []byte{
-+ // 1. Packet: Empty Password
-+ 1, 0, 0, 3, 0,
-+ }
-+ if !bytes.HasPrefix(conn.written, expectedReplyPrefix) {
-+ t.Errorf("got unexpected data: %v", conn.written)
-+ }
-+}
-+
-+func TestAuthSwitchSHA256PasswordRSA(t *testing.T) {
-+ conn, mc := newRWMockConn(2)
-+ mc.cfg.Passwd = "secret"
-+
-+ // auth switch request
-+ conn.data = []byte{38, 0, 0, 2, 254, 115, 104, 97, 50, 53, 54, 95, 112, 97,
-+ 115, 115, 119, 111, 114, 100, 0, 78, 82, 62, 40, 100, 1, 59, 31, 44, 69,
-+ 33, 112, 8, 81, 51, 96, 65, 82, 16, 114, 0}
-+
-+ conn.queuedReplies = [][]byte{
-+ // Pub Key Response
-+ append([]byte{byte(1 + len(testPubKey)), 1, 0, 4, 1}, testPubKey...),
-+
-+ // OK
-+ {7, 0, 0, 6, 0, 0, 0, 2, 0, 0, 0},
-+ }
-+ conn.maxReads = 3
-+
-+ authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
-+ 47, 43, 9, 41, 112, 67, 110}
-+ plugin := "mysql_native_password"
-+
-+ if err := mc.handleAuthResult(authData, plugin); err != nil {
-+ t.Errorf("got error: %v", err)
-+ }
-+
-+ expectedReplyPrefix := []byte{
-+ // 1. Packet: Pub Key Request
-+ 1, 0, 0, 3, 1,
-+
-+ // 2. Packet: Encrypted Password
-+ 0, 1, 0, 5, // [changing bytes]
-+ }
-+ if !bytes.HasPrefix(conn.written, expectedReplyPrefix) {
-+ t.Errorf("got unexpected data: %v", conn.written)
-+ }
-+}
-+
-+func TestAuthSwitchSHA256PasswordRSAWithKey(t *testing.T) {
-+ conn, mc := newRWMockConn(2)
-+ mc.cfg.Passwd = "secret"
-+ mc.cfg.pubKey = testPubKeyRSA
-+
-+ // auth switch request
-+ conn.data = []byte{38, 0, 0, 2, 254, 115, 104, 97, 50, 53, 54, 95, 112, 97,
-+ 115, 115, 119, 111, 114, 100, 0, 78, 82, 62, 40, 100, 1, 59, 31, 44, 69,
-+ 33, 112, 8, 81, 51, 96, 65, 82, 16, 114, 0}
-+
-+ conn.queuedReplies = [][]byte{
-+ // OK
-+ {7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0},
-+ }
-+ conn.maxReads = 2
-+
-+ authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
-+ 47, 43, 9, 41, 112, 67, 110}
-+ plugin := "mysql_native_password"
-+
-+ if err := mc.handleAuthResult(authData, plugin); err != nil {
-+ t.Errorf("got error: %v", err)
-+ }
-+
-+ expectedReplyPrefix := []byte{
-+ // 1. Packet: Encrypted Password
-+ 0, 1, 0, 3, // [changing bytes]
-+ }
-+ if !bytes.HasPrefix(conn.written, expectedReplyPrefix) {
-+ t.Errorf("got unexpected data: %v", conn.written)
-+ }
-+}
-+
-+func TestAuthSwitchSHA256PasswordSecure(t *testing.T) {
-+ conn, mc := newRWMockConn(2)
-+ mc.cfg.Passwd = "secret"
-+
-+ // Hack to make the caching_sha2_password plugin believe that the connection
-+ // is secure
-+ mc.cfg.tls = &tls.Config{InsecureSkipVerify: true}
-+
-+ // auth switch request
-+ conn.data = []byte{38, 0, 0, 2, 254, 115, 104, 97, 50, 53, 54, 95, 112, 97,
-+ 115, 115, 119, 111, 114, 100, 0, 78, 82, 62, 40, 100, 1, 59, 31, 44, 69,
-+ 33, 112, 8, 81, 51, 96, 65, 82, 16, 114, 0}
-+
-+ conn.queuedReplies = [][]byte{
-+ // OK
-+ {7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0},
-+ }
-+ conn.maxReads = 2
-+
-+ authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
-+ 47, 43, 9, 41, 112, 67, 110}
-+ plugin := "mysql_native_password"
-+
-+ if err := mc.handleAuthResult(authData, plugin); err != nil {
-+ t.Errorf("got error: %v", err)
-+ }
-+
-+ expectedReplyPrefix := []byte{
-+ // 1. Packet: Cleartext Password
-+ 7, 0, 0, 3, 115, 101, 99, 114, 101, 116, 0,
-+ }
-+ if !bytes.Equal(conn.written, expectedReplyPrefix) {
-+ t.Errorf("got unexpected data: %v", conn.written)
-+ }
-+}
-diff --git a/vendor/github.com/go-sql-driver/mysql/benchmark_test.go b/vendor/github.com/go-sql-driver/mysql/benchmark_test.go
-new file mode 100644
-index 00000000000..3e25a3bf252
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/benchmark_test.go
-@@ -0,0 +1,373 @@
-+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-+//
-+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
-+//
-+// This Source Code Form is subject to the terms of the Mozilla Public
-+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-+// You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+package mysql
-+
-+import (
-+ "bytes"
-+ "context"
-+ "database/sql"
-+ "database/sql/driver"
-+ "fmt"
-+ "math"
-+ "runtime"
-+ "strings"
-+ "sync"
-+ "sync/atomic"
-+ "testing"
-+ "time"
-+)
-+
-+type TB testing.B
-+
-+func (tb *TB) check(err error) {
-+ if err != nil {
-+ tb.Fatal(err)
-+ }
-+}
-+
-+func (tb *TB) checkDB(db *sql.DB, err error) *sql.DB {
-+ tb.check(err)
-+ return db
-+}
-+
-+func (tb *TB) checkRows(rows *sql.Rows, err error) *sql.Rows {
-+ tb.check(err)
-+ return rows
-+}
-+
-+func (tb *TB) checkStmt(stmt *sql.Stmt, err error) *sql.Stmt {
-+ tb.check(err)
-+ return stmt
-+}
-+
-+func initDB(b *testing.B, queries ...string) *sql.DB {
-+ tb := (*TB)(b)
-+ db := tb.checkDB(sql.Open("mysql", dsn))
-+ for _, query := range queries {
-+ if _, err := db.Exec(query); err != nil {
-+ b.Fatalf("error on %q: %v", query, err)
-+ }
-+ }
-+ return db
-+}
-+
-+const concurrencyLevel = 10
-+
-+func BenchmarkQuery(b *testing.B) {
-+ tb := (*TB)(b)
-+ b.StopTimer()
-+ b.ReportAllocs()
-+ db := initDB(b,
-+ "DROP TABLE IF EXISTS foo",
-+ "CREATE TABLE foo (id INT PRIMARY KEY, val CHAR(50))",
-+ `INSERT INTO foo VALUES (1, "one")`,
-+ `INSERT INTO foo VALUES (2, "two")`,
-+ )
-+ db.SetMaxIdleConns(concurrencyLevel)
-+ defer db.Close()
-+
-+ stmt := tb.checkStmt(db.Prepare("SELECT val FROM foo WHERE id=?"))
-+ defer stmt.Close()
-+
-+ remain := int64(b.N)
-+ var wg sync.WaitGroup
-+ wg.Add(concurrencyLevel)
-+ defer wg.Wait()
-+ b.StartTimer()
-+
-+ for i := 0; i < concurrencyLevel; i++ {
-+ go func() {
-+ for {
-+ if atomic.AddInt64(&remain, -1) < 0 {
-+ wg.Done()
-+ return
-+ }
-+
-+ var got string
-+ tb.check(stmt.QueryRow(1).Scan(&got))
-+ if got != "one" {
-+ b.Errorf("query = %q; want one", got)
-+ wg.Done()
-+ return
-+ }
-+ }
-+ }()
-+ }
-+}
-+
-+func BenchmarkExec(b *testing.B) {
-+ tb := (*TB)(b)
-+ b.StopTimer()
-+ b.ReportAllocs()
-+ db := tb.checkDB(sql.Open("mysql", dsn))
-+ db.SetMaxIdleConns(concurrencyLevel)
-+ defer db.Close()
-+
-+ stmt := tb.checkStmt(db.Prepare("DO 1"))
-+ defer stmt.Close()
-+
-+ remain := int64(b.N)
-+ var wg sync.WaitGroup
-+ wg.Add(concurrencyLevel)
-+ defer wg.Wait()
-+ b.StartTimer()
-+
-+ for i := 0; i < concurrencyLevel; i++ {
-+ go func() {
-+ for {
-+ if atomic.AddInt64(&remain, -1) < 0 {
-+ wg.Done()
-+ return
-+ }
-+
-+ if _, err := stmt.Exec(); err != nil {
-+ b.Fatal(err.Error())
-+ }
-+ }
-+ }()
-+ }
-+}
-+
-+// data, but no db writes
-+var roundtripSample []byte
-+
-+func initRoundtripBenchmarks() ([]byte, int, int) {
-+ if roundtripSample == nil {
-+ roundtripSample = []byte(strings.Repeat("0123456789abcdef", 1024*1024))
-+ }
-+ return roundtripSample, 16, len(roundtripSample)
-+}
-+
-+func BenchmarkRoundtripTxt(b *testing.B) {
-+ b.StopTimer()
-+ sample, min, max := initRoundtripBenchmarks()
-+ sampleString := string(sample)
-+ b.ReportAllocs()
-+ tb := (*TB)(b)
-+ db := tb.checkDB(sql.Open("mysql", dsn))
-+ defer db.Close()
-+ b.StartTimer()
-+ var result string
-+ for i := 0; i < b.N; i++ {
-+ length := min + i
-+ if length > max {
-+ length = max
-+ }
-+ test := sampleString[0:length]
-+ rows := tb.checkRows(db.Query(`SELECT "` + test + `"`))
-+ if !rows.Next() {
-+ rows.Close()
-+ b.Fatalf("crashed")
-+ }
-+ err := rows.Scan(&result)
-+ if err != nil {
-+ rows.Close()
-+ b.Fatalf("crashed")
-+ }
-+ if result != test {
-+ rows.Close()
-+ b.Errorf("mismatch")
-+ }
-+ rows.Close()
-+ }
-+}
-+
-+func BenchmarkRoundtripBin(b *testing.B) {
-+ b.StopTimer()
-+ sample, min, max := initRoundtripBenchmarks()
-+ b.ReportAllocs()
-+ tb := (*TB)(b)
-+ db := tb.checkDB(sql.Open("mysql", dsn))
-+ defer db.Close()
-+ stmt := tb.checkStmt(db.Prepare("SELECT ?"))
-+ defer stmt.Close()
-+ b.StartTimer()
-+ var result sql.RawBytes
-+ for i := 0; i < b.N; i++ {
-+ length := min + i
-+ if length > max {
-+ length = max
-+ }
-+ test := sample[0:length]
-+ rows := tb.checkRows(stmt.Query(test))
-+ if !rows.Next() {
-+ rows.Close()
-+ b.Fatalf("crashed")
-+ }
-+ err := rows.Scan(&result)
-+ if err != nil {
-+ rows.Close()
-+ b.Fatalf("crashed")
-+ }
-+ if !bytes.Equal(result, test) {
-+ rows.Close()
-+ b.Errorf("mismatch")
-+ }
-+ rows.Close()
-+ }
-+}
-+
-+func BenchmarkInterpolation(b *testing.B) {
-+ mc := &mysqlConn{
-+ cfg: &Config{
-+ InterpolateParams: true,
-+ Loc: time.UTC,
-+ },
-+ maxAllowedPacket: maxPacketSize,
-+ maxWriteSize: maxPacketSize - 1,
-+ buf: newBuffer(nil),
-+ }
-+
-+ args := []driver.Value{
-+ int64(42424242),
-+ float64(math.Pi),
-+ false,
-+ time.Unix(1423411542, 807015000),
-+ []byte("bytes containing special chars ' \" \a \x00"),
-+ "string containing special chars ' \" \a \x00",
-+ }
-+ q := "SELECT ?, ?, ?, ?, ?, ?"
-+
-+ b.ReportAllocs()
-+ b.ResetTimer()
-+ for i := 0; i < b.N; i++ {
-+ _, err := mc.interpolateParams(q, args)
-+ if err != nil {
-+ b.Fatal(err)
-+ }
-+ }
-+}
-+
-+func benchmarkQueryContext(b *testing.B, db *sql.DB, p int) {
-+ ctx, cancel := context.WithCancel(context.Background())
-+ defer cancel()
-+ db.SetMaxIdleConns(p * runtime.GOMAXPROCS(0))
-+
-+ tb := (*TB)(b)
-+ stmt := tb.checkStmt(db.PrepareContext(ctx, "SELECT val FROM foo WHERE id=?"))
-+ defer stmt.Close()
-+
-+ b.SetParallelism(p)
-+ b.ReportAllocs()
-+ b.ResetTimer()
-+ b.RunParallel(func(pb *testing.PB) {
-+ var got string
-+ for pb.Next() {
-+ tb.check(stmt.QueryRow(1).Scan(&got))
-+ if got != "one" {
-+ b.Fatalf("query = %q; want one", got)
-+ }
-+ }
-+ })
-+}
-+
-+func BenchmarkQueryContext(b *testing.B) {
-+ db := initDB(b,
-+ "DROP TABLE IF EXISTS foo",
-+ "CREATE TABLE foo (id INT PRIMARY KEY, val CHAR(50))",
-+ `INSERT INTO foo VALUES (1, "one")`,
-+ `INSERT INTO foo VALUES (2, "two")`,
-+ )
-+ defer db.Close()
-+ for _, p := range []int{1, 2, 3, 4} {
-+ b.Run(fmt.Sprintf("%d", p), func(b *testing.B) {
-+ benchmarkQueryContext(b, db, p)
-+ })
-+ }
-+}
-+
-+func benchmarkExecContext(b *testing.B, db *sql.DB, p int) {
-+ ctx, cancel := context.WithCancel(context.Background())
-+ defer cancel()
-+ db.SetMaxIdleConns(p * runtime.GOMAXPROCS(0))
-+
-+ tb := (*TB)(b)
-+ stmt := tb.checkStmt(db.PrepareContext(ctx, "DO 1"))
-+ defer stmt.Close()
-+
-+ b.SetParallelism(p)
-+ b.ReportAllocs()
-+ b.ResetTimer()
-+ b.RunParallel(func(pb *testing.PB) {
-+ for pb.Next() {
-+ if _, err := stmt.ExecContext(ctx); err != nil {
-+ b.Fatal(err)
-+ }
-+ }
-+ })
-+}
-+
-+func BenchmarkExecContext(b *testing.B) {
-+ db := initDB(b,
-+ "DROP TABLE IF EXISTS foo",
-+ "CREATE TABLE foo (id INT PRIMARY KEY, val CHAR(50))",
-+ `INSERT INTO foo VALUES (1, "one")`,
-+ `INSERT INTO foo VALUES (2, "two")`,
-+ )
-+ defer db.Close()
-+ for _, p := range []int{1, 2, 3, 4} {
-+ b.Run(fmt.Sprintf("%d", p), func(b *testing.B) {
-+ benchmarkQueryContext(b, db, p)
-+ })
-+ }
-+}
-+
-+// BenchmarkQueryRawBytes benchmarks fetching 100 blobs using sql.RawBytes.
-+// "size=" means size of each blobs.
-+func BenchmarkQueryRawBytes(b *testing.B) {
-+ var sizes []int = []int{100, 1000, 2000, 4000, 8000, 12000, 16000, 32000, 64000, 256000}
-+ db := initDB(b,
-+ "DROP TABLE IF EXISTS bench_rawbytes",
-+ "CREATE TABLE bench_rawbytes (id INT PRIMARY KEY, val LONGBLOB)",
-+ )
-+ defer db.Close()
-+
-+ blob := make([]byte, sizes[len(sizes)-1])
-+ for i := range blob {
-+ blob[i] = 42
-+ }
-+ for i := 0; i < 100; i++ {
-+ _, err := db.Exec("INSERT INTO bench_rawbytes VALUES (?, ?)", i, blob)
-+ if err != nil {
-+ b.Fatal(err)
-+ }
-+ }
-+
-+ for _, s := range sizes {
-+ b.Run(fmt.Sprintf("size=%v", s), func(b *testing.B) {
-+ db.SetMaxIdleConns(0)
-+ db.SetMaxIdleConns(1)
-+ b.ReportAllocs()
-+ b.ResetTimer()
-+
-+ for j := 0; j < b.N; j++ {
-+ rows, err := db.Query("SELECT LEFT(val, ?) as v FROM bench_rawbytes", s)
-+ if err != nil {
-+ b.Fatal(err)
-+ }
-+ nrows := 0
-+ for rows.Next() {
-+ var buf sql.RawBytes
-+ err := rows.Scan(&buf)
-+ if err != nil {
-+ b.Fatal(err)
-+ }
-+ if len(buf) != s {
-+ b.Fatalf("size mismatch: expected %v, got %v", s, len(buf))
-+ }
-+ nrows++
-+ }
-+ rows.Close()
-+ if nrows != 100 {
-+ b.Fatalf("numbers of rows mismatch: expected %v, got %v", 100, nrows)
-+ }
-+ }
-+ })
-+ }
-+}
-diff --git a/vendor/github.com/go-sql-driver/mysql/buffer.go b/vendor/github.com/go-sql-driver/mysql/buffer.go
-new file mode 100644
-index 00000000000..0774c5c8c24
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/buffer.go
-@@ -0,0 +1,182 @@
-+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-+//
-+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
-+//
-+// This Source Code Form is subject to the terms of the Mozilla Public
-+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-+// You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+package mysql
-+
-+import (
-+ "io"
-+ "net"
-+ "time"
-+)
-+
-+const defaultBufSize = 4096
-+const maxCachedBufSize = 256 * 1024
-+
-+// A buffer which is used for both reading and writing.
-+// This is possible since communication on each connection is synchronous.
-+// In other words, we can't write and read simultaneously on the same connection.
-+// The buffer is similar to bufio.Reader / Writer but zero-copy-ish
-+// Also highly optimized for this particular use case.
-+// This buffer is backed by two byte slices in a double-buffering scheme
-+type buffer struct {
-+ buf []byte // buf is a byte buffer who's length and capacity are equal.
-+ nc net.Conn
-+ idx int
-+ length int
-+ timeout time.Duration
-+ dbuf [2][]byte // dbuf is an array with the two byte slices that back this buffer
-+ flipcnt uint // flipccnt is the current buffer counter for double-buffering
-+}
-+
-+// newBuffer allocates and returns a new buffer.
-+func newBuffer(nc net.Conn) buffer {
-+ fg := make([]byte, defaultBufSize)
-+ return buffer{
-+ buf: fg,
-+ nc: nc,
-+ dbuf: [2][]byte{fg, nil},
-+ }
-+}
-+
-+// flip replaces the active buffer with the background buffer
-+// this is a delayed flip that simply increases the buffer counter;
-+// the actual flip will be performed the next time we call `buffer.fill`
-+func (b *buffer) flip() {
-+ b.flipcnt += 1
-+}
-+
-+// fill reads into the buffer until at least _need_ bytes are in it
-+func (b *buffer) fill(need int) error {
-+ n := b.length
-+ // fill data into its double-buffering target: if we've called
-+ // flip on this buffer, we'll be copying to the background buffer,
-+ // and then filling it with network data; otherwise we'll just move
-+ // the contents of the current buffer to the front before filling it
-+ dest := b.dbuf[b.flipcnt&1]
-+
-+ // grow buffer if necessary to fit the whole packet.
-+ if need > len(dest) {
-+ // Round up to the next multiple of the default size
-+ dest = make([]byte, ((need/defaultBufSize)+1)*defaultBufSize)
-+
-+ // if the allocated buffer is not too large, move it to backing storage
-+ // to prevent extra allocations on applications that perform large reads
-+ if len(dest) <= maxCachedBufSize {
-+ b.dbuf[b.flipcnt&1] = dest
-+ }
-+ }
-+
-+ // if we're filling the fg buffer, move the existing data to the start of it.
-+ // if we're filling the bg buffer, copy over the data
-+ if n > 0 {
-+ copy(dest[:n], b.buf[b.idx:])
-+ }
-+
-+ b.buf = dest
-+ b.idx = 0
-+
-+ for {
-+ if b.timeout > 0 {
-+ if err := b.nc.SetReadDeadline(time.Now().Add(b.timeout)); err != nil {
-+ return err
-+ }
-+ }
-+
-+ nn, err := b.nc.Read(b.buf[n:])
-+ n += nn
-+
-+ switch err {
-+ case nil:
-+ if n < need {
-+ continue
-+ }
-+ b.length = n
-+ return nil
-+
-+ case io.EOF:
-+ if n >= need {
-+ b.length = n
-+ return nil
-+ }
-+ return io.ErrUnexpectedEOF
-+
-+ default:
-+ return err
-+ }
-+ }
-+}
-+
-+// returns next N bytes from buffer.
-+// The returned slice is only guaranteed to be valid until the next read
-+func (b *buffer) readNext(need int) ([]byte, error) {
-+ if b.length < need {
-+ // refill
-+ if err := b.fill(need); err != nil {
-+ return nil, err
-+ }
-+ }
-+
-+ offset := b.idx
-+ b.idx += need
-+ b.length -= need
-+ return b.buf[offset:b.idx], nil
-+}
-+
-+// takeBuffer returns a buffer with the requested size.
-+// If possible, a slice from the existing buffer is returned.
-+// Otherwise a bigger buffer is made.
-+// Only one buffer (total) can be used at a time.
-+func (b *buffer) takeBuffer(length int) ([]byte, error) {
-+ if b.length > 0 {
-+ return nil, ErrBusyBuffer
-+ }
-+
-+ // test (cheap) general case first
-+ if length <= cap(b.buf) {
-+ return b.buf[:length], nil
-+ }
-+
-+ if length < maxPacketSize {
-+ b.buf = make([]byte, length)
-+ return b.buf, nil
-+ }
-+
-+ // buffer is larger than we want to store.
-+ return make([]byte, length), nil
-+}
-+
-+// takeSmallBuffer is shortcut which can be used if length is
-+// known to be smaller than defaultBufSize.
-+// Only one buffer (total) can be used at a time.
-+func (b *buffer) takeSmallBuffer(length int) ([]byte, error) {
-+ if b.length > 0 {
-+ return nil, ErrBusyBuffer
-+ }
-+ return b.buf[:length], nil
-+}
-+
-+// takeCompleteBuffer returns the complete existing buffer.
-+// This can be used if the necessary buffer size is unknown.
-+// cap and len of the returned buffer will be equal.
-+// Only one buffer (total) can be used at a time.
-+func (b *buffer) takeCompleteBuffer() ([]byte, error) {
-+ if b.length > 0 {
-+ return nil, ErrBusyBuffer
-+ }
-+ return b.buf, nil
-+}
-+
-+// store stores buf, an updated buffer, if its suitable to do so.
-+func (b *buffer) store(buf []byte) error {
-+ if b.length > 0 {
-+ return ErrBusyBuffer
-+ } else if cap(buf) <= maxPacketSize && cap(buf) > cap(b.buf) {
-+ b.buf = buf[:cap(buf)]
-+ }
-+ return nil
-+}
-diff --git a/vendor/github.com/go-sql-driver/mysql/collations.go b/vendor/github.com/go-sql-driver/mysql/collations.go
-new file mode 100644
-index 00000000000..8d2b5567679
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/collations.go
-@@ -0,0 +1,265 @@
-+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-+//
-+// Copyright 2014 The Go-MySQL-Driver Authors. All rights reserved.
-+//
-+// This Source Code Form is subject to the terms of the Mozilla Public
-+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-+// You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+package mysql
-+
-+const defaultCollation = "utf8mb4_general_ci"
-+const binaryCollation = "binary"
-+
-+// A list of available collations mapped to the internal ID.
-+// To update this map use the following MySQL query:
-+// SELECT COLLATION_NAME, ID FROM information_schema.COLLATIONS WHERE ID<256 ORDER BY ID
-+//
-+// Handshake packet have only 1 byte for collation_id. So we can't use collations with ID > 255.
-+//
-+// ucs2, utf16, and utf32 can't be used for connection charset.
-+// https://dev.mysql.com/doc/refman/5.7/en/charset-connection.html#charset-connection-impermissible-client-charset
-+// They are commented out to reduce this map.
-+var collations = map[string]byte{
-+ "big5_chinese_ci": 1,
-+ "latin2_czech_cs": 2,
-+ "dec8_swedish_ci": 3,
-+ "cp850_general_ci": 4,
-+ "latin1_german1_ci": 5,
-+ "hp8_english_ci": 6,
-+ "koi8r_general_ci": 7,
-+ "latin1_swedish_ci": 8,
-+ "latin2_general_ci": 9,
-+ "swe7_swedish_ci": 10,
-+ "ascii_general_ci": 11,
-+ "ujis_japanese_ci": 12,
-+ "sjis_japanese_ci": 13,
-+ "cp1251_bulgarian_ci": 14,
-+ "latin1_danish_ci": 15,
-+ "hebrew_general_ci": 16,
-+ "tis620_thai_ci": 18,
-+ "euckr_korean_ci": 19,
-+ "latin7_estonian_cs": 20,
-+ "latin2_hungarian_ci": 21,
-+ "koi8u_general_ci": 22,
-+ "cp1251_ukrainian_ci": 23,
-+ "gb2312_chinese_ci": 24,
-+ "greek_general_ci": 25,
-+ "cp1250_general_ci": 26,
-+ "latin2_croatian_ci": 27,
-+ "gbk_chinese_ci": 28,
-+ "cp1257_lithuanian_ci": 29,
-+ "latin5_turkish_ci": 30,
-+ "latin1_german2_ci": 31,
-+ "armscii8_general_ci": 32,
-+ "utf8_general_ci": 33,
-+ "cp1250_czech_cs": 34,
-+ //"ucs2_general_ci": 35,
-+ "cp866_general_ci": 36,
-+ "keybcs2_general_ci": 37,
-+ "macce_general_ci": 38,
-+ "macroman_general_ci": 39,
-+ "cp852_general_ci": 40,
-+ "latin7_general_ci": 41,
-+ "latin7_general_cs": 42,
-+ "macce_bin": 43,
-+ "cp1250_croatian_ci": 44,
-+ "utf8mb4_general_ci": 45,
-+ "utf8mb4_bin": 46,
-+ "latin1_bin": 47,
-+ "latin1_general_ci": 48,
-+ "latin1_general_cs": 49,
-+ "cp1251_bin": 50,
-+ "cp1251_general_ci": 51,
-+ "cp1251_general_cs": 52,
-+ "macroman_bin": 53,
-+ //"utf16_general_ci": 54,
-+ //"utf16_bin": 55,
-+ //"utf16le_general_ci": 56,
-+ "cp1256_general_ci": 57,
-+ "cp1257_bin": 58,
-+ "cp1257_general_ci": 59,
-+ //"utf32_general_ci": 60,
-+ //"utf32_bin": 61,
-+ //"utf16le_bin": 62,
-+ "binary": 63,
-+ "armscii8_bin": 64,
-+ "ascii_bin": 65,
-+ "cp1250_bin": 66,
-+ "cp1256_bin": 67,
-+ "cp866_bin": 68,
-+ "dec8_bin": 69,
-+ "greek_bin": 70,
-+ "hebrew_bin": 71,
-+ "hp8_bin": 72,
-+ "keybcs2_bin": 73,
-+ "koi8r_bin": 74,
-+ "koi8u_bin": 75,
-+ "utf8_tolower_ci": 76,
-+ "latin2_bin": 77,
-+ "latin5_bin": 78,
-+ "latin7_bin": 79,
-+ "cp850_bin": 80,
-+ "cp852_bin": 81,
-+ "swe7_bin": 82,
-+ "utf8_bin": 83,
-+ "big5_bin": 84,
-+ "euckr_bin": 85,
-+ "gb2312_bin": 86,
-+ "gbk_bin": 87,
-+ "sjis_bin": 88,
-+ "tis620_bin": 89,
-+ //"ucs2_bin": 90,
-+ "ujis_bin": 91,
-+ "geostd8_general_ci": 92,
-+ "geostd8_bin": 93,
-+ "latin1_spanish_ci": 94,
-+ "cp932_japanese_ci": 95,
-+ "cp932_bin": 96,
-+ "eucjpms_japanese_ci": 97,
-+ "eucjpms_bin": 98,
-+ "cp1250_polish_ci": 99,
-+ //"utf16_unicode_ci": 101,
-+ //"utf16_icelandic_ci": 102,
-+ //"utf16_latvian_ci": 103,
-+ //"utf16_romanian_ci": 104,
-+ //"utf16_slovenian_ci": 105,
-+ //"utf16_polish_ci": 106,
-+ //"utf16_estonian_ci": 107,
-+ //"utf16_spanish_ci": 108,
-+ //"utf16_swedish_ci": 109,
-+ //"utf16_turkish_ci": 110,
-+ //"utf16_czech_ci": 111,
-+ //"utf16_danish_ci": 112,
-+ //"utf16_lithuanian_ci": 113,
-+ //"utf16_slovak_ci": 114,
-+ //"utf16_spanish2_ci": 115,
-+ //"utf16_roman_ci": 116,
-+ //"utf16_persian_ci": 117,
-+ //"utf16_esperanto_ci": 118,
-+ //"utf16_hungarian_ci": 119,
-+ //"utf16_sinhala_ci": 120,
-+ //"utf16_german2_ci": 121,
-+ //"utf16_croatian_ci": 122,
-+ //"utf16_unicode_520_ci": 123,
-+ //"utf16_vietnamese_ci": 124,
-+ //"ucs2_unicode_ci": 128,
-+ //"ucs2_icelandic_ci": 129,
-+ //"ucs2_latvian_ci": 130,
-+ //"ucs2_romanian_ci": 131,
-+ //"ucs2_slovenian_ci": 132,
-+ //"ucs2_polish_ci": 133,
-+ //"ucs2_estonian_ci": 134,
-+ //"ucs2_spanish_ci": 135,
-+ //"ucs2_swedish_ci": 136,
-+ //"ucs2_turkish_ci": 137,
-+ //"ucs2_czech_ci": 138,
-+ //"ucs2_danish_ci": 139,
-+ //"ucs2_lithuanian_ci": 140,
-+ //"ucs2_slovak_ci": 141,
-+ //"ucs2_spanish2_ci": 142,
-+ //"ucs2_roman_ci": 143,
-+ //"ucs2_persian_ci": 144,
-+ //"ucs2_esperanto_ci": 145,
-+ //"ucs2_hungarian_ci": 146,
-+ //"ucs2_sinhala_ci": 147,
-+ //"ucs2_german2_ci": 148,
-+ //"ucs2_croatian_ci": 149,
-+ //"ucs2_unicode_520_ci": 150,
-+ //"ucs2_vietnamese_ci": 151,
-+ //"ucs2_general_mysql500_ci": 159,
-+ //"utf32_unicode_ci": 160,
-+ //"utf32_icelandic_ci": 161,
-+ //"utf32_latvian_ci": 162,
-+ //"utf32_romanian_ci": 163,
-+ //"utf32_slovenian_ci": 164,
-+ //"utf32_polish_ci": 165,
-+ //"utf32_estonian_ci": 166,
-+ //"utf32_spanish_ci": 167,
-+ //"utf32_swedish_ci": 168,
-+ //"utf32_turkish_ci": 169,
-+ //"utf32_czech_ci": 170,
-+ //"utf32_danish_ci": 171,
-+ //"utf32_lithuanian_ci": 172,
-+ //"utf32_slovak_ci": 173,
-+ //"utf32_spanish2_ci": 174,
-+ //"utf32_roman_ci": 175,
-+ //"utf32_persian_ci": 176,
-+ //"utf32_esperanto_ci": 177,
-+ //"utf32_hungarian_ci": 178,
-+ //"utf32_sinhala_ci": 179,
-+ //"utf32_german2_ci": 180,
-+ //"utf32_croatian_ci": 181,
-+ //"utf32_unicode_520_ci": 182,
-+ //"utf32_vietnamese_ci": 183,
-+ "utf8_unicode_ci": 192,
-+ "utf8_icelandic_ci": 193,
-+ "utf8_latvian_ci": 194,
-+ "utf8_romanian_ci": 195,
-+ "utf8_slovenian_ci": 196,
-+ "utf8_polish_ci": 197,
-+ "utf8_estonian_ci": 198,
-+ "utf8_spanish_ci": 199,
-+ "utf8_swedish_ci": 200,
-+ "utf8_turkish_ci": 201,
-+ "utf8_czech_ci": 202,
-+ "utf8_danish_ci": 203,
-+ "utf8_lithuanian_ci": 204,
-+ "utf8_slovak_ci": 205,
-+ "utf8_spanish2_ci": 206,
-+ "utf8_roman_ci": 207,
-+ "utf8_persian_ci": 208,
-+ "utf8_esperanto_ci": 209,
-+ "utf8_hungarian_ci": 210,
-+ "utf8_sinhala_ci": 211,
-+ "utf8_german2_ci": 212,
-+ "utf8_croatian_ci": 213,
-+ "utf8_unicode_520_ci": 214,
-+ "utf8_vietnamese_ci": 215,
-+ "utf8_general_mysql500_ci": 223,
-+ "utf8mb4_unicode_ci": 224,
-+ "utf8mb4_icelandic_ci": 225,
-+ "utf8mb4_latvian_ci": 226,
-+ "utf8mb4_romanian_ci": 227,
-+ "utf8mb4_slovenian_ci": 228,
-+ "utf8mb4_polish_ci": 229,
-+ "utf8mb4_estonian_ci": 230,
-+ "utf8mb4_spanish_ci": 231,
-+ "utf8mb4_swedish_ci": 232,
-+ "utf8mb4_turkish_ci": 233,
-+ "utf8mb4_czech_ci": 234,
-+ "utf8mb4_danish_ci": 235,
-+ "utf8mb4_lithuanian_ci": 236,
-+ "utf8mb4_slovak_ci": 237,
-+ "utf8mb4_spanish2_ci": 238,
-+ "utf8mb4_roman_ci": 239,
-+ "utf8mb4_persian_ci": 240,
-+ "utf8mb4_esperanto_ci": 241,
-+ "utf8mb4_hungarian_ci": 242,
-+ "utf8mb4_sinhala_ci": 243,
-+ "utf8mb4_german2_ci": 244,
-+ "utf8mb4_croatian_ci": 245,
-+ "utf8mb4_unicode_520_ci": 246,
-+ "utf8mb4_vietnamese_ci": 247,
-+ "gb18030_chinese_ci": 248,
-+ "gb18030_bin": 249,
-+ "gb18030_unicode_520_ci": 250,
-+ "utf8mb4_0900_ai_ci": 255,
-+}
-+
-+// A blacklist of collations which is unsafe to interpolate parameters.
-+// These multibyte encodings may contains 0x5c (`\`) in their trailing bytes.
-+var unsafeCollations = map[string]bool{
-+ "big5_chinese_ci": true,
-+ "sjis_japanese_ci": true,
-+ "gbk_chinese_ci": true,
-+ "big5_bin": true,
-+ "gb2312_bin": true,
-+ "gbk_bin": true,
-+ "sjis_bin": true,
-+ "cp932_japanese_ci": true,
-+ "cp932_bin": true,
-+ "gb18030_chinese_ci": true,
-+ "gb18030_bin": true,
-+ "gb18030_unicode_520_ci": true,
-+}
-diff --git a/vendor/github.com/go-sql-driver/mysql/conncheck.go b/vendor/github.com/go-sql-driver/mysql/conncheck.go
-new file mode 100644
-index 00000000000..70e9925f6fe
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/conncheck.go
-@@ -0,0 +1,54 @@
-+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-+//
-+// Copyright 2019 The Go-MySQL-Driver Authors. All rights reserved.
-+//
-+// This Source Code Form is subject to the terms of the Mozilla Public
-+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-+// You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+// +build !windows,!appengine
-+
-+package mysql
-+
-+import (
-+ "errors"
-+ "io"
-+ "net"
-+ "syscall"
-+)
-+
-+var errUnexpectedRead = errors.New("unexpected read from socket")
-+
-+func connCheck(conn net.Conn) error {
-+ var sysErr error
-+
-+ sysConn, ok := conn.(syscall.Conn)
-+ if !ok {
-+ return nil
-+ }
-+ rawConn, err := sysConn.SyscallConn()
-+ if err != nil {
-+ return err
-+ }
-+
-+ err = rawConn.Read(func(fd uintptr) bool {
-+ var buf [1]byte
-+ n, err := syscall.Read(int(fd), buf[:])
-+ switch {
-+ case n == 0 && err == nil:
-+ sysErr = io.EOF
-+ case n > 0:
-+ sysErr = errUnexpectedRead
-+ case err == syscall.EAGAIN || err == syscall.EWOULDBLOCK:
-+ sysErr = nil
-+ default:
-+ sysErr = err
-+ }
-+ return true
-+ })
-+ if err != nil {
-+ return err
-+ }
-+
-+ return sysErr
-+}
-diff --git a/vendor/github.com/go-sql-driver/mysql/conncheck_dummy.go b/vendor/github.com/go-sql-driver/mysql/conncheck_dummy.go
-new file mode 100644
-index 00000000000..4888288aa41
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/conncheck_dummy.go
-@@ -0,0 +1,17 @@
-+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-+//
-+// Copyright 2019 The Go-MySQL-Driver Authors. All rights reserved.
-+//
-+// This Source Code Form is subject to the terms of the Mozilla Public
-+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-+// You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+// +build windows appengine
-+
-+package mysql
-+
-+import "net"
-+
-+func connCheck(conn net.Conn) error {
-+ return nil
-+}
-diff --git a/vendor/github.com/go-sql-driver/mysql/conncheck_test.go b/vendor/github.com/go-sql-driver/mysql/conncheck_test.go
-new file mode 100644
-index 00000000000..b7234b0f52f
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/conncheck_test.go
-@@ -0,0 +1,38 @@
-+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-+//
-+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
-+//
-+// This Source Code Form is subject to the terms of the Mozilla Public
-+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-+// You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+// +build go1.10,!windows
-+
-+package mysql
-+
-+import (
-+ "testing"
-+ "time"
-+)
-+
-+func TestStaleConnectionChecks(t *testing.T) {
-+ runTests(t, dsn, func(dbt *DBTest) {
-+ dbt.mustExec("SET @@SESSION.wait_timeout = 2")
-+
-+ if err := dbt.db.Ping(); err != nil {
-+ dbt.Fatal(err)
-+ }
-+
-+ // wait for MySQL to close our connection
-+ time.Sleep(3 * time.Second)
-+
-+ tx, err := dbt.db.Begin()
-+ if err != nil {
-+ dbt.Fatal(err)
-+ }
-+
-+ if err := tx.Rollback(); err != nil {
-+ dbt.Fatal(err)
-+ }
-+ })
-+}
-diff --git a/vendor/github.com/go-sql-driver/mysql/connection.go b/vendor/github.com/go-sql-driver/mysql/connection.go
-new file mode 100644
-index 00000000000..e4bb59e67c3
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/connection.go
-@@ -0,0 +1,651 @@
-+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-+//
-+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
-+//
-+// This Source Code Form is subject to the terms of the Mozilla Public
-+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-+// You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+package mysql
-+
-+import (
-+ "context"
-+ "database/sql"
-+ "database/sql/driver"
-+ "io"
-+ "net"
-+ "strconv"
-+ "strings"
-+ "time"
-+)
-+
-+type mysqlConn struct {
-+ buf buffer
-+ netConn net.Conn
-+ rawConn net.Conn // underlying connection when netConn is TLS connection.
-+ affectedRows uint64
-+ insertId uint64
-+ cfg *Config
-+ maxAllowedPacket int
-+ maxWriteSize int
-+ writeTimeout time.Duration
-+ flags clientFlag
-+ status statusFlag
-+ sequence uint8
-+ parseTime bool
-+ reset bool // set when the Go SQL package calls ResetSession
-+
-+ // for context support (Go 1.8+)
-+ watching bool
-+ watcher chan<- context.Context
-+ closech chan struct{}
-+ finished chan<- struct{}
-+ canceled atomicError // set non-nil if conn is canceled
-+ closed atomicBool // set when conn is closed, before closech is closed
-+}
-+
-+// Handles parameters set in DSN after the connection is established
-+func (mc *mysqlConn) handleParams() (err error) {
-+ for param, val := range mc.cfg.Params {
-+ switch param {
-+ // Charset
-+ case "charset":
-+ charsets := strings.Split(val, ",")
-+ for i := range charsets {
-+ // ignore errors here - a charset may not exist
-+ err = mc.exec("SET NAMES " + charsets[i])
-+ if err == nil {
-+ break
-+ }
-+ }
-+ if err != nil {
-+ return
-+ }
-+
-+ // System Vars
-+ default:
-+ err = mc.exec("SET " + param + "=" + val + "")
-+ if err != nil {
-+ return
-+ }
-+ }
-+ }
-+
-+ return
-+}
-+
-+func (mc *mysqlConn) markBadConn(err error) error {
-+ if mc == nil {
-+ return err
-+ }
-+ if err != errBadConnNoWrite {
-+ return err
-+ }
-+ return driver.ErrBadConn
-+}
-+
-+func (mc *mysqlConn) Begin() (driver.Tx, error) {
-+ return mc.begin(false)
-+}
-+
-+func (mc *mysqlConn) begin(readOnly bool) (driver.Tx, error) {
-+ if mc.closed.IsSet() {
-+ errLog.Print(ErrInvalidConn)
-+ return nil, driver.ErrBadConn
-+ }
-+ var q string
-+ if readOnly {
-+ q = "START TRANSACTION READ ONLY"
-+ } else {
-+ q = "START TRANSACTION"
-+ }
-+ err := mc.exec(q)
-+ if err == nil {
-+ return &mysqlTx{mc}, err
-+ }
-+ return nil, mc.markBadConn(err)
-+}
-+
-+func (mc *mysqlConn) Close() (err error) {
-+ // Makes Close idempotent
-+ if !mc.closed.IsSet() {
-+ err = mc.writeCommandPacket(comQuit)
-+ }
-+
-+ mc.cleanup()
-+
-+ return
-+}
-+
-+// Closes the network connection and unsets internal variables. Do not call this
-+// function after successfully authentication, call Close instead. This function
-+// is called before auth or on auth failure because MySQL will have already
-+// closed the network connection.
-+func (mc *mysqlConn) cleanup() {
-+ if !mc.closed.TrySet(true) {
-+ return
-+ }
-+
-+ // Makes cleanup idempotent
-+ close(mc.closech)
-+ if mc.netConn == nil {
-+ return
-+ }
-+ if err := mc.netConn.Close(); err != nil {
-+ errLog.Print(err)
-+ }
-+}
-+
-+func (mc *mysqlConn) error() error {
-+ if mc.closed.IsSet() {
-+ if err := mc.canceled.Value(); err != nil {
-+ return err
-+ }
-+ return ErrInvalidConn
-+ }
-+ return nil
-+}
-+
-+func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) {
-+ if mc.closed.IsSet() {
-+ errLog.Print(ErrInvalidConn)
-+ return nil, driver.ErrBadConn
-+ }
-+ // Send command
-+ err := mc.writeCommandPacketStr(comStmtPrepare, query)
-+ if err != nil {
-+ // STMT_PREPARE is safe to retry. So we can return ErrBadConn here.
-+ errLog.Print(err)
-+ return nil, driver.ErrBadConn
-+ }
-+
-+ stmt := &mysqlStmt{
-+ mc: mc,
-+ }
-+
-+ // Read Result
-+ columnCount, err := stmt.readPrepareResultPacket()
-+ if err == nil {
-+ if stmt.paramCount > 0 {
-+ if err = mc.readUntilEOF(); err != nil {
-+ return nil, err
-+ }
-+ }
-+
-+ if columnCount > 0 {
-+ err = mc.readUntilEOF()
-+ }
-+ }
-+
-+ return stmt, err
-+}
-+
-+func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (string, error) {
-+ // Number of ? should be same to len(args)
-+ if strings.Count(query, "?") != len(args) {
-+ return "", driver.ErrSkip
-+ }
-+
-+ buf, err := mc.buf.takeCompleteBuffer()
-+ if err != nil {
-+ // can not take the buffer. Something must be wrong with the connection
-+ errLog.Print(err)
-+ return "", ErrInvalidConn
-+ }
-+ buf = buf[:0]
-+ argPos := 0
-+
-+ for i := 0; i < len(query); i++ {
-+ q := strings.IndexByte(query[i:], '?')
-+ if q == -1 {
-+ buf = append(buf, query[i:]...)
-+ break
-+ }
-+ buf = append(buf, query[i:i+q]...)
-+ i += q
-+
-+ arg := args[argPos]
-+ argPos++
-+
-+ if arg == nil {
-+ buf = append(buf, "NULL"...)
-+ continue
-+ }
-+
-+ switch v := arg.(type) {
-+ case int64:
-+ buf = strconv.AppendInt(buf, v, 10)
-+ case uint64:
-+ // Handle uint64 explicitly because our custom ConvertValue emits unsigned values
-+ buf = strconv.AppendUint(buf, v, 10)
-+ case float64:
-+ buf = strconv.AppendFloat(buf, v, 'g', -1, 64)
-+ case bool:
-+ if v {
-+ buf = append(buf, '1')
-+ } else {
-+ buf = append(buf, '0')
-+ }
-+ case time.Time:
-+ if v.IsZero() {
-+ buf = append(buf, "'0000-00-00'"...)
-+ } else {
-+ v := v.In(mc.cfg.Loc)
-+ v = v.Add(time.Nanosecond * 500) // To round under microsecond
-+ year := v.Year()
-+ year100 := year / 100
-+ year1 := year % 100
-+ month := v.Month()
-+ day := v.Day()
-+ hour := v.Hour()
-+ minute := v.Minute()
-+ second := v.Second()
-+ micro := v.Nanosecond() / 1000
-+
-+ buf = append(buf, []byte{
-+ '\'',
-+ digits10[year100], digits01[year100],
-+ digits10[year1], digits01[year1],
-+ '-',
-+ digits10[month], digits01[month],
-+ '-',
-+ digits10[day], digits01[day],
-+ ' ',
-+ digits10[hour], digits01[hour],
-+ ':',
-+ digits10[minute], digits01[minute],
-+ ':',
-+ digits10[second], digits01[second],
-+ }...)
-+
-+ if micro != 0 {
-+ micro10000 := micro / 10000
-+ micro100 := micro / 100 % 100
-+ micro1 := micro % 100
-+ buf = append(buf, []byte{
-+ '.',
-+ digits10[micro10000], digits01[micro10000],
-+ digits10[micro100], digits01[micro100],
-+ digits10[micro1], digits01[micro1],
-+ }...)
-+ }
-+ buf = append(buf, '\'')
-+ }
-+ case []byte:
-+ if v == nil {
-+ buf = append(buf, "NULL"...)
-+ } else {
-+ buf = append(buf, "_binary'"...)
-+ if mc.status&statusNoBackslashEscapes == 0 {
-+ buf = escapeBytesBackslash(buf, v)
-+ } else {
-+ buf = escapeBytesQuotes(buf, v)
-+ }
-+ buf = append(buf, '\'')
-+ }
-+ case string:
-+ buf = append(buf, '\'')
-+ if mc.status&statusNoBackslashEscapes == 0 {
-+ buf = escapeStringBackslash(buf, v)
-+ } else {
-+ buf = escapeStringQuotes(buf, v)
-+ }
-+ buf = append(buf, '\'')
-+ default:
-+ return "", driver.ErrSkip
-+ }
-+
-+ if len(buf)+4 > mc.maxAllowedPacket {
-+ return "", driver.ErrSkip
-+ }
-+ }
-+ if argPos != len(args) {
-+ return "", driver.ErrSkip
-+ }
-+ return string(buf), nil
-+}
-+
-+func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) {
-+ if mc.closed.IsSet() {
-+ errLog.Print(ErrInvalidConn)
-+ return nil, driver.ErrBadConn
-+ }
-+ if len(args) != 0 {
-+ if !mc.cfg.InterpolateParams {
-+ return nil, driver.ErrSkip
-+ }
-+ // try to interpolate the parameters to save extra roundtrips for preparing and closing a statement
-+ prepared, err := mc.interpolateParams(query, args)
-+ if err != nil {
-+ return nil, err
-+ }
-+ query = prepared
-+ }
-+ mc.affectedRows = 0
-+ mc.insertId = 0
-+
-+ err := mc.exec(query)
-+ if err == nil {
-+ return &mysqlResult{
-+ affectedRows: int64(mc.affectedRows),
-+ insertId: int64(mc.insertId),
-+ }, err
-+ }
-+ return nil, mc.markBadConn(err)
-+}
-+
-+// Internal function to execute commands
-+func (mc *mysqlConn) exec(query string) error {
-+ // Send command
-+ if err := mc.writeCommandPacketStr(comQuery, query); err != nil {
-+ return mc.markBadConn(err)
-+ }
-+
-+ // Read Result
-+ resLen, err := mc.readResultSetHeaderPacket()
-+ if err != nil {
-+ return err
-+ }
-+
-+ if resLen > 0 {
-+ // columns
-+ if err := mc.readUntilEOF(); err != nil {
-+ return err
-+ }
-+
-+ // rows
-+ if err := mc.readUntilEOF(); err != nil {
-+ return err
-+ }
-+ }
-+
-+ return mc.discardResults()
-+}
-+
-+func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) {
-+ return mc.query(query, args)
-+}
-+
-+func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error) {
-+ if mc.closed.IsSet() {
-+ errLog.Print(ErrInvalidConn)
-+ return nil, driver.ErrBadConn
-+ }
-+ if len(args) != 0 {
-+ if !mc.cfg.InterpolateParams {
-+ return nil, driver.ErrSkip
-+ }
-+ // try client-side prepare to reduce roundtrip
-+ prepared, err := mc.interpolateParams(query, args)
-+ if err != nil {
-+ return nil, err
-+ }
-+ query = prepared
-+ }
-+ // Send command
-+ err := mc.writeCommandPacketStr(comQuery, query)
-+ if err == nil {
-+ // Read Result
-+ var resLen int
-+ resLen, err = mc.readResultSetHeaderPacket()
-+ if err == nil {
-+ rows := new(textRows)
-+ rows.mc = mc
-+
-+ if resLen == 0 {
-+ rows.rs.done = true
-+
-+ switch err := rows.NextResultSet(); err {
-+ case nil, io.EOF:
-+ return rows, nil
-+ default:
-+ return nil, err
-+ }
-+ }
-+
-+ // Columns
-+ rows.rs.columns, err = mc.readColumns(resLen)
-+ return rows, err
-+ }
-+ }
-+ return nil, mc.markBadConn(err)
-+}
-+
-+// Gets the value of the given MySQL System Variable
-+// The returned byte slice is only valid until the next read
-+func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) {
-+ // Send command
-+ if err := mc.writeCommandPacketStr(comQuery, "SELECT @@"+name); err != nil {
-+ return nil, err
-+ }
-+
-+ // Read Result
-+ resLen, err := mc.readResultSetHeaderPacket()
-+ if err == nil {
-+ rows := new(textRows)
-+ rows.mc = mc
-+ rows.rs.columns = []mysqlField{{fieldType: fieldTypeVarChar}}
-+
-+ if resLen > 0 {
-+ // Columns
-+ if err := mc.readUntilEOF(); err != nil {
-+ return nil, err
-+ }
-+ }
-+
-+ dest := make([]driver.Value, resLen)
-+ if err = rows.readRow(dest); err == nil {
-+ return dest[0].([]byte), mc.readUntilEOF()
-+ }
-+ }
-+ return nil, err
-+}
-+
-+// finish is called when the query has canceled.
-+func (mc *mysqlConn) cancel(err error) {
-+ mc.canceled.Set(err)
-+ mc.cleanup()
-+}
-+
-+// finish is called when the query has succeeded.
-+func (mc *mysqlConn) finish() {
-+ if !mc.watching || mc.finished == nil {
-+ return
-+ }
-+ select {
-+ case mc.finished <- struct{}{}:
-+ mc.watching = false
-+ case <-mc.closech:
-+ }
-+}
-+
-+// Ping implements driver.Pinger interface
-+func (mc *mysqlConn) Ping(ctx context.Context) (err error) {
-+ if mc.closed.IsSet() {
-+ errLog.Print(ErrInvalidConn)
-+ return driver.ErrBadConn
-+ }
-+
-+ if err = mc.watchCancel(ctx); err != nil {
-+ return
-+ }
-+ defer mc.finish()
-+
-+ if err = mc.writeCommandPacket(comPing); err != nil {
-+ return mc.markBadConn(err)
-+ }
-+
-+ return mc.readResultOK()
-+}
-+
-+// BeginTx implements driver.ConnBeginTx interface
-+func (mc *mysqlConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
-+ if err := mc.watchCancel(ctx); err != nil {
-+ return nil, err
-+ }
-+ defer mc.finish()
-+
-+ if sql.IsolationLevel(opts.Isolation) != sql.LevelDefault {
-+ level, err := mapIsolationLevel(opts.Isolation)
-+ if err != nil {
-+ return nil, err
-+ }
-+ err = mc.exec("SET TRANSACTION ISOLATION LEVEL " + level)
-+ if err != nil {
-+ return nil, err
-+ }
-+ }
-+
-+ return mc.begin(opts.ReadOnly)
-+}
-+
-+func (mc *mysqlConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
-+ dargs, err := namedValueToValue(args)
-+ if err != nil {
-+ return nil, err
-+ }
-+
-+ if err := mc.watchCancel(ctx); err != nil {
-+ return nil, err
-+ }
-+
-+ rows, err := mc.query(query, dargs)
-+ if err != nil {
-+ mc.finish()
-+ return nil, err
-+ }
-+ rows.finish = mc.finish
-+ return rows, err
-+}
-+
-+func (mc *mysqlConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
-+ dargs, err := namedValueToValue(args)
-+ if err != nil {
-+ return nil, err
-+ }
-+
-+ if err := mc.watchCancel(ctx); err != nil {
-+ return nil, err
-+ }
-+ defer mc.finish()
-+
-+ return mc.Exec(query, dargs)
-+}
-+
-+func (mc *mysqlConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
-+ if err := mc.watchCancel(ctx); err != nil {
-+ return nil, err
-+ }
-+
-+ stmt, err := mc.Prepare(query)
-+ mc.finish()
-+ if err != nil {
-+ return nil, err
-+ }
-+
-+ select {
-+ default:
-+ case <-ctx.Done():
-+ stmt.Close()
-+ return nil, ctx.Err()
-+ }
-+ return stmt, nil
-+}
-+
-+func (stmt *mysqlStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
-+ dargs, err := namedValueToValue(args)
-+ if err != nil {
-+ return nil, err
-+ }
-+
-+ if err := stmt.mc.watchCancel(ctx); err != nil {
-+ return nil, err
-+ }
-+
-+ rows, err := stmt.query(dargs)
-+ if err != nil {
-+ stmt.mc.finish()
-+ return nil, err
-+ }
-+ rows.finish = stmt.mc.finish
-+ return rows, err
-+}
-+
-+func (stmt *mysqlStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
-+ dargs, err := namedValueToValue(args)
-+ if err != nil {
-+ return nil, err
-+ }
-+
-+ if err := stmt.mc.watchCancel(ctx); err != nil {
-+ return nil, err
-+ }
-+ defer stmt.mc.finish()
-+
-+ return stmt.Exec(dargs)
-+}
-+
-+func (mc *mysqlConn) watchCancel(ctx context.Context) error {
-+ if mc.watching {
-+ // Reach here if canceled,
-+ // so the connection is already invalid
-+ mc.cleanup()
-+ return nil
-+ }
-+ // When ctx is already cancelled, don't watch it.
-+ if err := ctx.Err(); err != nil {
-+ return err
-+ }
-+ // When ctx is not cancellable, don't watch it.
-+ if ctx.Done() == nil {
-+ return nil
-+ }
-+ // When watcher is not alive, can't watch it.
-+ if mc.watcher == nil {
-+ return nil
-+ }
-+
-+ mc.watching = true
-+ mc.watcher <- ctx
-+ return nil
-+}
-+
-+func (mc *mysqlConn) startWatcher() {
-+ watcher := make(chan context.Context, 1)
-+ mc.watcher = watcher
-+ finished := make(chan struct{})
-+ mc.finished = finished
-+ go func() {
-+ for {
-+ var ctx context.Context
-+ select {
-+ case ctx = <-watcher:
-+ case <-mc.closech:
-+ return
-+ }
-+
-+ select {
-+ case <-ctx.Done():
-+ mc.cancel(ctx.Err())
-+ case <-finished:
-+ case <-mc.closech:
-+ return
-+ }
-+ }
-+ }()
-+}
-+
-+func (mc *mysqlConn) CheckNamedValue(nv *driver.NamedValue) (err error) {
-+ nv.Value, err = converter{}.ConvertValue(nv.Value)
-+ return
-+}
-+
-+// ResetSession implements driver.SessionResetter.
-+// (From Go 1.10)
-+func (mc *mysqlConn) ResetSession(ctx context.Context) error {
-+ if mc.closed.IsSet() {
-+ return driver.ErrBadConn
-+ }
-+ mc.reset = true
-+ return nil
-+}
-diff --git a/vendor/github.com/go-sql-driver/mysql/connection_test.go b/vendor/github.com/go-sql-driver/mysql/connection_test.go
-new file mode 100644
-index 00000000000..19c17ff8b1d
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/connection_test.go
-@@ -0,0 +1,175 @@
-+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-+//
-+// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved.
-+//
-+// This Source Code Form is subject to the terms of the Mozilla Public
-+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-+// You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+package mysql
-+
-+import (
-+ "context"
-+ "database/sql/driver"
-+ "errors"
-+ "net"
-+ "testing"
-+)
-+
-+func TestInterpolateParams(t *testing.T) {
-+ mc := &mysqlConn{
-+ buf: newBuffer(nil),
-+ maxAllowedPacket: maxPacketSize,
-+ cfg: &Config{
-+ InterpolateParams: true,
-+ },
-+ }
-+
-+ q, err := mc.interpolateParams("SELECT ?+?", []driver.Value{int64(42), "gopher"})
-+ if err != nil {
-+ t.Errorf("Expected err=nil, got %#v", err)
-+ return
-+ }
-+ expected := `SELECT 42+'gopher'`
-+ if q != expected {
-+ t.Errorf("Expected: %q\nGot: %q", expected, q)
-+ }
-+}
-+
-+func TestInterpolateParamsTooManyPlaceholders(t *testing.T) {
-+ mc := &mysqlConn{
-+ buf: newBuffer(nil),
-+ maxAllowedPacket: maxPacketSize,
-+ cfg: &Config{
-+ InterpolateParams: true,
-+ },
-+ }
-+
-+ q, err := mc.interpolateParams("SELECT ?+?", []driver.Value{int64(42)})
-+ if err != driver.ErrSkip {
-+ t.Errorf("Expected err=driver.ErrSkip, got err=%#v, q=%#v", err, q)
-+ }
-+}
-+
-+// We don't support placeholder in string literal for now.
-+// https://github.com/go-sql-driver/mysql/pull/490
-+func TestInterpolateParamsPlaceholderInString(t *testing.T) {
-+ mc := &mysqlConn{
-+ buf: newBuffer(nil),
-+ maxAllowedPacket: maxPacketSize,
-+ cfg: &Config{
-+ InterpolateParams: true,
-+ },
-+ }
-+
-+ q, err := mc.interpolateParams("SELECT 'abc?xyz',?", []driver.Value{int64(42)})
-+ // When InterpolateParams support string literal, this should return `"SELECT 'abc?xyz', 42`
-+ if err != driver.ErrSkip {
-+ t.Errorf("Expected err=driver.ErrSkip, got err=%#v, q=%#v", err, q)
-+ }
-+}
-+
-+func TestInterpolateParamsUint64(t *testing.T) {
-+ mc := &mysqlConn{
-+ buf: newBuffer(nil),
-+ maxAllowedPacket: maxPacketSize,
-+ cfg: &Config{
-+ InterpolateParams: true,
-+ },
-+ }
-+
-+ q, err := mc.interpolateParams("SELECT ?", []driver.Value{uint64(42)})
-+ if err != nil {
-+ t.Errorf("Expected err=nil, got err=%#v, q=%#v", err, q)
-+ }
-+ if q != "SELECT 42" {
-+ t.Errorf("Expected uint64 interpolation to work, got q=%#v", q)
-+ }
-+}
-+
-+func TestCheckNamedValue(t *testing.T) {
-+ value := driver.NamedValue{Value: ^uint64(0)}
-+ x := &mysqlConn{}
-+ err := x.CheckNamedValue(&value)
-+
-+ if err != nil {
-+ t.Fatal("uint64 high-bit not convertible", err)
-+ }
-+
-+ if value.Value != ^uint64(0) {
-+ t.Fatalf("uint64 high-bit converted, got %#v %T", value.Value, value.Value)
-+ }
-+}
-+
-+// TestCleanCancel tests passed context is cancelled at start.
-+// No packet should be sent. Connection should keep current status.
-+func TestCleanCancel(t *testing.T) {
-+ mc := &mysqlConn{
-+ closech: make(chan struct{}),
-+ }
-+ mc.startWatcher()
-+ defer mc.cleanup()
-+
-+ ctx, cancel := context.WithCancel(context.Background())
-+ cancel()
-+
-+ for i := 0; i < 3; i++ { // Repeat same behavior
-+ err := mc.Ping(ctx)
-+ if err != context.Canceled {
-+ t.Errorf("expected context.Canceled, got %#v", err)
-+ }
-+
-+ if mc.closed.IsSet() {
-+ t.Error("expected mc is not closed, closed actually")
-+ }
-+
-+ if mc.watching {
-+ t.Error("expected watching is false, but true")
-+ }
-+ }
-+}
-+
-+func TestPingMarkBadConnection(t *testing.T) {
-+ nc := badConnection{err: errors.New("boom")}
-+ ms := &mysqlConn{
-+ netConn: nc,
-+ buf: newBuffer(nc),
-+ maxAllowedPacket: defaultMaxAllowedPacket,
-+ }
-+
-+ err := ms.Ping(context.Background())
-+
-+ if err != driver.ErrBadConn {
-+ t.Errorf("expected driver.ErrBadConn, got %#v", err)
-+ }
-+}
-+
-+func TestPingErrInvalidConn(t *testing.T) {
-+ nc := badConnection{err: errors.New("failed to write"), n: 10}
-+ ms := &mysqlConn{
-+ netConn: nc,
-+ buf: newBuffer(nc),
-+ maxAllowedPacket: defaultMaxAllowedPacket,
-+ closech: make(chan struct{}),
-+ }
-+
-+ err := ms.Ping(context.Background())
-+
-+ if err != ErrInvalidConn {
-+ t.Errorf("expected ErrInvalidConn, got %#v", err)
-+ }
-+}
-+
-+type badConnection struct {
-+ n int
-+ err error
-+ net.Conn
-+}
-+
-+func (bc badConnection) Write(b []byte) (n int, err error) {
-+ return bc.n, bc.err
-+}
-+
-+func (bc badConnection) Close() error {
-+ return nil
-+}
-diff --git a/vendor/github.com/go-sql-driver/mysql/connector.go b/vendor/github.com/go-sql-driver/mysql/connector.go
-new file mode 100644
-index 00000000000..eac0f01aae1
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/connector.go
-@@ -0,0 +1,140 @@
-+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-+//
-+// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved.
-+//
-+// This Source Code Form is subject to the terms of the Mozilla Public
-+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-+// You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+package mysql
-+
-+import (
-+ "context"
-+ "database/sql/driver"
-+ "net"
-+)
-+
-+type connector struct {
-+ cfg *Config // immutable private copy.
-+}
-+
-+// Connect implements driver.Connector interface.
-+// Connect returns a connection to the database.
-+func (c *connector) Connect(ctx context.Context) (driver.Conn, error) {
-+ var err error
-+
-+ // New mysqlConn
-+ mc := &mysqlConn{
-+ maxAllowedPacket: maxPacketSize,
-+ maxWriteSize: maxPacketSize - 1,
-+ closech: make(chan struct{}),
-+ cfg: c.cfg,
-+ }
-+ mc.parseTime = mc.cfg.ParseTime
-+
-+ // Connect to Server
-+ dialsLock.RLock()
-+ dial, ok := dials[mc.cfg.Net]
-+ dialsLock.RUnlock()
-+ if ok {
-+ mc.netConn, err = dial(ctx, mc.cfg.Addr)
-+ } else {
-+ nd := net.Dialer{Timeout: mc.cfg.Timeout}
-+ mc.netConn, err = nd.DialContext(ctx, mc.cfg.Net, mc.cfg.Addr)
-+ }
-+
-+ if err != nil {
-+ return nil, err
-+ }
-+
-+ // Enable TCP Keepalives on TCP connections
-+ if tc, ok := mc.netConn.(*net.TCPConn); ok {
-+ if err := tc.SetKeepAlive(true); err != nil {
-+ // Don't send COM_QUIT before handshake.
-+ mc.netConn.Close()
-+ mc.netConn = nil
-+ return nil, err
-+ }
-+ }
-+
-+ // Call startWatcher for context support (From Go 1.8)
-+ mc.startWatcher()
-+ if err := mc.watchCancel(ctx); err != nil {
-+ mc.cleanup()
-+ return nil, err
-+ }
-+ defer mc.finish()
-+
-+ mc.buf = newBuffer(mc.netConn)
-+
-+ // Set I/O timeouts
-+ mc.buf.timeout = mc.cfg.ReadTimeout
-+ mc.writeTimeout = mc.cfg.WriteTimeout
-+
-+ // Reading Handshake Initialization Packet
-+ authData, plugin, err := mc.readHandshakePacket()
-+ if err != nil {
-+ mc.cleanup()
-+ return nil, err
-+ }
-+
-+ if plugin == "" {
-+ plugin = defaultAuthPlugin
-+ }
-+
-+ // Send Client Authentication Packet
-+ authResp, err := mc.auth(authData, plugin)
-+ if err != nil {
-+ // try the default auth plugin, if using the requested plugin failed
-+ errLog.Print("could not use requested auth plugin '"+plugin+"': ", err.Error())
-+ plugin = defaultAuthPlugin
-+ authResp, err = mc.auth(authData, plugin)
-+ if err != nil {
-+ mc.cleanup()
-+ return nil, err
-+ }
-+ }
-+ if err = mc.writeHandshakeResponsePacket(authResp, plugin); err != nil {
-+ mc.cleanup()
-+ return nil, err
-+ }
-+
-+ // Handle response to auth packet, switch methods if possible
-+ if err = mc.handleAuthResult(authData, plugin); err != nil {
-+ // Authentication failed and MySQL has already closed the connection
-+ // (https://dev.mysql.com/doc/internals/en/authentication-fails.html).
-+ // Do not send COM_QUIT, just cleanup and return the error.
-+ mc.cleanup()
-+ return nil, err
-+ }
-+
-+ if mc.cfg.MaxAllowedPacket > 0 {
-+ mc.maxAllowedPacket = mc.cfg.MaxAllowedPacket
-+ } else {
-+ // Get max allowed packet size
-+ maxap, err := mc.getSystemVar("max_allowed_packet")
-+ if err != nil {
-+ mc.Close()
-+ return nil, err
-+ }
-+ mc.maxAllowedPacket = stringToInt(maxap) - 1
-+ }
-+ if mc.maxAllowedPacket < maxPacketSize {
-+ mc.maxWriteSize = mc.maxAllowedPacket
-+ }
-+
-+ // Handle DSN Params
-+ err = mc.handleParams()
-+ if err != nil {
-+ mc.Close()
-+ return nil, err
-+ }
-+
-+ return mc, nil
-+}
-+
-+// Driver implements driver.Connector interface.
-+// Driver returns &MySQLDriver{}.
-+func (c *connector) Driver() driver.Driver {
-+ return &MySQLDriver{}
-+}
-diff --git a/vendor/github.com/go-sql-driver/mysql/connector_test.go b/vendor/github.com/go-sql-driver/mysql/connector_test.go
-new file mode 100644
-index 00000000000..976903c5b5a
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/connector_test.go
-@@ -0,0 +1,30 @@
-+package mysql
-+
-+import (
-+ "context"
-+ "net"
-+ "testing"
-+ "time"
-+)
-+
-+func TestConnectorReturnsTimeout(t *testing.T) {
-+ connector := &connector{&Config{
-+ Net: "tcp",
-+ Addr: "1.1.1.1:1234",
-+ Timeout: 10 * time.Millisecond,
-+ }}
-+
-+ _, err := connector.Connect(context.Background())
-+ if err == nil {
-+ t.Fatal("error expected")
-+ }
-+
-+ if nerr, ok := err.(*net.OpError); ok {
-+ expected := "dial tcp 1.1.1.1:1234: i/o timeout"
-+ if nerr.Error() != expected {
-+ t.Fatalf("expected %q, got %q", expected, nerr.Error())
-+ }
-+ } else {
-+ t.Fatalf("expected %T, got %T", nerr, err)
-+ }
-+}
-diff --git a/vendor/github.com/go-sql-driver/mysql/const.go b/vendor/github.com/go-sql-driver/mysql/const.go
-new file mode 100644
-index 00000000000..b1e6b85efca
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/const.go
-@@ -0,0 +1,174 @@
-+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-+//
-+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
-+//
-+// This Source Code Form is subject to the terms of the Mozilla Public
-+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-+// You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+package mysql
-+
-+const (
-+ defaultAuthPlugin = "mysql_native_password"
-+ defaultMaxAllowedPacket = 4 << 20 // 4 MiB
-+ minProtocolVersion = 10
-+ maxPacketSize = 1<<24 - 1
-+ timeFormat = "2006-01-02 15:04:05.999999"
-+)
-+
-+// MySQL constants documentation:
-+// http://dev.mysql.com/doc/internals/en/client-server-protocol.html
-+
-+const (
-+ iOK byte = 0x00
-+ iAuthMoreData byte = 0x01
-+ iLocalInFile byte = 0xfb
-+ iEOF byte = 0xfe
-+ iERR byte = 0xff
-+)
-+
-+// https://dev.mysql.com/doc/internals/en/capability-flags.html#packet-Protocol::CapabilityFlags
-+type clientFlag uint32
-+
-+const (
-+ clientLongPassword clientFlag = 1 << iota
-+ clientFoundRows
-+ clientLongFlag
-+ clientConnectWithDB
-+ clientNoSchema
-+ clientCompress
-+ clientODBC
-+ clientLocalFiles
-+ clientIgnoreSpace
-+ clientProtocol41
-+ clientInteractive
-+ clientSSL
-+ clientIgnoreSIGPIPE
-+ clientTransactions
-+ clientReserved
-+ clientSecureConn
-+ clientMultiStatements
-+ clientMultiResults
-+ clientPSMultiResults
-+ clientPluginAuth
-+ clientConnectAttrs
-+ clientPluginAuthLenEncClientData
-+ clientCanHandleExpiredPasswords
-+ clientSessionTrack
-+ clientDeprecateEOF
-+)
-+
-+const (
-+ comQuit byte = iota + 1
-+ comInitDB
-+ comQuery
-+ comFieldList
-+ comCreateDB
-+ comDropDB
-+ comRefresh
-+ comShutdown
-+ comStatistics
-+ comProcessInfo
-+ comConnect
-+ comProcessKill
-+ comDebug
-+ comPing
-+ comTime
-+ comDelayedInsert
-+ comChangeUser
-+ comBinlogDump
-+ comTableDump
-+ comConnectOut
-+ comRegisterSlave
-+ comStmtPrepare
-+ comStmtExecute
-+ comStmtSendLongData
-+ comStmtClose
-+ comStmtReset
-+ comSetOption
-+ comStmtFetch
-+)
-+
-+// https://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnType
-+type fieldType byte
-+
-+const (
-+ fieldTypeDecimal fieldType = iota
-+ fieldTypeTiny
-+ fieldTypeShort
-+ fieldTypeLong
-+ fieldTypeFloat
-+ fieldTypeDouble
-+ fieldTypeNULL
-+ fieldTypeTimestamp
-+ fieldTypeLongLong
-+ fieldTypeInt24
-+ fieldTypeDate
-+ fieldTypeTime
-+ fieldTypeDateTime
-+ fieldTypeYear
-+ fieldTypeNewDate
-+ fieldTypeVarChar
-+ fieldTypeBit
-+)
-+const (
-+ fieldTypeJSON fieldType = iota + 0xf5
-+ fieldTypeNewDecimal
-+ fieldTypeEnum
-+ fieldTypeSet
-+ fieldTypeTinyBLOB
-+ fieldTypeMediumBLOB
-+ fieldTypeLongBLOB
-+ fieldTypeBLOB
-+ fieldTypeVarString
-+ fieldTypeString
-+ fieldTypeGeometry
-+)
-+
-+type fieldFlag uint16
-+
-+const (
-+ flagNotNULL fieldFlag = 1 << iota
-+ flagPriKey
-+ flagUniqueKey
-+ flagMultipleKey
-+ flagBLOB
-+ flagUnsigned
-+ flagZeroFill
-+ flagBinary
-+ flagEnum
-+ flagAutoIncrement
-+ flagTimestamp
-+ flagSet
-+ flagUnknown1
-+ flagUnknown2
-+ flagUnknown3
-+ flagUnknown4
-+)
-+
-+// http://dev.mysql.com/doc/internals/en/status-flags.html
-+type statusFlag uint16
-+
-+const (
-+ statusInTrans statusFlag = 1 << iota
-+ statusInAutocommit
-+ statusReserved // Not in documentation
-+ statusMoreResultsExists
-+ statusNoGoodIndexUsed
-+ statusNoIndexUsed
-+ statusCursorExists
-+ statusLastRowSent
-+ statusDbDropped
-+ statusNoBackslashEscapes
-+ statusMetadataChanged
-+ statusQueryWasSlow
-+ statusPsOutParams
-+ statusInTransReadonly
-+ statusSessionStateChanged
-+)
-+
-+const (
-+ cachingSha2PasswordRequestPublicKey = 2
-+ cachingSha2PasswordFastAuthSuccess = 3
-+ cachingSha2PasswordPerformFullAuthentication = 4
-+)
-diff --git a/vendor/github.com/go-sql-driver/mysql/driver.go b/vendor/github.com/go-sql-driver/mysql/driver.go
-new file mode 100644
-index 00000000000..1f9decf80bc
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/driver.go
-@@ -0,0 +1,85 @@
-+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
-+//
-+// This Source Code Form is subject to the terms of the Mozilla Public
-+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-+// You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+// Package mysql provides a MySQL driver for Go's database/sql package.
-+//
-+// The driver should be used via the database/sql package:
-+//
-+// import "database/sql"
-+// import _ "github.com/go-sql-driver/mysql"
-+//
-+// db, err := sql.Open("mysql", "user:password@/dbname")
-+//
-+// See https://github.com/go-sql-driver/mysql#usage for details
-+package mysql
-+
-+import (
-+ "context"
-+ "database/sql"
-+ "database/sql/driver"
-+ "net"
-+ "sync"
-+)
-+
-+// MySQLDriver is exported to make the driver directly accessible.
-+// In general the driver is used via the database/sql package.
-+type MySQLDriver struct{}
-+
-+// DialFunc is a function which can be used to establish the network connection.
-+// Custom dial functions must be registered with RegisterDial
-+//
-+// Deprecated: users should register a DialContextFunc instead
-+type DialFunc func(addr string) (net.Conn, error)
-+
-+// DialContextFunc is a function which can be used to establish the network connection.
-+// Custom dial functions must be registered with RegisterDialContext
-+type DialContextFunc func(ctx context.Context, addr string) (net.Conn, error)
-+
-+var (
-+ dialsLock sync.RWMutex
-+ dials map[string]DialContextFunc
-+)
-+
-+// RegisterDialContext registers a custom dial function. It can then be used by the
-+// network address mynet(addr), where mynet is the registered new network.
-+// The current context for the connection and its address is passed to the dial function.
-+func RegisterDialContext(net string, dial DialContextFunc) {
-+ dialsLock.Lock()
-+ defer dialsLock.Unlock()
-+ if dials == nil {
-+ dials = make(map[string]DialContextFunc)
-+ }
-+ dials[net] = dial
-+}
-+
-+// RegisterDial registers a custom dial function. It can then be used by the
-+// network address mynet(addr), where mynet is the registered new network.
-+// addr is passed as a parameter to the dial function.
-+//
-+// Deprecated: users should call RegisterDialContext instead
-+func RegisterDial(network string, dial DialFunc) {
-+ RegisterDialContext(network, func(_ context.Context, addr string) (net.Conn, error) {
-+ return dial(addr)
-+ })
-+}
-+
-+// Open new Connection.
-+// See https://github.com/go-sql-driver/mysql#dsn-data-source-name for how
-+// the DSN string is formatted
-+func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
-+ cfg, err := ParseDSN(dsn)
-+ if err != nil {
-+ return nil, err
-+ }
-+ c := &connector{
-+ cfg: cfg,
-+ }
-+ return c.Connect(context.Background())
-+}
-+
-+func init() {
-+ sql.Register("mysql", &MySQLDriver{})
-+}
-diff --git a/vendor/github.com/go-sql-driver/mysql/driver_go110.go b/vendor/github.com/go-sql-driver/mysql/driver_go110.go
-new file mode 100644
-index 00000000000..eb5a8fe9bd8
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/driver_go110.go
-@@ -0,0 +1,37 @@
-+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-+//
-+// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved.
-+//
-+// This Source Code Form is subject to the terms of the Mozilla Public
-+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-+// You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+// +build go1.10
-+
-+package mysql
-+
-+import (
-+ "database/sql/driver"
-+)
-+
-+// NewConnector returns new driver.Connector.
-+func NewConnector(cfg *Config) (driver.Connector, error) {
-+ cfg = cfg.Clone()
-+ // normalize the contents of cfg so calls to NewConnector have the same
-+ // behavior as MySQLDriver.OpenConnector
-+ if err := cfg.normalize(); err != nil {
-+ return nil, err
-+ }
-+ return &connector{cfg: cfg}, nil
-+}
-+
-+// OpenConnector implements driver.DriverContext.
-+func (d MySQLDriver) OpenConnector(dsn string) (driver.Connector, error) {
-+ cfg, err := ParseDSN(dsn)
-+ if err != nil {
-+ return nil, err
-+ }
-+ return &connector{
-+ cfg: cfg,
-+ }, nil
-+}
-diff --git a/vendor/github.com/go-sql-driver/mysql/driver_go110_test.go b/vendor/github.com/go-sql-driver/mysql/driver_go110_test.go
-new file mode 100644
-index 00000000000..fd8df89753c
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/driver_go110_test.go
-@@ -0,0 +1,190 @@
-+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-+//
-+// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved.
-+//
-+// This Source Code Form is subject to the terms of the Mozilla Public
-+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-+// You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+// +build go1.10
-+
-+package mysql
-+
-+import (
-+ "context"
-+ "database/sql"
-+ "database/sql/driver"
-+ "fmt"
-+ "net"
-+ "testing"
-+ "time"
-+)
-+
-+var _ driver.DriverContext = &MySQLDriver{}
-+
-+type dialCtxKey struct{}
-+
-+func TestConnectorObeysDialTimeouts(t *testing.T) {
-+ if !available {
-+ t.Skipf("MySQL server not running on %s", netAddr)
-+ }
-+
-+ RegisterDialContext("dialctxtest", func(ctx context.Context, addr string) (net.Conn, error) {
-+ var d net.Dialer
-+ if !ctx.Value(dialCtxKey{}).(bool) {
-+ return nil, fmt.Errorf("test error: query context is not propagated to our dialer")
-+ }
-+ return d.DialContext(ctx, prot, addr)
-+ })
-+
-+ db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@dialctxtest(%s)/%s?timeout=30s", user, pass, addr, dbname))
-+ if err != nil {
-+ t.Fatalf("error connecting: %s", err.Error())
-+ }
-+ defer db.Close()
-+
-+ ctx := context.WithValue(context.Background(), dialCtxKey{}, true)
-+
-+ _, err = db.ExecContext(ctx, "DO 1")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+}
-+
-+func configForTests(t *testing.T) *Config {
-+ if !available {
-+ t.Skipf("MySQL server not running on %s", netAddr)
-+ }
-+
-+ mycnf := NewConfig()
-+ mycnf.User = user
-+ mycnf.Passwd = pass
-+ mycnf.Addr = addr
-+ mycnf.Net = prot
-+ mycnf.DBName = dbname
-+ return mycnf
-+}
-+
-+func TestNewConnector(t *testing.T) {
-+ mycnf := configForTests(t)
-+ conn, err := NewConnector(mycnf)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ db := sql.OpenDB(conn)
-+ defer db.Close()
-+
-+ if err := db.Ping(); err != nil {
-+ t.Fatal(err)
-+ }
-+}
-+
-+type slowConnection struct {
-+ net.Conn
-+ slowdown time.Duration
-+}
-+
-+func (sc *slowConnection) Read(b []byte) (int, error) {
-+ time.Sleep(sc.slowdown)
-+ return sc.Conn.Read(b)
-+}
-+
-+type connectorHijack struct {
-+ driver.Connector
-+ connErr error
-+}
-+
-+func (cw *connectorHijack) Connect(ctx context.Context) (driver.Conn, error) {
-+ var conn driver.Conn
-+ conn, cw.connErr = cw.Connector.Connect(ctx)
-+ return conn, cw.connErr
-+}
-+
-+func TestConnectorTimeoutsDuringOpen(t *testing.T) {
-+ RegisterDialContext("slowconn", func(ctx context.Context, addr string) (net.Conn, error) {
-+ var d net.Dialer
-+ conn, err := d.DialContext(ctx, prot, addr)
-+ if err != nil {
-+ return nil, err
-+ }
-+ return &slowConnection{Conn: conn, slowdown: 100 * time.Millisecond}, nil
-+ })
-+
-+ mycnf := configForTests(t)
-+ mycnf.Net = "slowconn"
-+
-+ conn, err := NewConnector(mycnf)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ hijack := &connectorHijack{Connector: conn}
-+
-+ db := sql.OpenDB(hijack)
-+ defer db.Close()
-+
-+ ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
-+ defer cancel()
-+
-+ _, err = db.ExecContext(ctx, "DO 1")
-+ if err != context.DeadlineExceeded {
-+ t.Fatalf("ExecContext should have timed out")
-+ }
-+ if hijack.connErr != context.DeadlineExceeded {
-+ t.Fatalf("(*Connector).Connect should have timed out")
-+ }
-+}
-+
-+// A connection which can only be closed.
-+type dummyConnection struct {
-+ net.Conn
-+ closed bool
-+}
-+
-+func (d *dummyConnection) Close() error {
-+ d.closed = true
-+ return nil
-+}
-+
-+func TestConnectorTimeoutsWatchCancel(t *testing.T) {
-+ var (
-+ cancel func() // Used to cancel the context just after connecting.
-+ created *dummyConnection // The created connection.
-+ )
-+
-+ RegisterDialContext("TestConnectorTimeoutsWatchCancel", func(ctx context.Context, addr string) (net.Conn, error) {
-+ // Canceling at this time triggers the watchCancel error branch in Connect().
-+ cancel()
-+ created = &dummyConnection{}
-+ return created, nil
-+ })
-+
-+ mycnf := NewConfig()
-+ mycnf.User = "root"
-+ mycnf.Addr = "foo"
-+ mycnf.Net = "TestConnectorTimeoutsWatchCancel"
-+
-+ conn, err := NewConnector(mycnf)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ db := sql.OpenDB(conn)
-+ defer db.Close()
-+
-+ var ctx context.Context
-+ ctx, cancel = context.WithCancel(context.Background())
-+ defer cancel()
-+
-+ if _, err := db.Conn(ctx); err != context.Canceled {
-+ t.Errorf("got %v, want context.Canceled", err)
-+ }
-+
-+ if created == nil {
-+ t.Fatal("no connection created")
-+ }
-+ if !created.closed {
-+ t.Errorf("connection not closed")
-+ }
-+}
-diff --git a/vendor/github.com/go-sql-driver/mysql/driver_test.go b/vendor/github.com/go-sql-driver/mysql/driver_test.go
-new file mode 100644
-index 00000000000..df7353dbb6f
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/driver_test.go
-@@ -0,0 +1,2996 @@
-+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-+//
-+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
-+//
-+// This Source Code Form is subject to the terms of the Mozilla Public
-+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-+// You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+package mysql
-+
-+import (
-+ "bytes"
-+ "context"
-+ "crypto/tls"
-+ "database/sql"
-+ "database/sql/driver"
-+ "fmt"
-+ "io"
-+ "io/ioutil"
-+ "log"
-+ "math"
-+ "net"
-+ "net/url"
-+ "os"
-+ "reflect"
-+ "strings"
-+ "sync"
-+ "sync/atomic"
-+ "testing"
-+ "time"
-+)
-+
-+// Ensure that all the driver interfaces are implemented
-+var (
-+ _ driver.Rows = &binaryRows{}
-+ _ driver.Rows = &textRows{}
-+)
-+
-+var (
-+ user string
-+ pass string
-+ prot string
-+ addr string
-+ dbname string
-+ dsn string
-+ netAddr string
-+ available bool
-+)
-+
-+var (
-+ tDate = time.Date(2012, 6, 14, 0, 0, 0, 0, time.UTC)
-+ sDate = "2012-06-14"
-+ tDateTime = time.Date(2011, 11, 20, 21, 27, 37, 0, time.UTC)
-+ sDateTime = "2011-11-20 21:27:37"
-+ tDate0 = time.Time{}
-+ sDate0 = "0000-00-00"
-+ sDateTime0 = "0000-00-00 00:00:00"
-+)
-+
-+// See https://github.com/go-sql-driver/mysql/wiki/Testing
-+func init() {
-+ // get environment variables
-+ env := func(key, defaultValue string) string {
-+ if value := os.Getenv(key); value != "" {
-+ return value
-+ }
-+ return defaultValue
-+ }
-+ user = env("MYSQL_TEST_USER", "root")
-+ pass = env("MYSQL_TEST_PASS", "")
-+ prot = env("MYSQL_TEST_PROT", "tcp")
-+ addr = env("MYSQL_TEST_ADDR", "localhost:3306")
-+ dbname = env("MYSQL_TEST_DBNAME", "gotest")
-+ netAddr = fmt.Sprintf("%s(%s)", prot, addr)
-+ dsn = fmt.Sprintf("%s:%s@%s/%s?timeout=30s", user, pass, netAddr, dbname)
-+ c, err := net.Dial(prot, addr)
-+ if err == nil {
-+ available = true
-+ c.Close()
-+ }
-+}
-+
-+type DBTest struct {
-+ *testing.T
-+ db *sql.DB
-+}
-+
-+type netErrorMock struct {
-+ temporary bool
-+ timeout bool
-+}
-+
-+func (e netErrorMock) Temporary() bool {
-+ return e.temporary
-+}
-+
-+func (e netErrorMock) Timeout() bool {
-+ return e.timeout
-+}
-+
-+func (e netErrorMock) Error() string {
-+ return fmt.Sprintf("mock net error. Temporary: %v, Timeout %v", e.temporary, e.timeout)
-+}
-+
-+func runTestsWithMultiStatement(t *testing.T, dsn string, tests ...func(dbt *DBTest)) {
-+ if !available {
-+ t.Skipf("MySQL server not running on %s", netAddr)
-+ }
-+
-+ dsn += "&multiStatements=true"
-+ var db *sql.DB
-+ if _, err := ParseDSN(dsn); err != errInvalidDSNUnsafeCollation {
-+ db, err = sql.Open("mysql", dsn)
-+ if err != nil {
-+ t.Fatalf("error connecting: %s", err.Error())
-+ }
-+ defer db.Close()
-+ }
-+
-+ dbt := &DBTest{t, db}
-+ for _, test := range tests {
-+ test(dbt)
-+ dbt.db.Exec("DROP TABLE IF EXISTS test")
-+ }
-+}
-+
-+func runTests(t *testing.T, dsn string, tests ...func(dbt *DBTest)) {
-+ if !available {
-+ t.Skipf("MySQL server not running on %s", netAddr)
-+ }
-+
-+ db, err := sql.Open("mysql", dsn)
-+ if err != nil {
-+ t.Fatalf("error connecting: %s", err.Error())
-+ }
-+ defer db.Close()
-+
-+ db.Exec("DROP TABLE IF EXISTS test")
-+
-+ dsn2 := dsn + "&interpolateParams=true"
-+ var db2 *sql.DB
-+ if _, err := ParseDSN(dsn2); err != errInvalidDSNUnsafeCollation {
-+ db2, err = sql.Open("mysql", dsn2)
-+ if err != nil {
-+ t.Fatalf("error connecting: %s", err.Error())
-+ }
-+ defer db2.Close()
-+ }
-+
-+ dsn3 := dsn + "&multiStatements=true"
-+ var db3 *sql.DB
-+ if _, err := ParseDSN(dsn3); err != errInvalidDSNUnsafeCollation {
-+ db3, err = sql.Open("mysql", dsn3)
-+ if err != nil {
-+ t.Fatalf("error connecting: %s", err.Error())
-+ }
-+ defer db3.Close()
-+ }
-+
-+ dbt := &DBTest{t, db}
-+ dbt2 := &DBTest{t, db2}
-+ dbt3 := &DBTest{t, db3}
-+ for _, test := range tests {
-+ test(dbt)
-+ dbt.db.Exec("DROP TABLE IF EXISTS test")
-+ if db2 != nil {
-+ test(dbt2)
-+ dbt2.db.Exec("DROP TABLE IF EXISTS test")
-+ }
-+ if db3 != nil {
-+ test(dbt3)
-+ dbt3.db.Exec("DROP TABLE IF EXISTS test")
-+ }
-+ }
-+}
-+
-+func (dbt *DBTest) fail(method, query string, err error) {
-+ if len(query) > 300 {
-+ query = "[query too large to print]"
-+ }
-+ dbt.Fatalf("error on %s %s: %s", method, query, err.Error())
-+}
-+
-+func (dbt *DBTest) mustExec(query string, args ...interface{}) (res sql.Result) {
-+ res, err := dbt.db.Exec(query, args...)
-+ if err != nil {
-+ dbt.fail("exec", query, err)
-+ }
-+ return res
-+}
-+
-+func (dbt *DBTest) mustQuery(query string, args ...interface{}) (rows *sql.Rows) {
-+ rows, err := dbt.db.Query(query, args...)
-+ if err != nil {
-+ dbt.fail("query", query, err)
-+ }
-+ return rows
-+}
-+
-+func maybeSkip(t *testing.T, err error, skipErrno uint16) {
-+ mySQLErr, ok := err.(*MySQLError)
-+ if !ok {
-+ return
-+ }
-+
-+ if mySQLErr.Number == skipErrno {
-+ t.Skipf("skipping test for error: %v", err)
-+ }
-+}
-+
-+func TestEmptyQuery(t *testing.T) {
-+ runTests(t, dsn, func(dbt *DBTest) {
-+ // just a comment, no query
-+ rows := dbt.mustQuery("--")
-+ defer rows.Close()
-+ // will hang before #255
-+ if rows.Next() {
-+ dbt.Errorf("next on rows must be false")
-+ }
-+ })
-+}
-+
-+func TestCRUD(t *testing.T) {
-+ runTests(t, dsn, func(dbt *DBTest) {
-+ // Create Table
-+ dbt.mustExec("CREATE TABLE test (value BOOL)")
-+
-+ // Test for unexpected data
-+ var out bool
-+ rows := dbt.mustQuery("SELECT * FROM test")
-+ if rows.Next() {
-+ dbt.Error("unexpected data in empty table")
-+ }
-+ rows.Close()
-+
-+ // Create Data
-+ res := dbt.mustExec("INSERT INTO test VALUES (1)")
-+ count, err := res.RowsAffected()
-+ if err != nil {
-+ dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
-+ }
-+ if count != 1 {
-+ dbt.Fatalf("expected 1 affected row, got %d", count)
-+ }
-+
-+ id, err := res.LastInsertId()
-+ if err != nil {
-+ dbt.Fatalf("res.LastInsertId() returned error: %s", err.Error())
-+ }
-+ if id != 0 {
-+ dbt.Fatalf("expected InsertId 0, got %d", id)
-+ }
-+
-+ // Read
-+ rows = dbt.mustQuery("SELECT value FROM test")
-+ if rows.Next() {
-+ rows.Scan(&out)
-+ if true != out {
-+ dbt.Errorf("true != %t", out)
-+ }
-+
-+ if rows.Next() {
-+ dbt.Error("unexpected data")
-+ }
-+ } else {
-+ dbt.Error("no data")
-+ }
-+ rows.Close()
-+
-+ // Update
-+ res = dbt.mustExec("UPDATE test SET value = ? WHERE value = ?", false, true)
-+ count, err = res.RowsAffected()
-+ if err != nil {
-+ dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
-+ }
-+ if count != 1 {
-+ dbt.Fatalf("expected 1 affected row, got %d", count)
-+ }
-+
-+ // Check Update
-+ rows = dbt.mustQuery("SELECT value FROM test")
-+ if rows.Next() {
-+ rows.Scan(&out)
-+ if false != out {
-+ dbt.Errorf("false != %t", out)
-+ }
-+
-+ if rows.Next() {
-+ dbt.Error("unexpected data")
-+ }
-+ } else {
-+ dbt.Error("no data")
-+ }
-+ rows.Close()
-+
-+ // Delete
-+ res = dbt.mustExec("DELETE FROM test WHERE value = ?", false)
-+ count, err = res.RowsAffected()
-+ if err != nil {
-+ dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
-+ }
-+ if count != 1 {
-+ dbt.Fatalf("expected 1 affected row, got %d", count)
-+ }
-+
-+ // Check for unexpected rows
-+ res = dbt.mustExec("DELETE FROM test")
-+ count, err = res.RowsAffected()
-+ if err != nil {
-+ dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
-+ }
-+ if count != 0 {
-+ dbt.Fatalf("expected 0 affected row, got %d", count)
-+ }
-+ })
-+}
-+
-+func TestMultiQuery(t *testing.T) {
-+ runTestsWithMultiStatement(t, dsn, func(dbt *DBTest) {
-+ // Create Table
-+ dbt.mustExec("CREATE TABLE `test` (`id` int(11) NOT NULL, `value` int(11) NOT NULL) ")
-+
-+ // Create Data
-+ res := dbt.mustExec("INSERT INTO test VALUES (1, 1)")
-+ count, err := res.RowsAffected()
-+ if err != nil {
-+ dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
-+ }
-+ if count != 1 {
-+ dbt.Fatalf("expected 1 affected row, got %d", count)
-+ }
-+
-+ // Update
-+ res = dbt.mustExec("UPDATE test SET value = 3 WHERE id = 1; UPDATE test SET value = 4 WHERE id = 1; UPDATE test SET value = 5 WHERE id = 1;")
-+ count, err = res.RowsAffected()
-+ if err != nil {
-+ dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
-+ }
-+ if count != 1 {
-+ dbt.Fatalf("expected 1 affected row, got %d", count)
-+ }
-+
-+ // Read
-+ var out int
-+ rows := dbt.mustQuery("SELECT value FROM test WHERE id=1;")
-+ if rows.Next() {
-+ rows.Scan(&out)
-+ if 5 != out {
-+ dbt.Errorf("5 != %d", out)
-+ }
-+
-+ if rows.Next() {
-+ dbt.Error("unexpected data")
-+ }
-+ } else {
-+ dbt.Error("no data")
-+ }
-+ rows.Close()
-+
-+ })
-+}
-+
-+func TestInt(t *testing.T) {
-+ runTests(t, dsn, func(dbt *DBTest) {
-+ types := [5]string{"TINYINT", "SMALLINT", "MEDIUMINT", "INT", "BIGINT"}
-+ in := int64(42)
-+ var out int64
-+ var rows *sql.Rows
-+
-+ // SIGNED
-+ for _, v := range types {
-+ dbt.mustExec("CREATE TABLE test (value " + v + ")")
-+
-+ dbt.mustExec("INSERT INTO test VALUES (?)", in)
-+
-+ rows = dbt.mustQuery("SELECT value FROM test")
-+ if rows.Next() {
-+ rows.Scan(&out)
-+ if in != out {
-+ dbt.Errorf("%s: %d != %d", v, in, out)
-+ }
-+ } else {
-+ dbt.Errorf("%s: no data", v)
-+ }
-+ rows.Close()
-+
-+ dbt.mustExec("DROP TABLE IF EXISTS test")
-+ }
-+
-+ // UNSIGNED ZEROFILL
-+ for _, v := range types {
-+ dbt.mustExec("CREATE TABLE test (value " + v + " ZEROFILL)")
-+
-+ dbt.mustExec("INSERT INTO test VALUES (?)", in)
-+
-+ rows = dbt.mustQuery("SELECT value FROM test")
-+ if rows.Next() {
-+ rows.Scan(&out)
-+ if in != out {
-+ dbt.Errorf("%s ZEROFILL: %d != %d", v, in, out)
-+ }
-+ } else {
-+ dbt.Errorf("%s ZEROFILL: no data", v)
-+ }
-+ rows.Close()
-+
-+ dbt.mustExec("DROP TABLE IF EXISTS test")
-+ }
-+ })
-+}
-+
-+func TestFloat32(t *testing.T) {
-+ runTests(t, dsn, func(dbt *DBTest) {
-+ types := [2]string{"FLOAT", "DOUBLE"}
-+ in := float32(42.23)
-+ var out float32
-+ var rows *sql.Rows
-+ for _, v := range types {
-+ dbt.mustExec("CREATE TABLE test (value " + v + ")")
-+ dbt.mustExec("INSERT INTO test VALUES (?)", in)
-+ rows = dbt.mustQuery("SELECT value FROM test")
-+ if rows.Next() {
-+ rows.Scan(&out)
-+ if in != out {
-+ dbt.Errorf("%s: %g != %g", v, in, out)
-+ }
-+ } else {
-+ dbt.Errorf("%s: no data", v)
-+ }
-+ rows.Close()
-+ dbt.mustExec("DROP TABLE IF EXISTS test")
-+ }
-+ })
-+}
-+
-+func TestFloat64(t *testing.T) {
-+ runTests(t, dsn, func(dbt *DBTest) {
-+ types := [2]string{"FLOAT", "DOUBLE"}
-+ var expected float64 = 42.23
-+ var out float64
-+ var rows *sql.Rows
-+ for _, v := range types {
-+ dbt.mustExec("CREATE TABLE test (value " + v + ")")
-+ dbt.mustExec("INSERT INTO test VALUES (42.23)")
-+ rows = dbt.mustQuery("SELECT value FROM test")
-+ if rows.Next() {
-+ rows.Scan(&out)
-+ if expected != out {
-+ dbt.Errorf("%s: %g != %g", v, expected, out)
-+ }
-+ } else {
-+ dbt.Errorf("%s: no data", v)
-+ }
-+ rows.Close()
-+ dbt.mustExec("DROP TABLE IF EXISTS test")
-+ }
-+ })
-+}
-+
-+func TestFloat64Placeholder(t *testing.T) {
-+ runTests(t, dsn, func(dbt *DBTest) {
-+ types := [2]string{"FLOAT", "DOUBLE"}
-+ var expected float64 = 42.23
-+ var out float64
-+ var rows *sql.Rows
-+ for _, v := range types {
-+ dbt.mustExec("CREATE TABLE test (id int, value " + v + ")")
-+ dbt.mustExec("INSERT INTO test VALUES (1, 42.23)")
-+ rows = dbt.mustQuery("SELECT value FROM test WHERE id = ?", 1)
-+ if rows.Next() {
-+ rows.Scan(&out)
-+ if expected != out {
-+ dbt.Errorf("%s: %g != %g", v, expected, out)
-+ }
-+ } else {
-+ dbt.Errorf("%s: no data", v)
-+ }
-+ rows.Close()
-+ dbt.mustExec("DROP TABLE IF EXISTS test")
-+ }
-+ })
-+}
-+
-+func TestString(t *testing.T) {
-+ runTests(t, dsn, func(dbt *DBTest) {
-+ types := [6]string{"CHAR(255)", "VARCHAR(255)", "TINYTEXT", "TEXT", "MEDIUMTEXT", "LONGTEXT"}
-+ in := "κόσμε üöäßñóùéàâÿœ'îë Árvíztűrő いろはにほへとちりぬるを イロハニホヘト דג סקרן чащах น่าฟังเอย"
-+ var out string
-+ var rows *sql.Rows
-+
-+ for _, v := range types {
-+ dbt.mustExec("CREATE TABLE test (value " + v + ") CHARACTER SET utf8")
-+
-+ dbt.mustExec("INSERT INTO test VALUES (?)", in)
-+
-+ rows = dbt.mustQuery("SELECT value FROM test")
-+ if rows.Next() {
-+ rows.Scan(&out)
-+ if in != out {
-+ dbt.Errorf("%s: %s != %s", v, in, out)
-+ }
-+ } else {
-+ dbt.Errorf("%s: no data", v)
-+ }
-+ rows.Close()
-+
-+ dbt.mustExec("DROP TABLE IF EXISTS test")
-+ }
-+
-+ // BLOB
-+ dbt.mustExec("CREATE TABLE test (id int, value BLOB) CHARACTER SET utf8")
-+
-+ id := 2
-+ in = "Lorem ipsum dolor sit amet, consetetur sadipscing elitr, " +
-+ "sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, " +
-+ "sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. " +
-+ "Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. " +
-+ "Lorem ipsum dolor sit amet, consetetur sadipscing elitr, " +
-+ "sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, " +
-+ "sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. " +
-+ "Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet."
-+ dbt.mustExec("INSERT INTO test VALUES (?, ?)", id, in)
-+
-+ err := dbt.db.QueryRow("SELECT value FROM test WHERE id = ?", id).Scan(&out)
-+ if err != nil {
-+ dbt.Fatalf("Error on BLOB-Query: %s", err.Error())
-+ } else if out != in {
-+ dbt.Errorf("BLOB: %s != %s", in, out)
-+ }
-+ })
-+}
-+
-+func TestRawBytes(t *testing.T) {
-+ runTests(t, dsn, func(dbt *DBTest) {
-+ v1 := []byte("aaa")
-+ v2 := []byte("bbb")
-+ rows := dbt.mustQuery("SELECT ?, ?", v1, v2)
-+ defer rows.Close()
-+ if rows.Next() {
-+ var o1, o2 sql.RawBytes
-+ if err := rows.Scan(&o1, &o2); err != nil {
-+ dbt.Errorf("Got error: %v", err)
-+ }
-+ if !bytes.Equal(v1, o1) {
-+ dbt.Errorf("expected %v, got %v", v1, o1)
-+ }
-+ if !bytes.Equal(v2, o2) {
-+ dbt.Errorf("expected %v, got %v", v2, o2)
-+ }
-+ // https://github.com/go-sql-driver/mysql/issues/765
-+ // Appending to RawBytes shouldn't overwrite next RawBytes.
-+ o1 = append(o1, "xyzzy"...)
-+ if !bytes.Equal(v2, o2) {
-+ dbt.Errorf("expected %v, got %v", v2, o2)
-+ }
-+ } else {
-+ dbt.Errorf("no data")
-+ }
-+ })
-+}
-+
-+type testValuer struct {
-+ value string
-+}
-+
-+func (tv testValuer) Value() (driver.Value, error) {
-+ return tv.value, nil
-+}
-+
-+func TestValuer(t *testing.T) {
-+ runTests(t, dsn, func(dbt *DBTest) {
-+ in := testValuer{"a_value"}
-+ var out string
-+ var rows *sql.Rows
-+
-+ dbt.mustExec("CREATE TABLE test (value VARCHAR(255)) CHARACTER SET utf8")
-+ dbt.mustExec("INSERT INTO test VALUES (?)", in)
-+ rows = dbt.mustQuery("SELECT value FROM test")
-+ if rows.Next() {
-+ rows.Scan(&out)
-+ if in.value != out {
-+ dbt.Errorf("Valuer: %v != %s", in, out)
-+ }
-+ } else {
-+ dbt.Errorf("Valuer: no data")
-+ }
-+ rows.Close()
-+
-+ dbt.mustExec("DROP TABLE IF EXISTS test")
-+ })
-+}
-+
-+type testValuerWithValidation struct {
-+ value string
-+}
-+
-+func (tv testValuerWithValidation) Value() (driver.Value, error) {
-+ if len(tv.value) == 0 {
-+ return nil, fmt.Errorf("Invalid string valuer. Value must not be empty")
-+ }
-+
-+ return tv.value, nil
-+}
-+
-+func TestValuerWithValidation(t *testing.T) {
-+ runTests(t, dsn, func(dbt *DBTest) {
-+ in := testValuerWithValidation{"a_value"}
-+ var out string
-+ var rows *sql.Rows
-+
-+ dbt.mustExec("CREATE TABLE testValuer (value VARCHAR(255)) CHARACTER SET utf8")
-+ dbt.mustExec("INSERT INTO testValuer VALUES (?)", in)
-+
-+ rows = dbt.mustQuery("SELECT value FROM testValuer")
-+ defer rows.Close()
-+
-+ if rows.Next() {
-+ rows.Scan(&out)
-+ if in.value != out {
-+ dbt.Errorf("Valuer: %v != %s", in, out)
-+ }
-+ } else {
-+ dbt.Errorf("Valuer: no data")
-+ }
-+
-+ if _, err := dbt.db.Exec("INSERT INTO testValuer VALUES (?)", testValuerWithValidation{""}); err == nil {
-+ dbt.Errorf("Failed to check valuer error")
-+ }
-+
-+ if _, err := dbt.db.Exec("INSERT INTO testValuer VALUES (?)", nil); err != nil {
-+ dbt.Errorf("Failed to check nil")
-+ }
-+
-+ if _, err := dbt.db.Exec("INSERT INTO testValuer VALUES (?)", map[string]bool{}); err == nil {
-+ dbt.Errorf("Failed to check not valuer")
-+ }
-+
-+ dbt.mustExec("DROP TABLE IF EXISTS testValuer")
-+ })
-+}
-+
-+type timeTests struct {
-+ dbtype string
-+ tlayout string
-+ tests []timeTest
-+}
-+
-+type timeTest struct {
-+ s string // leading "!": do not use t as value in queries
-+ t time.Time
-+}
-+
-+type timeMode byte
-+
-+func (t timeMode) String() string {
-+ switch t {
-+ case binaryString:
-+ return "binary:string"
-+ case binaryTime:
-+ return "binary:time.Time"
-+ case textString:
-+ return "text:string"
-+ }
-+ panic("unsupported timeMode")
-+}
-+
-+func (t timeMode) Binary() bool {
-+ switch t {
-+ case binaryString, binaryTime:
-+ return true
-+ }
-+ return false
-+}
-+
-+const (
-+ binaryString timeMode = iota
-+ binaryTime
-+ textString
-+)
-+
-+func (t timeTest) genQuery(dbtype string, mode timeMode) string {
-+ var inner string
-+ if mode.Binary() {
-+ inner = "?"
-+ } else {
-+ inner = `"%s"`
-+ }
-+ return `SELECT cast(` + inner + ` as ` + dbtype + `)`
-+}
-+
-+func (t timeTest) run(dbt *DBTest, dbtype, tlayout string, mode timeMode) {
-+ var rows *sql.Rows
-+ query := t.genQuery(dbtype, mode)
-+ switch mode {
-+ case binaryString:
-+ rows = dbt.mustQuery(query, t.s)
-+ case binaryTime:
-+ rows = dbt.mustQuery(query, t.t)
-+ case textString:
-+ query = fmt.Sprintf(query, t.s)
-+ rows = dbt.mustQuery(query)
-+ default:
-+ panic("unsupported mode")
-+ }
-+ defer rows.Close()
-+ var err error
-+ if !rows.Next() {
-+ err = rows.Err()
-+ if err == nil {
-+ err = fmt.Errorf("no data")
-+ }
-+ dbt.Errorf("%s [%s]: %s", dbtype, mode, err)
-+ return
-+ }
-+ var dst interface{}
-+ err = rows.Scan(&dst)
-+ if err != nil {
-+ dbt.Errorf("%s [%s]: %s", dbtype, mode, err)
-+ return
-+ }
-+ switch val := dst.(type) {
-+ case []uint8:
-+ str := string(val)
-+ if str == t.s {
-+ return
-+ }
-+ if mode.Binary() && dbtype == "DATETIME" && len(str) == 26 && str[:19] == t.s {
-+ // a fix mainly for TravisCI:
-+ // accept full microsecond resolution in result for DATETIME columns
-+ // where the binary protocol was used
-+ return
-+ }
-+ dbt.Errorf("%s [%s] to string: expected %q, got %q",
-+ dbtype, mode,
-+ t.s, str,
-+ )
-+ case time.Time:
-+ if val == t.t {
-+ return
-+ }
-+ dbt.Errorf("%s [%s] to string: expected %q, got %q",
-+ dbtype, mode,
-+ t.s, val.Format(tlayout),
-+ )
-+ default:
-+ fmt.Printf("%#v\n", []interface{}{dbtype, tlayout, mode, t.s, t.t})
-+ dbt.Errorf("%s [%s]: unhandled type %T (is '%v')",
-+ dbtype, mode,
-+ val, val,
-+ )
-+ }
-+}
-+
-+func TestDateTime(t *testing.T) {
-+ afterTime := func(t time.Time, d string) time.Time {
-+ dur, err := time.ParseDuration(d)
-+ if err != nil {
-+ panic(err)
-+ }
-+ return t.Add(dur)
-+ }
-+ // NOTE: MySQL rounds DATETIME(x) up - but that's not included in the tests
-+ format := "2006-01-02 15:04:05.999999"
-+ t0 := time.Time{}
-+ tstr0 := "0000-00-00 00:00:00.000000"
-+ testcases := []timeTests{
-+ {"DATE", format[:10], []timeTest{
-+ {t: time.Date(2011, 11, 20, 0, 0, 0, 0, time.UTC)},
-+ {t: t0, s: tstr0[:10]},
-+ }},
-+ {"DATETIME", format[:19], []timeTest{
-+ {t: time.Date(2011, 11, 20, 21, 27, 37, 0, time.UTC)},
-+ {t: t0, s: tstr0[:19]},
-+ }},
-+ {"DATETIME(0)", format[:21], []timeTest{
-+ {t: time.Date(2011, 11, 20, 21, 27, 37, 0, time.UTC)},
-+ {t: t0, s: tstr0[:19]},
-+ }},
-+ {"DATETIME(1)", format[:21], []timeTest{
-+ {t: time.Date(2011, 11, 20, 21, 27, 37, 100000000, time.UTC)},
-+ {t: t0, s: tstr0[:21]},
-+ }},
-+ {"DATETIME(6)", format, []timeTest{
-+ {t: time.Date(2011, 11, 20, 21, 27, 37, 123456000, time.UTC)},
-+ {t: t0, s: tstr0},
-+ }},
-+ {"TIME", format[11:19], []timeTest{
-+ {t: afterTime(t0, "12345s")},
-+ {s: "!-12:34:56"},
-+ {s: "!-838:59:59"},
-+ {s: "!838:59:59"},
-+ {t: t0, s: tstr0[11:19]},
-+ }},
-+ {"TIME(0)", format[11:19], []timeTest{
-+ {t: afterTime(t0, "12345s")},
-+ {s: "!-12:34:56"},
-+ {s: "!-838:59:59"},
-+ {s: "!838:59:59"},
-+ {t: t0, s: tstr0[11:19]},
-+ }},
-+ {"TIME(1)", format[11:21], []timeTest{
-+ {t: afterTime(t0, "12345600ms")},
-+ {s: "!-12:34:56.7"},
-+ {s: "!-838:59:58.9"},
-+ {s: "!838:59:58.9"},
-+ {t: t0, s: tstr0[11:21]},
-+ }},
-+ {"TIME(6)", format[11:], []timeTest{
-+ {t: afterTime(t0, "1234567890123000ns")},
-+ {s: "!-12:34:56.789012"},
-+ {s: "!-838:59:58.999999"},
-+ {s: "!838:59:58.999999"},
-+ {t: t0, s: tstr0[11:]},
-+ }},
-+ }
-+ dsns := []string{
-+ dsn + "&parseTime=true",
-+ dsn + "&parseTime=false",
-+ }
-+ for _, testdsn := range dsns {
-+ runTests(t, testdsn, func(dbt *DBTest) {
-+ microsecsSupported := false
-+ zeroDateSupported := false
-+ var rows *sql.Rows
-+ var err error
-+ rows, err = dbt.db.Query(`SELECT cast("00:00:00.1" as TIME(1)) = "00:00:00.1"`)
-+ if err == nil {
-+ rows.Scan(µsecsSupported)
-+ rows.Close()
-+ }
-+ rows, err = dbt.db.Query(`SELECT cast("0000-00-00" as DATE) = "0000-00-00"`)
-+ if err == nil {
-+ rows.Scan(&zeroDateSupported)
-+ rows.Close()
-+ }
-+ for _, setups := range testcases {
-+ if t := setups.dbtype; !microsecsSupported && t[len(t)-1:] == ")" {
-+ // skip fractional second tests if unsupported by server
-+ continue
-+ }
-+ for _, setup := range setups.tests {
-+ allowBinTime := true
-+ if setup.s == "" {
-+ // fill time string wherever Go can reliable produce it
-+ setup.s = setup.t.Format(setups.tlayout)
-+ } else if setup.s[0] == '!' {
-+ // skip tests using setup.t as source in queries
-+ allowBinTime = false
-+ // fix setup.s - remove the "!"
-+ setup.s = setup.s[1:]
-+ }
-+ if !zeroDateSupported && setup.s == tstr0[:len(setup.s)] {
-+ // skip disallowed 0000-00-00 date
-+ continue
-+ }
-+ setup.run(dbt, setups.dbtype, setups.tlayout, textString)
-+ setup.run(dbt, setups.dbtype, setups.tlayout, binaryString)
-+ if allowBinTime {
-+ setup.run(dbt, setups.dbtype, setups.tlayout, binaryTime)
-+ }
-+ }
-+ }
-+ })
-+ }
-+}
-+
-+func TestTimestampMicros(t *testing.T) {
-+ format := "2006-01-02 15:04:05.999999"
-+ f0 := format[:19]
-+ f1 := format[:21]
-+ f6 := format[:26]
-+ runTests(t, dsn, func(dbt *DBTest) {
-+ // check if microseconds are supported.
-+ // Do not use timestamp(x) for that check - before 5.5.6, x would mean display width
-+ // and not precision.
-+ // Se last paragraph at http://dev.mysql.com/doc/refman/5.6/en/fractional-seconds.html
-+ microsecsSupported := false
-+ if rows, err := dbt.db.Query(`SELECT cast("00:00:00.1" as TIME(1)) = "00:00:00.1"`); err == nil {
-+ rows.Scan(µsecsSupported)
-+ rows.Close()
-+ }
-+ if !microsecsSupported {
-+ // skip test
-+ return
-+ }
-+ _, err := dbt.db.Exec(`
-+ CREATE TABLE test (
-+ value0 TIMESTAMP NOT NULL DEFAULT '` + f0 + `',
-+ value1 TIMESTAMP(1) NOT NULL DEFAULT '` + f1 + `',
-+ value6 TIMESTAMP(6) NOT NULL DEFAULT '` + f6 + `'
-+ )`,
-+ )
-+ if err != nil {
-+ dbt.Error(err)
-+ }
-+ defer dbt.mustExec("DROP TABLE IF EXISTS test")
-+ dbt.mustExec("INSERT INTO test SET value0=?, value1=?, value6=?", f0, f1, f6)
-+ var res0, res1, res6 string
-+ rows := dbt.mustQuery("SELECT * FROM test")
-+ defer rows.Close()
-+ if !rows.Next() {
-+ dbt.Errorf("test contained no selectable values")
-+ }
-+ err = rows.Scan(&res0, &res1, &res6)
-+ if err != nil {
-+ dbt.Error(err)
-+ }
-+ if res0 != f0 {
-+ dbt.Errorf("expected %q, got %q", f0, res0)
-+ }
-+ if res1 != f1 {
-+ dbt.Errorf("expected %q, got %q", f1, res1)
-+ }
-+ if res6 != f6 {
-+ dbt.Errorf("expected %q, got %q", f6, res6)
-+ }
-+ })
-+}
-+
-+func TestNULL(t *testing.T) {
-+ runTests(t, dsn, func(dbt *DBTest) {
-+ nullStmt, err := dbt.db.Prepare("SELECT NULL")
-+ if err != nil {
-+ dbt.Fatal(err)
-+ }
-+ defer nullStmt.Close()
-+
-+ nonNullStmt, err := dbt.db.Prepare("SELECT 1")
-+ if err != nil {
-+ dbt.Fatal(err)
-+ }
-+ defer nonNullStmt.Close()
-+
-+ // NullBool
-+ var nb sql.NullBool
-+ // Invalid
-+ if err = nullStmt.QueryRow().Scan(&nb); err != nil {
-+ dbt.Fatal(err)
-+ }
-+ if nb.Valid {
-+ dbt.Error("valid NullBool which should be invalid")
-+ }
-+ // Valid
-+ if err = nonNullStmt.QueryRow().Scan(&nb); err != nil {
-+ dbt.Fatal(err)
-+ }
-+ if !nb.Valid {
-+ dbt.Error("invalid NullBool which should be valid")
-+ } else if nb.Bool != true {
-+ dbt.Errorf("Unexpected NullBool value: %t (should be true)", nb.Bool)
-+ }
-+
-+ // NullFloat64
-+ var nf sql.NullFloat64
-+ // Invalid
-+ if err = nullStmt.QueryRow().Scan(&nf); err != nil {
-+ dbt.Fatal(err)
-+ }
-+ if nf.Valid {
-+ dbt.Error("valid NullFloat64 which should be invalid")
-+ }
-+ // Valid
-+ if err = nonNullStmt.QueryRow().Scan(&nf); err != nil {
-+ dbt.Fatal(err)
-+ }
-+ if !nf.Valid {
-+ dbt.Error("invalid NullFloat64 which should be valid")
-+ } else if nf.Float64 != float64(1) {
-+ dbt.Errorf("unexpected NullFloat64 value: %f (should be 1.0)", nf.Float64)
-+ }
-+
-+ // NullInt64
-+ var ni sql.NullInt64
-+ // Invalid
-+ if err = nullStmt.QueryRow().Scan(&ni); err != nil {
-+ dbt.Fatal(err)
-+ }
-+ if ni.Valid {
-+ dbt.Error("valid NullInt64 which should be invalid")
-+ }
-+ // Valid
-+ if err = nonNullStmt.QueryRow().Scan(&ni); err != nil {
-+ dbt.Fatal(err)
-+ }
-+ if !ni.Valid {
-+ dbt.Error("invalid NullInt64 which should be valid")
-+ } else if ni.Int64 != int64(1) {
-+ dbt.Errorf("unexpected NullInt64 value: %d (should be 1)", ni.Int64)
-+ }
-+
-+ // NullString
-+ var ns sql.NullString
-+ // Invalid
-+ if err = nullStmt.QueryRow().Scan(&ns); err != nil {
-+ dbt.Fatal(err)
-+ }
-+ if ns.Valid {
-+ dbt.Error("valid NullString which should be invalid")
-+ }
-+ // Valid
-+ if err = nonNullStmt.QueryRow().Scan(&ns); err != nil {
-+ dbt.Fatal(err)
-+ }
-+ if !ns.Valid {
-+ dbt.Error("invalid NullString which should be valid")
-+ } else if ns.String != `1` {
-+ dbt.Error("unexpected NullString value:" + ns.String + " (should be `1`)")
-+ }
-+
-+ // nil-bytes
-+ var b []byte
-+ // Read nil
-+ if err = nullStmt.QueryRow().Scan(&b); err != nil {
-+ dbt.Fatal(err)
-+ }
-+ if b != nil {
-+ dbt.Error("non-nil []byte which should be nil")
-+ }
-+ // Read non-nil
-+ if err = nonNullStmt.QueryRow().Scan(&b); err != nil {
-+ dbt.Fatal(err)
-+ }
-+ if b == nil {
-+ dbt.Error("nil []byte which should be non-nil")
-+ }
-+ // Insert nil
-+ b = nil
-+ success := false
-+ if err = dbt.db.QueryRow("SELECT ? IS NULL", b).Scan(&success); err != nil {
-+ dbt.Fatal(err)
-+ }
-+ if !success {
-+ dbt.Error("inserting []byte(nil) as NULL failed")
-+ }
-+ // Check input==output with input==nil
-+ b = nil
-+ if err = dbt.db.QueryRow("SELECT ?", b).Scan(&b); err != nil {
-+ dbt.Fatal(err)
-+ }
-+ if b != nil {
-+ dbt.Error("non-nil echo from nil input")
-+ }
-+ // Check input==output with input!=nil
-+ b = []byte("")
-+ if err = dbt.db.QueryRow("SELECT ?", b).Scan(&b); err != nil {
-+ dbt.Fatal(err)
-+ }
-+ if b == nil {
-+ dbt.Error("nil echo from non-nil input")
-+ }
-+
-+ // Insert NULL
-+ dbt.mustExec("CREATE TABLE test (dummmy1 int, value int, dummy2 int)")
-+
-+ dbt.mustExec("INSERT INTO test VALUES (?, ?, ?)", 1, nil, 2)
-+
-+ var out interface{}
-+ rows := dbt.mustQuery("SELECT * FROM test")
-+ defer rows.Close()
-+ if rows.Next() {
-+ rows.Scan(&out)
-+ if out != nil {
-+ dbt.Errorf("%v != nil", out)
-+ }
-+ } else {
-+ dbt.Error("no data")
-+ }
-+ })
-+}
-+
-+func TestUint64(t *testing.T) {
-+ const (
-+ u0 = uint64(0)
-+ uall = ^u0
-+ uhigh = uall >> 1
-+ utop = ^uhigh
-+ s0 = int64(0)
-+ sall = ^s0
-+ shigh = int64(uhigh)
-+ stop = ^shigh
-+ )
-+ runTests(t, dsn, func(dbt *DBTest) {
-+ stmt, err := dbt.db.Prepare(`SELECT ?, ?, ? ,?, ?, ?, ?, ?`)
-+ if err != nil {
-+ dbt.Fatal(err)
-+ }
-+ defer stmt.Close()
-+ row := stmt.QueryRow(
-+ u0, uhigh, utop, uall,
-+ s0, shigh, stop, sall,
-+ )
-+
-+ var ua, ub, uc, ud uint64
-+ var sa, sb, sc, sd int64
-+
-+ err = row.Scan(&ua, &ub, &uc, &ud, &sa, &sb, &sc, &sd)
-+ if err != nil {
-+ dbt.Fatal(err)
-+ }
-+ switch {
-+ case ua != u0,
-+ ub != uhigh,
-+ uc != utop,
-+ ud != uall,
-+ sa != s0,
-+ sb != shigh,
-+ sc != stop,
-+ sd != sall:
-+ dbt.Fatal("unexpected result value")
-+ }
-+ })
-+}
-+
-+func TestLongData(t *testing.T) {
-+ runTests(t, dsn+"&maxAllowedPacket=0", func(dbt *DBTest) {
-+ var maxAllowedPacketSize int
-+ err := dbt.db.QueryRow("select @@max_allowed_packet").Scan(&maxAllowedPacketSize)
-+ if err != nil {
-+ dbt.Fatal(err)
-+ }
-+ maxAllowedPacketSize--
-+
-+ // don't get too ambitious
-+ if maxAllowedPacketSize > 1<<25 {
-+ maxAllowedPacketSize = 1 << 25
-+ }
-+
-+ dbt.mustExec("CREATE TABLE test (value LONGBLOB)")
-+
-+ in := strings.Repeat(`a`, maxAllowedPacketSize+1)
-+ var out string
-+ var rows *sql.Rows
-+
-+ // Long text data
-+ const nonDataQueryLen = 28 // length query w/o value
-+ inS := in[:maxAllowedPacketSize-nonDataQueryLen]
-+ dbt.mustExec("INSERT INTO test VALUES('" + inS + "')")
-+ rows = dbt.mustQuery("SELECT value FROM test")
-+ defer rows.Close()
-+ if rows.Next() {
-+ rows.Scan(&out)
-+ if inS != out {
-+ dbt.Fatalf("LONGBLOB: length in: %d, length out: %d", len(inS), len(out))
-+ }
-+ if rows.Next() {
-+ dbt.Error("LONGBLOB: unexpexted row")
-+ }
-+ } else {
-+ dbt.Fatalf("LONGBLOB: no data")
-+ }
-+
-+ // Empty table
-+ dbt.mustExec("TRUNCATE TABLE test")
-+
-+ // Long binary data
-+ dbt.mustExec("INSERT INTO test VALUES(?)", in)
-+ rows = dbt.mustQuery("SELECT value FROM test WHERE 1=?", 1)
-+ defer rows.Close()
-+ if rows.Next() {
-+ rows.Scan(&out)
-+ if in != out {
-+ dbt.Fatalf("LONGBLOB: length in: %d, length out: %d", len(in), len(out))
-+ }
-+ if rows.Next() {
-+ dbt.Error("LONGBLOB: unexpexted row")
-+ }
-+ } else {
-+ if err = rows.Err(); err != nil {
-+ dbt.Fatalf("LONGBLOB: no data (err: %s)", err.Error())
-+ } else {
-+ dbt.Fatal("LONGBLOB: no data (err: )")
-+ }
-+ }
-+ })
-+}
-+
-+func TestLoadData(t *testing.T) {
-+ runTests(t, dsn, func(dbt *DBTest) {
-+ verifyLoadDataResult := func() {
-+ rows, err := dbt.db.Query("SELECT * FROM test")
-+ if err != nil {
-+ dbt.Fatal(err.Error())
-+ }
-+
-+ i := 0
-+ values := [4]string{
-+ "a string",
-+ "a string containing a \t",
-+ "a string containing a \n",
-+ "a string containing both \t\n",
-+ }
-+
-+ var id int
-+ var value string
-+
-+ for rows.Next() {
-+ i++
-+ err = rows.Scan(&id, &value)
-+ if err != nil {
-+ dbt.Fatal(err.Error())
-+ }
-+ if i != id {
-+ dbt.Fatalf("%d != %d", i, id)
-+ }
-+ if values[i-1] != value {
-+ dbt.Fatalf("%q != %q", values[i-1], value)
-+ }
-+ }
-+ err = rows.Err()
-+ if err != nil {
-+ dbt.Fatal(err.Error())
-+ }
-+
-+ if i != 4 {
-+ dbt.Fatalf("rows count mismatch. Got %d, want 4", i)
-+ }
-+ }
-+
-+ dbt.db.Exec("DROP TABLE IF EXISTS test")
-+ dbt.mustExec("CREATE TABLE test (id INT NOT NULL PRIMARY KEY, value TEXT NOT NULL) CHARACTER SET utf8")
-+
-+ // Local File
-+ file, err := ioutil.TempFile("", "gotest")
-+ defer os.Remove(file.Name())
-+ if err != nil {
-+ dbt.Fatal(err)
-+ }
-+ RegisterLocalFile(file.Name())
-+
-+ // Try first with empty file
-+ dbt.mustExec(fmt.Sprintf("LOAD DATA LOCAL INFILE %q INTO TABLE test", file.Name()))
-+ var count int
-+ err = dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&count)
-+ if err != nil {
-+ dbt.Fatal(err.Error())
-+ }
-+ if count != 0 {
-+ dbt.Fatalf("unexpected row count: got %d, want 0", count)
-+ }
-+
-+ // Then fille File with data and try to load it
-+ file.WriteString("1\ta string\n2\ta string containing a \\t\n3\ta string containing a \\n\n4\ta string containing both \\t\\n\n")
-+ file.Close()
-+ dbt.mustExec(fmt.Sprintf("LOAD DATA LOCAL INFILE %q INTO TABLE test", file.Name()))
-+ verifyLoadDataResult()
-+
-+ // Try with non-existing file
-+ _, err = dbt.db.Exec("LOAD DATA LOCAL INFILE 'doesnotexist' INTO TABLE test")
-+ if err == nil {
-+ dbt.Fatal("load non-existent file didn't fail")
-+ } else if err.Error() != "local file 'doesnotexist' is not registered" {
-+ dbt.Fatal(err.Error())
-+ }
-+
-+ // Empty table
-+ dbt.mustExec("TRUNCATE TABLE test")
-+
-+ // Reader
-+ RegisterReaderHandler("test", func() io.Reader {
-+ file, err = os.Open(file.Name())
-+ if err != nil {
-+ dbt.Fatal(err)
-+ }
-+ return file
-+ })
-+ dbt.mustExec("LOAD DATA LOCAL INFILE 'Reader::test' INTO TABLE test")
-+ verifyLoadDataResult()
-+ // negative test
-+ _, err = dbt.db.Exec("LOAD DATA LOCAL INFILE 'Reader::doesnotexist' INTO TABLE test")
-+ if err == nil {
-+ dbt.Fatal("load non-existent Reader didn't fail")
-+ } else if err.Error() != "Reader 'doesnotexist' is not registered" {
-+ dbt.Fatal(err.Error())
-+ }
-+ })
-+}
-+
-+func TestFoundRows(t *testing.T) {
-+ runTests(t, dsn, func(dbt *DBTest) {
-+ dbt.mustExec("CREATE TABLE test (id INT NOT NULL ,data INT NOT NULL)")
-+ dbt.mustExec("INSERT INTO test (id, data) VALUES (0, 0),(0, 0),(1, 0),(1, 0),(1, 1)")
-+
-+ res := dbt.mustExec("UPDATE test SET data = 1 WHERE id = 0")
-+ count, err := res.RowsAffected()
-+ if err != nil {
-+ dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
-+ }
-+ if count != 2 {
-+ dbt.Fatalf("Expected 2 affected rows, got %d", count)
-+ }
-+ res = dbt.mustExec("UPDATE test SET data = 1 WHERE id = 1")
-+ count, err = res.RowsAffected()
-+ if err != nil {
-+ dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
-+ }
-+ if count != 2 {
-+ dbt.Fatalf("Expected 2 affected rows, got %d", count)
-+ }
-+ })
-+ runTests(t, dsn+"&clientFoundRows=true", func(dbt *DBTest) {
-+ dbt.mustExec("CREATE TABLE test (id INT NOT NULL ,data INT NOT NULL)")
-+ dbt.mustExec("INSERT INTO test (id, data) VALUES (0, 0),(0, 0),(1, 0),(1, 0),(1, 1)")
-+
-+ res := dbt.mustExec("UPDATE test SET data = 1 WHERE id = 0")
-+ count, err := res.RowsAffected()
-+ if err != nil {
-+ dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
-+ }
-+ if count != 2 {
-+ dbt.Fatalf("Expected 2 matched rows, got %d", count)
-+ }
-+ res = dbt.mustExec("UPDATE test SET data = 1 WHERE id = 1")
-+ count, err = res.RowsAffected()
-+ if err != nil {
-+ dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
-+ }
-+ if count != 3 {
-+ dbt.Fatalf("Expected 3 matched rows, got %d", count)
-+ }
-+ })
-+}
-+
-+func TestTLS(t *testing.T) {
-+ tlsTestReq := func(dbt *DBTest) {
-+ if err := dbt.db.Ping(); err != nil {
-+ if err == ErrNoTLS {
-+ dbt.Skip("server does not support TLS")
-+ } else {
-+ dbt.Fatalf("error on Ping: %s", err.Error())
-+ }
-+ }
-+
-+ rows := dbt.mustQuery("SHOW STATUS LIKE 'Ssl_cipher'")
-+ defer rows.Close()
-+
-+ var variable, value *sql.RawBytes
-+ for rows.Next() {
-+ if err := rows.Scan(&variable, &value); err != nil {
-+ dbt.Fatal(err.Error())
-+ }
-+
-+ if (*value == nil) || (len(*value) == 0) {
-+ dbt.Fatalf("no Cipher")
-+ } else {
-+ dbt.Logf("Cipher: %s", *value)
-+ }
-+ }
-+ }
-+ tlsTestOpt := func(dbt *DBTest) {
-+ if err := dbt.db.Ping(); err != nil {
-+ dbt.Fatalf("error on Ping: %s", err.Error())
-+ }
-+ }
-+
-+ runTests(t, dsn+"&tls=preferred", tlsTestOpt)
-+ runTests(t, dsn+"&tls=skip-verify", tlsTestReq)
-+
-+ // Verify that registering / using a custom cfg works
-+ RegisterTLSConfig("custom-skip-verify", &tls.Config{
-+ InsecureSkipVerify: true,
-+ })
-+ runTests(t, dsn+"&tls=custom-skip-verify", tlsTestReq)
-+}
-+
-+func TestReuseClosedConnection(t *testing.T) {
-+ // this test does not use sql.database, it uses the driver directly
-+ if !available {
-+ t.Skipf("MySQL server not running on %s", netAddr)
-+ }
-+
-+ md := &MySQLDriver{}
-+ conn, err := md.Open(dsn)
-+ if err != nil {
-+ t.Fatalf("error connecting: %s", err.Error())
-+ }
-+ stmt, err := conn.Prepare("DO 1")
-+ if err != nil {
-+ t.Fatalf("error preparing statement: %s", err.Error())
-+ }
-+ _, err = stmt.Exec(nil)
-+ if err != nil {
-+ t.Fatalf("error executing statement: %s", err.Error())
-+ }
-+ err = conn.Close()
-+ if err != nil {
-+ t.Fatalf("error closing connection: %s", err.Error())
-+ }
-+
-+ defer func() {
-+ if err := recover(); err != nil {
-+ t.Errorf("panic after reusing a closed connection: %v", err)
-+ }
-+ }()
-+ _, err = stmt.Exec(nil)
-+ if err != nil && err != driver.ErrBadConn {
-+ t.Errorf("unexpected error '%s', expected '%s'",
-+ err.Error(), driver.ErrBadConn.Error())
-+ }
-+}
-+
-+func TestCharset(t *testing.T) {
-+ if !available {
-+ t.Skipf("MySQL server not running on %s", netAddr)
-+ }
-+
-+ mustSetCharset := func(charsetParam, expected string) {
-+ runTests(t, dsn+"&"+charsetParam, func(dbt *DBTest) {
-+ rows := dbt.mustQuery("SELECT @@character_set_connection")
-+ defer rows.Close()
-+
-+ if !rows.Next() {
-+ dbt.Fatalf("error getting connection charset: %s", rows.Err())
-+ }
-+
-+ var got string
-+ rows.Scan(&got)
-+
-+ if got != expected {
-+ dbt.Fatalf("expected connection charset %s but got %s", expected, got)
-+ }
-+ })
-+ }
-+
-+ // non utf8 test
-+ mustSetCharset("charset=ascii", "ascii")
-+
-+ // when the first charset is invalid, use the second
-+ mustSetCharset("charset=none,utf8", "utf8")
-+
-+ // when the first charset is valid, use it
-+ mustSetCharset("charset=ascii,utf8", "ascii")
-+ mustSetCharset("charset=utf8,ascii", "utf8")
-+}
-+
-+func TestFailingCharset(t *testing.T) {
-+ runTests(t, dsn+"&charset=none", func(dbt *DBTest) {
-+ // run query to really establish connection...
-+ _, err := dbt.db.Exec("SELECT 1")
-+ if err == nil {
-+ dbt.db.Close()
-+ t.Fatalf("connection must not succeed without a valid charset")
-+ }
-+ })
-+}
-+
-+func TestCollation(t *testing.T) {
-+ if !available {
-+ t.Skipf("MySQL server not running on %s", netAddr)
-+ }
-+
-+ defaultCollation := "utf8mb4_general_ci"
-+ testCollations := []string{
-+ "", // do not set
-+ defaultCollation, // driver default
-+ "latin1_general_ci",
-+ "binary",
-+ "utf8_unicode_ci",
-+ "cp1257_bin",
-+ }
-+
-+ for _, collation := range testCollations {
-+ var expected, tdsn string
-+ if collation != "" {
-+ tdsn = dsn + "&collation=" + collation
-+ expected = collation
-+ } else {
-+ tdsn = dsn
-+ expected = defaultCollation
-+ }
-+
-+ runTests(t, tdsn, func(dbt *DBTest) {
-+ var got string
-+ if err := dbt.db.QueryRow("SELECT @@collation_connection").Scan(&got); err != nil {
-+ dbt.Fatal(err)
-+ }
-+
-+ if got != expected {
-+ dbt.Fatalf("expected connection collation %s but got %s", expected, got)
-+ }
-+ })
-+ }
-+}
-+
-+func TestColumnsWithAlias(t *testing.T) {
-+ runTests(t, dsn+"&columnsWithAlias=true", func(dbt *DBTest) {
-+ rows := dbt.mustQuery("SELECT 1 AS A")
-+ defer rows.Close()
-+ cols, _ := rows.Columns()
-+ if len(cols) != 1 {
-+ t.Fatalf("expected 1 column, got %d", len(cols))
-+ }
-+ if cols[0] != "A" {
-+ t.Fatalf("expected column name \"A\", got \"%s\"", cols[0])
-+ }
-+
-+ rows = dbt.mustQuery("SELECT * FROM (SELECT 1 AS one) AS A")
-+ defer rows.Close()
-+ cols, _ = rows.Columns()
-+ if len(cols) != 1 {
-+ t.Fatalf("expected 1 column, got %d", len(cols))
-+ }
-+ if cols[0] != "A.one" {
-+ t.Fatalf("expected column name \"A.one\", got \"%s\"", cols[0])
-+ }
-+ })
-+}
-+
-+func TestRawBytesResultExceedsBuffer(t *testing.T) {
-+ runTests(t, dsn, func(dbt *DBTest) {
-+ // defaultBufSize from buffer.go
-+ expected := strings.Repeat("abc", defaultBufSize)
-+
-+ rows := dbt.mustQuery("SELECT '" + expected + "'")
-+ defer rows.Close()
-+ if !rows.Next() {
-+ dbt.Error("expected result, got none")
-+ }
-+ var result sql.RawBytes
-+ rows.Scan(&result)
-+ if expected != string(result) {
-+ dbt.Error("result did not match expected value")
-+ }
-+ })
-+}
-+
-+func TestTimezoneConversion(t *testing.T) {
-+ zones := []string{"UTC", "US/Central", "US/Pacific", "Local"}
-+
-+ // Regression test for timezone handling
-+ tzTest := func(dbt *DBTest) {
-+ // Create table
-+ dbt.mustExec("CREATE TABLE test (ts TIMESTAMP)")
-+
-+ // Insert local time into database (should be converted)
-+ usCentral, _ := time.LoadLocation("US/Central")
-+ reftime := time.Date(2014, 05, 30, 18, 03, 17, 0, time.UTC).In(usCentral)
-+ dbt.mustExec("INSERT INTO test VALUE (?)", reftime)
-+
-+ // Retrieve time from DB
-+ rows := dbt.mustQuery("SELECT ts FROM test")
-+ defer rows.Close()
-+ if !rows.Next() {
-+ dbt.Fatal("did not get any rows out")
-+ }
-+
-+ var dbTime time.Time
-+ err := rows.Scan(&dbTime)
-+ if err != nil {
-+ dbt.Fatal("Err", err)
-+ }
-+
-+ // Check that dates match
-+ if reftime.Unix() != dbTime.Unix() {
-+ dbt.Errorf("times do not match.\n")
-+ dbt.Errorf(" Now(%v)=%v\n", usCentral, reftime)
-+ dbt.Errorf(" Now(UTC)=%v\n", dbTime)
-+ }
-+ }
-+
-+ for _, tz := range zones {
-+ runTests(t, dsn+"&parseTime=true&loc="+url.QueryEscape(tz), tzTest)
-+ }
-+}
-+
-+// Special cases
-+
-+func TestRowsClose(t *testing.T) {
-+ runTests(t, dsn, func(dbt *DBTest) {
-+ rows, err := dbt.db.Query("SELECT 1")
-+ if err != nil {
-+ dbt.Fatal(err)
-+ }
-+
-+ err = rows.Close()
-+ if err != nil {
-+ dbt.Fatal(err)
-+ }
-+
-+ if rows.Next() {
-+ dbt.Fatal("unexpected row after rows.Close()")
-+ }
-+
-+ err = rows.Err()
-+ if err != nil {
-+ dbt.Fatal(err)
-+ }
-+ })
-+}
-+
-+// dangling statements
-+// http://code.google.com/p/go/issues/detail?id=3865
-+func TestCloseStmtBeforeRows(t *testing.T) {
-+ runTests(t, dsn, func(dbt *DBTest) {
-+ stmt, err := dbt.db.Prepare("SELECT 1")
-+ if err != nil {
-+ dbt.Fatal(err)
-+ }
-+
-+ rows, err := stmt.Query()
-+ if err != nil {
-+ stmt.Close()
-+ dbt.Fatal(err)
-+ }
-+ defer rows.Close()
-+
-+ err = stmt.Close()
-+ if err != nil {
-+ dbt.Fatal(err)
-+ }
-+
-+ if !rows.Next() {
-+ dbt.Fatal("getting row failed")
-+ } else {
-+ err = rows.Err()
-+ if err != nil {
-+ dbt.Fatal(err)
-+ }
-+
-+ var out bool
-+ err = rows.Scan(&out)
-+ if err != nil {
-+ dbt.Fatalf("error on rows.Scan(): %s", err.Error())
-+ }
-+ if out != true {
-+ dbt.Errorf("true != %t", out)
-+ }
-+ }
-+ })
-+}
-+
-+// It is valid to have multiple Rows for the same Stmt
-+// http://code.google.com/p/go/issues/detail?id=3734
-+func TestStmtMultiRows(t *testing.T) {
-+ runTests(t, dsn, func(dbt *DBTest) {
-+ stmt, err := dbt.db.Prepare("SELECT 1 UNION SELECT 0")
-+ if err != nil {
-+ dbt.Fatal(err)
-+ }
-+
-+ rows1, err := stmt.Query()
-+ if err != nil {
-+ stmt.Close()
-+ dbt.Fatal(err)
-+ }
-+ defer rows1.Close()
-+
-+ rows2, err := stmt.Query()
-+ if err != nil {
-+ stmt.Close()
-+ dbt.Fatal(err)
-+ }
-+ defer rows2.Close()
-+
-+ var out bool
-+
-+ // 1
-+ if !rows1.Next() {
-+ dbt.Fatal("first rows1.Next failed")
-+ } else {
-+ err = rows1.Err()
-+ if err != nil {
-+ dbt.Fatal(err)
-+ }
-+
-+ err = rows1.Scan(&out)
-+ if err != nil {
-+ dbt.Fatalf("error on rows.Scan(): %s", err.Error())
-+ }
-+ if out != true {
-+ dbt.Errorf("true != %t", out)
-+ }
-+ }
-+
-+ if !rows2.Next() {
-+ dbt.Fatal("first rows2.Next failed")
-+ } else {
-+ err = rows2.Err()
-+ if err != nil {
-+ dbt.Fatal(err)
-+ }
-+
-+ err = rows2.Scan(&out)
-+ if err != nil {
-+ dbt.Fatalf("error on rows.Scan(): %s", err.Error())
-+ }
-+ if out != true {
-+ dbt.Errorf("true != %t", out)
-+ }
-+ }
-+
-+ // 2
-+ if !rows1.Next() {
-+ dbt.Fatal("second rows1.Next failed")
-+ } else {
-+ err = rows1.Err()
-+ if err != nil {
-+ dbt.Fatal(err)
-+ }
-+
-+ err = rows1.Scan(&out)
-+ if err != nil {
-+ dbt.Fatalf("error on rows.Scan(): %s", err.Error())
-+ }
-+ if out != false {
-+ dbt.Errorf("false != %t", out)
-+ }
-+
-+ if rows1.Next() {
-+ dbt.Fatal("unexpected row on rows1")
-+ }
-+ err = rows1.Close()
-+ if err != nil {
-+ dbt.Fatal(err)
-+ }
-+ }
-+
-+ if !rows2.Next() {
-+ dbt.Fatal("second rows2.Next failed")
-+ } else {
-+ err = rows2.Err()
-+ if err != nil {
-+ dbt.Fatal(err)
-+ }
-+
-+ err = rows2.Scan(&out)
-+ if err != nil {
-+ dbt.Fatalf("error on rows.Scan(): %s", err.Error())
-+ }
-+ if out != false {
-+ dbt.Errorf("false != %t", out)
-+ }
-+
-+ if rows2.Next() {
-+ dbt.Fatal("unexpected row on rows2")
-+ }
-+ err = rows2.Close()
-+ if err != nil {
-+ dbt.Fatal(err)
-+ }
-+ }
-+ })
-+}
-+
-+// Regression test for
-+// * more than 32 NULL parameters (issue 209)
-+// * more parameters than fit into the buffer (issue 201)
-+// * parameters * 64 > max_allowed_packet (issue 734)
-+func TestPreparedManyCols(t *testing.T) {
-+ numParams := 65535
-+ runTests(t, dsn, func(dbt *DBTest) {
-+ query := "SELECT ?" + strings.Repeat(",?", numParams-1)
-+ stmt, err := dbt.db.Prepare(query)
-+ if err != nil {
-+ dbt.Fatal(err)
-+ }
-+ defer stmt.Close()
-+
-+ // create more parameters than fit into the buffer
-+ // which will take nil-values
-+ params := make([]interface{}, numParams)
-+ rows, err := stmt.Query(params...)
-+ if err != nil {
-+ dbt.Fatal(err)
-+ }
-+ rows.Close()
-+
-+ // Create 0byte string which we can't send via STMT_LONG_DATA.
-+ for i := 0; i < numParams; i++ {
-+ params[i] = ""
-+ }
-+ rows, err = stmt.Query(params...)
-+ if err != nil {
-+ dbt.Fatal(err)
-+ }
-+ rows.Close()
-+ })
-+}
-+
-+func TestConcurrent(t *testing.T) {
-+ if enabled, _ := readBool(os.Getenv("MYSQL_TEST_CONCURRENT")); !enabled {
-+ t.Skip("MYSQL_TEST_CONCURRENT env var not set")
-+ }
-+
-+ runTests(t, dsn, func(dbt *DBTest) {
-+ var max int
-+ err := dbt.db.QueryRow("SELECT @@max_connections").Scan(&max)
-+ if err != nil {
-+ dbt.Fatalf("%s", err.Error())
-+ }
-+ dbt.Logf("testing up to %d concurrent connections \r\n", max)
-+
-+ var remaining, succeeded int32 = int32(max), 0
-+
-+ var wg sync.WaitGroup
-+ wg.Add(max)
-+
-+ var fatalError string
-+ var once sync.Once
-+ fatalf := func(s string, vals ...interface{}) {
-+ once.Do(func() {
-+ fatalError = fmt.Sprintf(s, vals...)
-+ })
-+ }
-+
-+ for i := 0; i < max; i++ {
-+ go func(id int) {
-+ defer wg.Done()
-+
-+ tx, err := dbt.db.Begin()
-+ atomic.AddInt32(&remaining, -1)
-+
-+ if err != nil {
-+ if err.Error() != "Error 1040: Too many connections" {
-+ fatalf("error on conn %d: %s", id, err.Error())
-+ }
-+ return
-+ }
-+
-+ // keep the connection busy until all connections are open
-+ for remaining > 0 {
-+ if _, err = tx.Exec("DO 1"); err != nil {
-+ fatalf("error on conn %d: %s", id, err.Error())
-+ return
-+ }
-+ }
-+
-+ if err = tx.Commit(); err != nil {
-+ fatalf("error on conn %d: %s", id, err.Error())
-+ return
-+ }
-+
-+ // everything went fine with this connection
-+ atomic.AddInt32(&succeeded, 1)
-+ }(i)
-+ }
-+
-+ // wait until all conections are open
-+ wg.Wait()
-+
-+ if fatalError != "" {
-+ dbt.Fatal(fatalError)
-+ }
-+
-+ dbt.Logf("reached %d concurrent connections\r\n", succeeded)
-+ })
-+}
-+
-+func testDialError(t *testing.T, dialErr error, expectErr error) {
-+ RegisterDialContext("mydial", func(ctx context.Context, addr string) (net.Conn, error) {
-+ return nil, dialErr
-+ })
-+
-+ db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@mydial(%s)/%s?timeout=30s", user, pass, addr, dbname))
-+ if err != nil {
-+ t.Fatalf("error connecting: %s", err.Error())
-+ }
-+ defer db.Close()
-+
-+ _, err = db.Exec("DO 1")
-+ if err != expectErr {
-+ t.Fatalf("was expecting %s. Got: %s", dialErr, err)
-+ }
-+}
-+
-+func TestDialUnknownError(t *testing.T) {
-+ testErr := fmt.Errorf("test")
-+ testDialError(t, testErr, testErr)
-+}
-+
-+func TestDialNonRetryableNetErr(t *testing.T) {
-+ testErr := netErrorMock{}
-+ testDialError(t, testErr, testErr)
-+}
-+
-+func TestDialTemporaryNetErr(t *testing.T) {
-+ testErr := netErrorMock{temporary: true}
-+ testDialError(t, testErr, testErr)
-+}
-+
-+// Tests custom dial functions
-+func TestCustomDial(t *testing.T) {
-+ if !available {
-+ t.Skipf("MySQL server not running on %s", netAddr)
-+ }
-+
-+ // our custom dial function which justs wraps net.Dial here
-+ RegisterDialContext("mydial", func(ctx context.Context, addr string) (net.Conn, error) {
-+ var d net.Dialer
-+ return d.DialContext(ctx, prot, addr)
-+ })
-+
-+ db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@mydial(%s)/%s?timeout=30s", user, pass, addr, dbname))
-+ if err != nil {
-+ t.Fatalf("error connecting: %s", err.Error())
-+ }
-+ defer db.Close()
-+
-+ if _, err = db.Exec("DO 1"); err != nil {
-+ t.Fatalf("connection failed: %s", err.Error())
-+ }
-+}
-+
-+func TestSQLInjection(t *testing.T) {
-+ createTest := func(arg string) func(dbt *DBTest) {
-+ return func(dbt *DBTest) {
-+ dbt.mustExec("CREATE TABLE test (v INTEGER)")
-+ dbt.mustExec("INSERT INTO test VALUES (?)", 1)
-+
-+ var v int
-+ // NULL can't be equal to anything, the idea here is to inject query so it returns row
-+ // This test verifies that escapeQuotes and escapeBackslash are working properly
-+ err := dbt.db.QueryRow("SELECT v FROM test WHERE NULL = ?", arg).Scan(&v)
-+ if err == sql.ErrNoRows {
-+ return // success, sql injection failed
-+ } else if err == nil {
-+ dbt.Errorf("sql injection successful with arg: %s", arg)
-+ } else {
-+ dbt.Errorf("error running query with arg: %s; err: %s", arg, err.Error())
-+ }
-+ }
-+ }
-+
-+ dsns := []string{
-+ dsn,
-+ dsn + "&sql_mode='NO_BACKSLASH_ESCAPES'",
-+ }
-+ for _, testdsn := range dsns {
-+ runTests(t, testdsn, createTest("1 OR 1=1"))
-+ runTests(t, testdsn, createTest("' OR '1'='1"))
-+ }
-+}
-+
-+// Test if inserted data is correctly retrieved after being escaped
-+func TestInsertRetrieveEscapedData(t *testing.T) {
-+ testData := func(dbt *DBTest) {
-+ dbt.mustExec("CREATE TABLE test (v VARCHAR(255))")
-+
-+ // All sequences that are escaped by escapeQuotes and escapeBackslash
-+ v := "foo \x00\n\r\x1a\"'\\"
-+ dbt.mustExec("INSERT INTO test VALUES (?)", v)
-+
-+ var out string
-+ err := dbt.db.QueryRow("SELECT v FROM test").Scan(&out)
-+ if err != nil {
-+ dbt.Fatalf("%s", err.Error())
-+ }
-+
-+ if out != v {
-+ dbt.Errorf("%q != %q", out, v)
-+ }
-+ }
-+
-+ dsns := []string{
-+ dsn,
-+ dsn + "&sql_mode='NO_BACKSLASH_ESCAPES'",
-+ }
-+ for _, testdsn := range dsns {
-+ runTests(t, testdsn, testData)
-+ }
-+}
-+
-+func TestUnixSocketAuthFail(t *testing.T) {
-+ runTests(t, dsn, func(dbt *DBTest) {
-+ // Save the current logger so we can restore it.
-+ oldLogger := errLog
-+
-+ // Set a new logger so we can capture its output.
-+ buffer := bytes.NewBuffer(make([]byte, 0, 64))
-+ newLogger := log.New(buffer, "prefix: ", 0)
-+ SetLogger(newLogger)
-+
-+ // Restore the logger.
-+ defer SetLogger(oldLogger)
-+
-+ // Make a new DSN that uses the MySQL socket file and a bad password, which
-+ // we can make by simply appending any character to the real password.
-+ badPass := pass + "x"
-+ socket := ""
-+ if prot == "unix" {
-+ socket = addr
-+ } else {
-+ // Get socket file from MySQL.
-+ err := dbt.db.QueryRow("SELECT @@socket").Scan(&socket)
-+ if err != nil {
-+ t.Fatalf("error on SELECT @@socket: %s", err.Error())
-+ }
-+ }
-+ t.Logf("socket: %s", socket)
-+ badDSN := fmt.Sprintf("%s:%s@unix(%s)/%s?timeout=30s", user, badPass, socket, dbname)
-+ db, err := sql.Open("mysql", badDSN)
-+ if err != nil {
-+ t.Fatalf("error connecting: %s", err.Error())
-+ }
-+ defer db.Close()
-+
-+ // Connect to MySQL for real. This will cause an auth failure.
-+ err = db.Ping()
-+ if err == nil {
-+ t.Error("expected Ping() to return an error")
-+ }
-+
-+ // The driver should not log anything.
-+ if actual := buffer.String(); actual != "" {
-+ t.Errorf("expected no output, got %q", actual)
-+ }
-+ })
-+}
-+
-+// See Issue #422
-+func TestInterruptBySignal(t *testing.T) {
-+ runTestsWithMultiStatement(t, dsn, func(dbt *DBTest) {
-+ dbt.mustExec(`
-+ DROP PROCEDURE IF EXISTS test_signal;
-+ CREATE PROCEDURE test_signal(ret INT)
-+ BEGIN
-+ SELECT ret;
-+ SIGNAL SQLSTATE
-+ '45001'
-+ SET
-+ MESSAGE_TEXT = "an error",
-+ MYSQL_ERRNO = 45001;
-+ END
-+ `)
-+ defer dbt.mustExec("DROP PROCEDURE test_signal")
-+
-+ var val int
-+
-+ // text protocol
-+ rows, err := dbt.db.Query("CALL test_signal(42)")
-+ if err != nil {
-+ dbt.Fatalf("error on text query: %s", err.Error())
-+ }
-+ for rows.Next() {
-+ if err := rows.Scan(&val); err != nil {
-+ dbt.Error(err)
-+ } else if val != 42 {
-+ dbt.Errorf("expected val to be 42")
-+ }
-+ }
-+ rows.Close()
-+
-+ // binary protocol
-+ rows, err = dbt.db.Query("CALL test_signal(?)", 42)
-+ if err != nil {
-+ dbt.Fatalf("error on binary query: %s", err.Error())
-+ }
-+ for rows.Next() {
-+ if err := rows.Scan(&val); err != nil {
-+ dbt.Error(err)
-+ } else if val != 42 {
-+ dbt.Errorf("expected val to be 42")
-+ }
-+ }
-+ rows.Close()
-+ })
-+}
-+
-+func TestColumnsReusesSlice(t *testing.T) {
-+ rows := mysqlRows{
-+ rs: resultSet{
-+ columns: []mysqlField{
-+ {
-+ tableName: "test",
-+ name: "A",
-+ },
-+ {
-+ tableName: "test",
-+ name: "B",
-+ },
-+ },
-+ },
-+ }
-+
-+ allocs := testing.AllocsPerRun(1, func() {
-+ cols := rows.Columns()
-+
-+ if len(cols) != 2 {
-+ t.Fatalf("expected 2 columns, got %d", len(cols))
-+ }
-+ })
-+
-+ if allocs != 0 {
-+ t.Fatalf("expected 0 allocations, got %d", int(allocs))
-+ }
-+
-+ if rows.rs.columnNames == nil {
-+ t.Fatalf("expected columnNames to be set, got nil")
-+ }
-+}
-+
-+func TestRejectReadOnly(t *testing.T) {
-+ runTests(t, dsn, func(dbt *DBTest) {
-+ // Create Table
-+ dbt.mustExec("CREATE TABLE test (value BOOL)")
-+ // Set the session to read-only. We didn't set the `rejectReadOnly`
-+ // option, so any writes after this should fail.
-+ _, err := dbt.db.Exec("SET SESSION TRANSACTION READ ONLY")
-+ // Error 1193: Unknown system variable 'TRANSACTION' => skip test,
-+ // MySQL server version is too old
-+ maybeSkip(t, err, 1193)
-+ if _, err := dbt.db.Exec("DROP TABLE test"); err == nil {
-+ t.Fatalf("writing to DB in read-only session without " +
-+ "rejectReadOnly did not error")
-+ }
-+ // Set the session back to read-write so runTests() can properly clean
-+ // up the table `test`.
-+ dbt.mustExec("SET SESSION TRANSACTION READ WRITE")
-+ })
-+
-+ // Enable the `rejectReadOnly` option.
-+ runTests(t, dsn+"&rejectReadOnly=true", func(dbt *DBTest) {
-+ // Create Table
-+ dbt.mustExec("CREATE TABLE test (value BOOL)")
-+ // Set the session to read only. Any writes after this should error on
-+ // a driver.ErrBadConn, and cause `database/sql` to initiate a new
-+ // connection.
-+ dbt.mustExec("SET SESSION TRANSACTION READ ONLY")
-+ // This would error, but `database/sql` should automatically retry on a
-+ // new connection which is not read-only, and eventually succeed.
-+ dbt.mustExec("DROP TABLE test")
-+ })
-+}
-+
-+func TestPing(t *testing.T) {
-+ runTests(t, dsn, func(dbt *DBTest) {
-+ if err := dbt.db.Ping(); err != nil {
-+ dbt.fail("Ping", "Ping", err)
-+ }
-+ })
-+}
-+
-+// See Issue #799
-+func TestEmptyPassword(t *testing.T) {
-+ if !available {
-+ t.Skipf("MySQL server not running on %s", netAddr)
-+ }
-+
-+ dsn := fmt.Sprintf("%s:%s@%s/%s?timeout=30s", user, "", netAddr, dbname)
-+ db, err := sql.Open("mysql", dsn)
-+ if err == nil {
-+ defer db.Close()
-+ err = db.Ping()
-+ }
-+
-+ if pass == "" {
-+ if err != nil {
-+ t.Fatal(err.Error())
-+ }
-+ } else {
-+ if err == nil {
-+ t.Fatal("expected authentication error")
-+ }
-+ if !strings.HasPrefix(err.Error(), "Error 1045") {
-+ t.Fatal(err.Error())
-+ }
-+ }
-+}
-+
-+// static interface implementation checks of mysqlConn
-+var (
-+ _ driver.ConnBeginTx = &mysqlConn{}
-+ _ driver.ConnPrepareContext = &mysqlConn{}
-+ _ driver.ExecerContext = &mysqlConn{}
-+ _ driver.Pinger = &mysqlConn{}
-+ _ driver.QueryerContext = &mysqlConn{}
-+)
-+
-+// static interface implementation checks of mysqlStmt
-+var (
-+ _ driver.StmtExecContext = &mysqlStmt{}
-+ _ driver.StmtQueryContext = &mysqlStmt{}
-+)
-+
-+// Ensure that all the driver interfaces are implemented
-+var (
-+ // _ driver.RowsColumnTypeLength = &binaryRows{}
-+ // _ driver.RowsColumnTypeLength = &textRows{}
-+ _ driver.RowsColumnTypeDatabaseTypeName = &binaryRows{}
-+ _ driver.RowsColumnTypeDatabaseTypeName = &textRows{}
-+ _ driver.RowsColumnTypeNullable = &binaryRows{}
-+ _ driver.RowsColumnTypeNullable = &textRows{}
-+ _ driver.RowsColumnTypePrecisionScale = &binaryRows{}
-+ _ driver.RowsColumnTypePrecisionScale = &textRows{}
-+ _ driver.RowsColumnTypeScanType = &binaryRows{}
-+ _ driver.RowsColumnTypeScanType = &textRows{}
-+ _ driver.RowsNextResultSet = &binaryRows{}
-+ _ driver.RowsNextResultSet = &textRows{}
-+)
-+
-+func TestMultiResultSet(t *testing.T) {
-+ type result struct {
-+ values [][]int
-+ columns []string
-+ }
-+
-+ // checkRows is a helper test function to validate rows containing 3 result
-+ // sets with specific values and columns. The basic query would look like this:
-+ //
-+ // SELECT 1 AS col1, 2 AS col2 UNION SELECT 3, 4;
-+ // SELECT 0 UNION SELECT 1;
-+ // SELECT 1 AS col1, 2 AS col2, 3 AS col3 UNION SELECT 4, 5, 6;
-+ //
-+ // to distinguish test cases the first string argument is put in front of
-+ // every error or fatal message.
-+ checkRows := func(desc string, rows *sql.Rows, dbt *DBTest) {
-+ expected := []result{
-+ {
-+ values: [][]int{{1, 2}, {3, 4}},
-+ columns: []string{"col1", "col2"},
-+ },
-+ {
-+ values: [][]int{{1, 2, 3}, {4, 5, 6}},
-+ columns: []string{"col1", "col2", "col3"},
-+ },
-+ }
-+
-+ var res1 result
-+ for rows.Next() {
-+ var res [2]int
-+ if err := rows.Scan(&res[0], &res[1]); err != nil {
-+ dbt.Fatal(err)
-+ }
-+ res1.values = append(res1.values, res[:])
-+ }
-+
-+ cols, err := rows.Columns()
-+ if err != nil {
-+ dbt.Fatal(desc, err)
-+ }
-+ res1.columns = cols
-+
-+ if !reflect.DeepEqual(expected[0], res1) {
-+ dbt.Error(desc, "want =", expected[0], "got =", res1)
-+ }
-+
-+ if !rows.NextResultSet() {
-+ dbt.Fatal(desc, "expected next result set")
-+ }
-+
-+ // ignoring one result set
-+
-+ if !rows.NextResultSet() {
-+ dbt.Fatal(desc, "expected next result set")
-+ }
-+
-+ var res2 result
-+ cols, err = rows.Columns()
-+ if err != nil {
-+ dbt.Fatal(desc, err)
-+ }
-+ res2.columns = cols
-+
-+ for rows.Next() {
-+ var res [3]int
-+ if err := rows.Scan(&res[0], &res[1], &res[2]); err != nil {
-+ dbt.Fatal(desc, err)
-+ }
-+ res2.values = append(res2.values, res[:])
-+ }
-+
-+ if !reflect.DeepEqual(expected[1], res2) {
-+ dbt.Error(desc, "want =", expected[1], "got =", res2)
-+ }
-+
-+ if rows.NextResultSet() {
-+ dbt.Error(desc, "unexpected next result set")
-+ }
-+
-+ if err := rows.Err(); err != nil {
-+ dbt.Error(desc, err)
-+ }
-+ }
-+
-+ runTestsWithMultiStatement(t, dsn, func(dbt *DBTest) {
-+ rows := dbt.mustQuery(`DO 1;
-+ SELECT 1 AS col1, 2 AS col2 UNION SELECT 3, 4;
-+ DO 1;
-+ SELECT 0 UNION SELECT 1;
-+ SELECT 1 AS col1, 2 AS col2, 3 AS col3 UNION SELECT 4, 5, 6;`)
-+ defer rows.Close()
-+ checkRows("query: ", rows, dbt)
-+ })
-+
-+ runTestsWithMultiStatement(t, dsn, func(dbt *DBTest) {
-+ queries := []string{
-+ `
-+ DROP PROCEDURE IF EXISTS test_mrss;
-+ CREATE PROCEDURE test_mrss()
-+ BEGIN
-+ DO 1;
-+ SELECT 1 AS col1, 2 AS col2 UNION SELECT 3, 4;
-+ DO 1;
-+ SELECT 0 UNION SELECT 1;
-+ SELECT 1 AS col1, 2 AS col2, 3 AS col3 UNION SELECT 4, 5, 6;
-+ END
-+ `,
-+ `
-+ DROP PROCEDURE IF EXISTS test_mrss;
-+ CREATE PROCEDURE test_mrss()
-+ BEGIN
-+ SELECT 1 AS col1, 2 AS col2 UNION SELECT 3, 4;
-+ SELECT 0 UNION SELECT 1;
-+ SELECT 1 AS col1, 2 AS col2, 3 AS col3 UNION SELECT 4, 5, 6;
-+ END
-+ `,
-+ }
-+
-+ defer dbt.mustExec("DROP PROCEDURE IF EXISTS test_mrss")
-+
-+ for i, query := range queries {
-+ dbt.mustExec(query)
-+
-+ stmt, err := dbt.db.Prepare("CALL test_mrss()")
-+ if err != nil {
-+ dbt.Fatalf("%v (i=%d)", err, i)
-+ }
-+ defer stmt.Close()
-+
-+ for j := 0; j < 2; j++ {
-+ rows, err := stmt.Query()
-+ if err != nil {
-+ dbt.Fatalf("%v (i=%d) (j=%d)", err, i, j)
-+ }
-+ checkRows(fmt.Sprintf("prepared stmt query (i=%d) (j=%d): ", i, j), rows, dbt)
-+ }
-+ }
-+ })
-+}
-+
-+func TestMultiResultSetNoSelect(t *testing.T) {
-+ runTestsWithMultiStatement(t, dsn, func(dbt *DBTest) {
-+ rows := dbt.mustQuery("DO 1; DO 2;")
-+ defer rows.Close()
-+
-+ if rows.Next() {
-+ dbt.Error("unexpected row")
-+ }
-+
-+ if rows.NextResultSet() {
-+ dbt.Error("unexpected next result set")
-+ }
-+
-+ if err := rows.Err(); err != nil {
-+ dbt.Error("expected nil; got ", err)
-+ }
-+ })
-+}
-+
-+// tests if rows are set in a proper state if some results were ignored before
-+// calling rows.NextResultSet.
-+func TestSkipResults(t *testing.T) {
-+ runTests(t, dsn, func(dbt *DBTest) {
-+ rows := dbt.mustQuery("SELECT 1, 2")
-+ defer rows.Close()
-+
-+ if !rows.Next() {
-+ dbt.Error("expected row")
-+ }
-+
-+ if rows.NextResultSet() {
-+ dbt.Error("unexpected next result set")
-+ }
-+
-+ if err := rows.Err(); err != nil {
-+ dbt.Error("expected nil; got ", err)
-+ }
-+ })
-+}
-+
-+func TestPingContext(t *testing.T) {
-+ runTests(t, dsn, func(dbt *DBTest) {
-+ ctx, cancel := context.WithCancel(context.Background())
-+ cancel()
-+ if err := dbt.db.PingContext(ctx); err != context.Canceled {
-+ dbt.Errorf("expected context.Canceled, got %v", err)
-+ }
-+ })
-+}
-+
-+func TestContextCancelExec(t *testing.T) {
-+ runTests(t, dsn, func(dbt *DBTest) {
-+ dbt.mustExec("CREATE TABLE test (v INTEGER)")
-+ ctx, cancel := context.WithCancel(context.Background())
-+
-+ // Delay execution for just a bit until db.ExecContext has begun.
-+ defer time.AfterFunc(250*time.Millisecond, cancel).Stop()
-+
-+ // This query will be canceled.
-+ startTime := time.Now()
-+ if _, err := dbt.db.ExecContext(ctx, "INSERT INTO test VALUES (SLEEP(1))"); err != context.Canceled {
-+ dbt.Errorf("expected context.Canceled, got %v", err)
-+ }
-+ if d := time.Since(startTime); d > 500*time.Millisecond {
-+ dbt.Errorf("too long execution time: %s", d)
-+ }
-+
-+ // Wait for the INSERT query to be done.
-+ time.Sleep(time.Second)
-+
-+ // Check how many times the query is executed.
-+ var v int
-+ if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
-+ dbt.Fatalf("%s", err.Error())
-+ }
-+ if v != 1 { // TODO: need to kill the query, and v should be 0.
-+ dbt.Skipf("[WARN] expected val to be 1, got %d", v)
-+ }
-+
-+ // Context is already canceled, so error should come before execution.
-+ if _, err := dbt.db.ExecContext(ctx, "INSERT INTO test VALUES (1)"); err == nil {
-+ dbt.Error("expected error")
-+ } else if err.Error() != "context canceled" {
-+ dbt.Fatalf("unexpected error: %s", err)
-+ }
-+
-+ // The second insert query will fail, so the table has no changes.
-+ if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
-+ dbt.Fatalf("%s", err.Error())
-+ }
-+ if v != 1 {
-+ dbt.Skipf("[WARN] expected val to be 1, got %d", v)
-+ }
-+ })
-+}
-+
-+func TestContextCancelQuery(t *testing.T) {
-+ runTests(t, dsn, func(dbt *DBTest) {
-+ dbt.mustExec("CREATE TABLE test (v INTEGER)")
-+ ctx, cancel := context.WithCancel(context.Background())
-+
-+ // Delay execution for just a bit until db.ExecContext has begun.
-+ defer time.AfterFunc(250*time.Millisecond, cancel).Stop()
-+
-+ // This query will be canceled.
-+ startTime := time.Now()
-+ if _, err := dbt.db.QueryContext(ctx, "INSERT INTO test VALUES (SLEEP(1))"); err != context.Canceled {
-+ dbt.Errorf("expected context.Canceled, got %v", err)
-+ }
-+ if d := time.Since(startTime); d > 500*time.Millisecond {
-+ dbt.Errorf("too long execution time: %s", d)
-+ }
-+
-+ // Wait for the INSERT query to be done.
-+ time.Sleep(time.Second)
-+
-+ // Check how many times the query is executed.
-+ var v int
-+ if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
-+ dbt.Fatalf("%s", err.Error())
-+ }
-+ if v != 1 { // TODO: need to kill the query, and v should be 0.
-+ dbt.Skipf("[WARN] expected val to be 1, got %d", v)
-+ }
-+
-+ // Context is already canceled, so error should come before execution.
-+ if _, err := dbt.db.QueryContext(ctx, "INSERT INTO test VALUES (1)"); err != context.Canceled {
-+ dbt.Errorf("expected context.Canceled, got %v", err)
-+ }
-+
-+ // The second insert query will fail, so the table has no changes.
-+ if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
-+ dbt.Fatalf("%s", err.Error())
-+ }
-+ if v != 1 {
-+ dbt.Skipf("[WARN] expected val to be 1, got %d", v)
-+ }
-+ })
-+}
-+
-+func TestContextCancelQueryRow(t *testing.T) {
-+ runTests(t, dsn, func(dbt *DBTest) {
-+ dbt.mustExec("CREATE TABLE test (v INTEGER)")
-+ dbt.mustExec("INSERT INTO test VALUES (1), (2), (3)")
-+ ctx, cancel := context.WithCancel(context.Background())
-+
-+ rows, err := dbt.db.QueryContext(ctx, "SELECT v FROM test")
-+ if err != nil {
-+ dbt.Fatalf("%s", err.Error())
-+ }
-+
-+ // the first row will be succeed.
-+ var v int
-+ if !rows.Next() {
-+ dbt.Fatalf("unexpected end")
-+ }
-+ if err := rows.Scan(&v); err != nil {
-+ dbt.Fatalf("%s", err.Error())
-+ }
-+
-+ cancel()
-+ // make sure the driver receives the cancel request.
-+ time.Sleep(100 * time.Millisecond)
-+
-+ if rows.Next() {
-+ dbt.Errorf("expected end, but not")
-+ }
-+ if err := rows.Err(); err != context.Canceled {
-+ dbt.Errorf("expected context.Canceled, got %v", err)
-+ }
-+ })
-+}
-+
-+func TestContextCancelPrepare(t *testing.T) {
-+ runTests(t, dsn, func(dbt *DBTest) {
-+ ctx, cancel := context.WithCancel(context.Background())
-+ cancel()
-+ if _, err := dbt.db.PrepareContext(ctx, "SELECT 1"); err != context.Canceled {
-+ dbt.Errorf("expected context.Canceled, got %v", err)
-+ }
-+ })
-+}
-+
-+func TestContextCancelStmtExec(t *testing.T) {
-+ runTests(t, dsn, func(dbt *DBTest) {
-+ dbt.mustExec("CREATE TABLE test (v INTEGER)")
-+ ctx, cancel := context.WithCancel(context.Background())
-+ stmt, err := dbt.db.PrepareContext(ctx, "INSERT INTO test VALUES (SLEEP(1))")
-+ if err != nil {
-+ dbt.Fatalf("unexpected error: %v", err)
-+ }
-+
-+ // Delay execution for just a bit until db.ExecContext has begun.
-+ defer time.AfterFunc(250*time.Millisecond, cancel).Stop()
-+
-+ // This query will be canceled.
-+ startTime := time.Now()
-+ if _, err := stmt.ExecContext(ctx); err != context.Canceled {
-+ dbt.Errorf("expected context.Canceled, got %v", err)
-+ }
-+ if d := time.Since(startTime); d > 500*time.Millisecond {
-+ dbt.Errorf("too long execution time: %s", d)
-+ }
-+
-+ // Wait for the INSERT query to be done.
-+ time.Sleep(time.Second)
-+
-+ // Check how many times the query is executed.
-+ var v int
-+ if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
-+ dbt.Fatalf("%s", err.Error())
-+ }
-+ if v != 1 { // TODO: need to kill the query, and v should be 0.
-+ dbt.Skipf("[WARN] expected val to be 1, got %d", v)
-+ }
-+ })
-+}
-+
-+func TestContextCancelStmtQuery(t *testing.T) {
-+ runTests(t, dsn, func(dbt *DBTest) {
-+ dbt.mustExec("CREATE TABLE test (v INTEGER)")
-+ ctx, cancel := context.WithCancel(context.Background())
-+ stmt, err := dbt.db.PrepareContext(ctx, "INSERT INTO test VALUES (SLEEP(1))")
-+ if err != nil {
-+ dbt.Fatalf("unexpected error: %v", err)
-+ }
-+
-+ // Delay execution for just a bit until db.ExecContext has begun.
-+ defer time.AfterFunc(250*time.Millisecond, cancel).Stop()
-+
-+ // This query will be canceled.
-+ startTime := time.Now()
-+ if _, err := stmt.QueryContext(ctx); err != context.Canceled {
-+ dbt.Errorf("expected context.Canceled, got %v", err)
-+ }
-+ if d := time.Since(startTime); d > 500*time.Millisecond {
-+ dbt.Errorf("too long execution time: %s", d)
-+ }
-+
-+ // Wait for the INSERT query has done.
-+ time.Sleep(time.Second)
-+
-+ // Check how many times the query is executed.
-+ var v int
-+ if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
-+ dbt.Fatalf("%s", err.Error())
-+ }
-+ if v != 1 { // TODO: need to kill the query, and v should be 0.
-+ dbt.Skipf("[WARN] expected val to be 1, got %d", v)
-+ }
-+ })
-+}
-+
-+func TestContextCancelBegin(t *testing.T) {
-+ runTests(t, dsn, func(dbt *DBTest) {
-+ dbt.mustExec("CREATE TABLE test (v INTEGER)")
-+ ctx, cancel := context.WithCancel(context.Background())
-+ tx, err := dbt.db.BeginTx(ctx, nil)
-+ if err != nil {
-+ dbt.Fatal(err)
-+ }
-+
-+ // Delay execution for just a bit until db.ExecContext has begun.
-+ defer time.AfterFunc(100*time.Millisecond, cancel).Stop()
-+
-+ // This query will be canceled.
-+ startTime := time.Now()
-+ if _, err := tx.ExecContext(ctx, "INSERT INTO test VALUES (SLEEP(1))"); err != context.Canceled {
-+ dbt.Errorf("expected context.Canceled, got %v", err)
-+ }
-+ if d := time.Since(startTime); d > 500*time.Millisecond {
-+ dbt.Errorf("too long execution time: %s", d)
-+ }
-+
-+ // Transaction is canceled, so expect an error.
-+ switch err := tx.Commit(); err {
-+ case sql.ErrTxDone:
-+ // because the transaction has already been rollbacked.
-+ // the database/sql package watches ctx
-+ // and rollbacks when ctx is canceled.
-+ case context.Canceled:
-+ // the database/sql package rollbacks on another goroutine,
-+ // so the transaction may not be rollbacked depending on goroutine scheduling.
-+ default:
-+ dbt.Errorf("expected sql.ErrTxDone or context.Canceled, got %v", err)
-+ }
-+
-+ // Context is canceled, so cannot begin a transaction.
-+ if _, err := dbt.db.BeginTx(ctx, nil); err != context.Canceled {
-+ dbt.Errorf("expected context.Canceled, got %v", err)
-+ }
-+ })
-+}
-+
-+func TestContextBeginIsolationLevel(t *testing.T) {
-+ runTests(t, dsn, func(dbt *DBTest) {
-+ dbt.mustExec("CREATE TABLE test (v INTEGER)")
-+ ctx, cancel := context.WithCancel(context.Background())
-+ defer cancel()
-+
-+ tx1, err := dbt.db.BeginTx(ctx, &sql.TxOptions{
-+ Isolation: sql.LevelRepeatableRead,
-+ })
-+ if err != nil {
-+ dbt.Fatal(err)
-+ }
-+
-+ tx2, err := dbt.db.BeginTx(ctx, &sql.TxOptions{
-+ Isolation: sql.LevelReadCommitted,
-+ })
-+ if err != nil {
-+ dbt.Fatal(err)
-+ }
-+
-+ _, err = tx1.ExecContext(ctx, "INSERT INTO test VALUES (1)")
-+ if err != nil {
-+ dbt.Fatal(err)
-+ }
-+
-+ var v int
-+ row := tx2.QueryRowContext(ctx, "SELECT COUNT(*) FROM test")
-+ if err := row.Scan(&v); err != nil {
-+ dbt.Fatal(err)
-+ }
-+ // Because writer transaction wasn't commited yet, it should be available
-+ if v != 0 {
-+ dbt.Errorf("expected val to be 0, got %d", v)
-+ }
-+
-+ err = tx1.Commit()
-+ if err != nil {
-+ dbt.Fatal(err)
-+ }
-+
-+ row = tx2.QueryRowContext(ctx, "SELECT COUNT(*) FROM test")
-+ if err := row.Scan(&v); err != nil {
-+ dbt.Fatal(err)
-+ }
-+ // Data written by writer transaction is already commited, it should be selectable
-+ if v != 1 {
-+ dbt.Errorf("expected val to be 1, got %d", v)
-+ }
-+ tx2.Commit()
-+ })
-+}
-+
-+func TestContextBeginReadOnly(t *testing.T) {
-+ runTests(t, dsn, func(dbt *DBTest) {
-+ dbt.mustExec("CREATE TABLE test (v INTEGER)")
-+ ctx, cancel := context.WithCancel(context.Background())
-+ defer cancel()
-+
-+ tx, err := dbt.db.BeginTx(ctx, &sql.TxOptions{
-+ ReadOnly: true,
-+ })
-+ if _, ok := err.(*MySQLError); ok {
-+ dbt.Skip("It seems that your MySQL does not support READ ONLY transactions")
-+ return
-+ } else if err != nil {
-+ dbt.Fatal(err)
-+ }
-+
-+ // INSERT queries fail in a READ ONLY transaction.
-+ _, err = tx.ExecContext(ctx, "INSERT INTO test VALUES (1)")
-+ if _, ok := err.(*MySQLError); !ok {
-+ dbt.Errorf("expected MySQLError, got %v", err)
-+ }
-+
-+ // SELECT queries can be executed.
-+ var v int
-+ row := tx.QueryRowContext(ctx, "SELECT COUNT(*) FROM test")
-+ if err := row.Scan(&v); err != nil {
-+ dbt.Fatal(err)
-+ }
-+ if v != 0 {
-+ dbt.Errorf("expected val to be 0, got %d", v)
-+ }
-+
-+ if err := tx.Commit(); err != nil {
-+ dbt.Fatal(err)
-+ }
-+ })
-+}
-+
-+func TestRowsColumnTypes(t *testing.T) {
-+ niNULL := sql.NullInt64{Int64: 0, Valid: false}
-+ ni0 := sql.NullInt64{Int64: 0, Valid: true}
-+ ni1 := sql.NullInt64{Int64: 1, Valid: true}
-+ ni42 := sql.NullInt64{Int64: 42, Valid: true}
-+ nfNULL := sql.NullFloat64{Float64: 0.0, Valid: false}
-+ nf0 := sql.NullFloat64{Float64: 0.0, Valid: true}
-+ nf1337 := sql.NullFloat64{Float64: 13.37, Valid: true}
-+ nt0 := NullTime{Time: time.Date(2006, 01, 02, 15, 04, 05, 0, time.UTC), Valid: true}
-+ nt1 := NullTime{Time: time.Date(2006, 01, 02, 15, 04, 05, 100000000, time.UTC), Valid: true}
-+ nt2 := NullTime{Time: time.Date(2006, 01, 02, 15, 04, 05, 110000000, time.UTC), Valid: true}
-+ nt6 := NullTime{Time: time.Date(2006, 01, 02, 15, 04, 05, 111111000, time.UTC), Valid: true}
-+ nd1 := NullTime{Time: time.Date(2006, 01, 02, 0, 0, 0, 0, time.UTC), Valid: true}
-+ nd2 := NullTime{Time: time.Date(2006, 03, 04, 0, 0, 0, 0, time.UTC), Valid: true}
-+ ndNULL := NullTime{Time: time.Time{}, Valid: false}
-+ rbNULL := sql.RawBytes(nil)
-+ rb0 := sql.RawBytes("0")
-+ rb42 := sql.RawBytes("42")
-+ rbTest := sql.RawBytes("Test")
-+ rb0pad4 := sql.RawBytes("0\x00\x00\x00") // BINARY right-pads values with 0x00
-+ rbx0 := sql.RawBytes("\x00")
-+ rbx42 := sql.RawBytes("\x42")
-+
-+ var columns = []struct {
-+ name string
-+ fieldType string // type used when creating table schema
-+ databaseTypeName string // actual type used by MySQL
-+ scanType reflect.Type
-+ nullable bool
-+ precision int64 // 0 if not ok
-+ scale int64
-+ valuesIn [3]string
-+ valuesOut [3]interface{}
-+ }{
-+ {"bit8null", "BIT(8)", "BIT", scanTypeRawBytes, true, 0, 0, [3]string{"0x0", "NULL", "0x42"}, [3]interface{}{rbx0, rbNULL, rbx42}},
-+ {"boolnull", "BOOL", "TINYINT", scanTypeNullInt, true, 0, 0, [3]string{"NULL", "true", "0"}, [3]interface{}{niNULL, ni1, ni0}},
-+ {"bool", "BOOL NOT NULL", "TINYINT", scanTypeInt8, false, 0, 0, [3]string{"1", "0", "FALSE"}, [3]interface{}{int8(1), int8(0), int8(0)}},
-+ {"intnull", "INTEGER", "INT", scanTypeNullInt, true, 0, 0, [3]string{"0", "NULL", "42"}, [3]interface{}{ni0, niNULL, ni42}},
-+ {"smallint", "SMALLINT NOT NULL", "SMALLINT", scanTypeInt16, false, 0, 0, [3]string{"0", "-32768", "32767"}, [3]interface{}{int16(0), int16(-32768), int16(32767)}},
-+ {"smallintnull", "SMALLINT", "SMALLINT", scanTypeNullInt, true, 0, 0, [3]string{"0", "NULL", "42"}, [3]interface{}{ni0, niNULL, ni42}},
-+ {"int3null", "INT(3)", "INT", scanTypeNullInt, true, 0, 0, [3]string{"0", "NULL", "42"}, [3]interface{}{ni0, niNULL, ni42}},
-+ {"int7", "INT(7) NOT NULL", "INT", scanTypeInt32, false, 0, 0, [3]string{"0", "-1337", "42"}, [3]interface{}{int32(0), int32(-1337), int32(42)}},
-+ {"mediumintnull", "MEDIUMINT", "MEDIUMINT", scanTypeNullInt, true, 0, 0, [3]string{"0", "42", "NULL"}, [3]interface{}{ni0, ni42, niNULL}},
-+ {"bigint", "BIGINT NOT NULL", "BIGINT", scanTypeInt64, false, 0, 0, [3]string{"0", "65535", "-42"}, [3]interface{}{int64(0), int64(65535), int64(-42)}},
-+ {"bigintnull", "BIGINT", "BIGINT", scanTypeNullInt, true, 0, 0, [3]string{"NULL", "1", "42"}, [3]interface{}{niNULL, ni1, ni42}},
-+ {"tinyuint", "TINYINT UNSIGNED NOT NULL", "TINYINT", scanTypeUint8, false, 0, 0, [3]string{"0", "255", "42"}, [3]interface{}{uint8(0), uint8(255), uint8(42)}},
-+ {"smalluint", "SMALLINT UNSIGNED NOT NULL", "SMALLINT", scanTypeUint16, false, 0, 0, [3]string{"0", "65535", "42"}, [3]interface{}{uint16(0), uint16(65535), uint16(42)}},
-+ {"biguint", "BIGINT UNSIGNED NOT NULL", "BIGINT", scanTypeUint64, false, 0, 0, [3]string{"0", "65535", "42"}, [3]interface{}{uint64(0), uint64(65535), uint64(42)}},
-+ {"uint13", "INT(13) UNSIGNED NOT NULL", "INT", scanTypeUint32, false, 0, 0, [3]string{"0", "1337", "42"}, [3]interface{}{uint32(0), uint32(1337), uint32(42)}},
-+ {"float", "FLOAT NOT NULL", "FLOAT", scanTypeFloat32, false, math.MaxInt64, math.MaxInt64, [3]string{"0", "42", "13.37"}, [3]interface{}{float32(0), float32(42), float32(13.37)}},
-+ {"floatnull", "FLOAT", "FLOAT", scanTypeNullFloat, true, math.MaxInt64, math.MaxInt64, [3]string{"0", "NULL", "13.37"}, [3]interface{}{nf0, nfNULL, nf1337}},
-+ {"float74null", "FLOAT(7,4)", "FLOAT", scanTypeNullFloat, true, math.MaxInt64, 4, [3]string{"0", "NULL", "13.37"}, [3]interface{}{nf0, nfNULL, nf1337}},
-+ {"double", "DOUBLE NOT NULL", "DOUBLE", scanTypeFloat64, false, math.MaxInt64, math.MaxInt64, [3]string{"0", "42", "13.37"}, [3]interface{}{float64(0), float64(42), float64(13.37)}},
-+ {"doublenull", "DOUBLE", "DOUBLE", scanTypeNullFloat, true, math.MaxInt64, math.MaxInt64, [3]string{"0", "NULL", "13.37"}, [3]interface{}{nf0, nfNULL, nf1337}},
-+ {"decimal1", "DECIMAL(10,6) NOT NULL", "DECIMAL", scanTypeRawBytes, false, 10, 6, [3]string{"0", "13.37", "1234.123456"}, [3]interface{}{sql.RawBytes("0.000000"), sql.RawBytes("13.370000"), sql.RawBytes("1234.123456")}},
-+ {"decimal1null", "DECIMAL(10,6)", "DECIMAL", scanTypeRawBytes, true, 10, 6, [3]string{"0", "NULL", "1234.123456"}, [3]interface{}{sql.RawBytes("0.000000"), rbNULL, sql.RawBytes("1234.123456")}},
-+ {"decimal2", "DECIMAL(8,4) NOT NULL", "DECIMAL", scanTypeRawBytes, false, 8, 4, [3]string{"0", "13.37", "1234.123456"}, [3]interface{}{sql.RawBytes("0.0000"), sql.RawBytes("13.3700"), sql.RawBytes("1234.1235")}},
-+ {"decimal2null", "DECIMAL(8,4)", "DECIMAL", scanTypeRawBytes, true, 8, 4, [3]string{"0", "NULL", "1234.123456"}, [3]interface{}{sql.RawBytes("0.0000"), rbNULL, sql.RawBytes("1234.1235")}},
-+ {"decimal3", "DECIMAL(5,0) NOT NULL", "DECIMAL", scanTypeRawBytes, false, 5, 0, [3]string{"0", "13.37", "-12345.123456"}, [3]interface{}{rb0, sql.RawBytes("13"), sql.RawBytes("-12345")}},
-+ {"decimal3null", "DECIMAL(5,0)", "DECIMAL", scanTypeRawBytes, true, 5, 0, [3]string{"0", "NULL", "-12345.123456"}, [3]interface{}{rb0, rbNULL, sql.RawBytes("-12345")}},
-+ {"char25null", "CHAR(25)", "CHAR", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0, rbNULL, rbTest}},
-+ {"varchar42", "VARCHAR(42) NOT NULL", "VARCHAR", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
-+ {"binary4null", "BINARY(4)", "BINARY", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0pad4, rbNULL, rbTest}},
-+ {"varbinary42", "VARBINARY(42) NOT NULL", "VARBINARY", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
-+ {"tinyblobnull", "TINYBLOB", "BLOB", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0, rbNULL, rbTest}},
-+ {"tinytextnull", "TINYTEXT", "TEXT", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0, rbNULL, rbTest}},
-+ {"blobnull", "BLOB", "BLOB", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0, rbNULL, rbTest}},
-+ {"textnull", "TEXT", "TEXT", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0, rbNULL, rbTest}},
-+ {"mediumblob", "MEDIUMBLOB NOT NULL", "BLOB", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
-+ {"mediumtext", "MEDIUMTEXT NOT NULL", "TEXT", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
-+ {"longblob", "LONGBLOB NOT NULL", "BLOB", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
-+ {"longtext", "LONGTEXT NOT NULL", "TEXT", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
-+ {"datetime", "DATETIME", "DATETIME", scanTypeNullTime, true, 0, 0, [3]string{"'2006-01-02 15:04:05'", "'2006-01-02 15:04:05.1'", "'2006-01-02 15:04:05.111111'"}, [3]interface{}{nt0, nt0, nt0}},
-+ {"datetime2", "DATETIME(2)", "DATETIME", scanTypeNullTime, true, 2, 2, [3]string{"'2006-01-02 15:04:05'", "'2006-01-02 15:04:05.1'", "'2006-01-02 15:04:05.111111'"}, [3]interface{}{nt0, nt1, nt2}},
-+ {"datetime6", "DATETIME(6)", "DATETIME", scanTypeNullTime, true, 6, 6, [3]string{"'2006-01-02 15:04:05'", "'2006-01-02 15:04:05.1'", "'2006-01-02 15:04:05.111111'"}, [3]interface{}{nt0, nt1, nt6}},
-+ {"date", "DATE", "DATE", scanTypeNullTime, true, 0, 0, [3]string{"'2006-01-02'", "NULL", "'2006-03-04'"}, [3]interface{}{nd1, ndNULL, nd2}},
-+ {"year", "YEAR NOT NULL", "YEAR", scanTypeUint16, false, 0, 0, [3]string{"2006", "2000", "1994"}, [3]interface{}{uint16(2006), uint16(2000), uint16(1994)}},
-+ }
-+
-+ schema := ""
-+ values1 := ""
-+ values2 := ""
-+ values3 := ""
-+ for _, column := range columns {
-+ schema += fmt.Sprintf("`%s` %s, ", column.name, column.fieldType)
-+ values1 += column.valuesIn[0] + ", "
-+ values2 += column.valuesIn[1] + ", "
-+ values3 += column.valuesIn[2] + ", "
-+ }
-+ schema = schema[:len(schema)-2]
-+ values1 = values1[:len(values1)-2]
-+ values2 = values2[:len(values2)-2]
-+ values3 = values3[:len(values3)-2]
-+
-+ dsns := []string{
-+ dsn + "&parseTime=true",
-+ dsn + "&parseTime=false",
-+ }
-+ for _, testdsn := range dsns {
-+ runTests(t, testdsn, func(dbt *DBTest) {
-+ dbt.mustExec("CREATE TABLE test (" + schema + ")")
-+ dbt.mustExec("INSERT INTO test VALUES (" + values1 + "), (" + values2 + "), (" + values3 + ")")
-+
-+ rows, err := dbt.db.Query("SELECT * FROM test")
-+ if err != nil {
-+ t.Fatalf("Query: %v", err)
-+ }
-+
-+ tt, err := rows.ColumnTypes()
-+ if err != nil {
-+ t.Fatalf("ColumnTypes: %v", err)
-+ }
-+
-+ if len(tt) != len(columns) {
-+ t.Fatalf("unexpected number of columns: expected %d, got %d", len(columns), len(tt))
-+ }
-+
-+ types := make([]reflect.Type, len(tt))
-+ for i, tp := range tt {
-+ column := columns[i]
-+
-+ // Name
-+ name := tp.Name()
-+ if name != column.name {
-+ t.Errorf("column name mismatch %s != %s", name, column.name)
-+ continue
-+ }
-+
-+ // DatabaseTypeName
-+ databaseTypeName := tp.DatabaseTypeName()
-+ if databaseTypeName != column.databaseTypeName {
-+ t.Errorf("databasetypename name mismatch for column %q: %s != %s", name, databaseTypeName, column.databaseTypeName)
-+ continue
-+ }
-+
-+ // ScanType
-+ scanType := tp.ScanType()
-+ if scanType != column.scanType {
-+ if scanType == nil {
-+ t.Errorf("scantype is null for column %q", name)
-+ } else {
-+ t.Errorf("scantype mismatch for column %q: %s != %s", name, scanType.Name(), column.scanType.Name())
-+ }
-+ continue
-+ }
-+ types[i] = scanType
-+
-+ // Nullable
-+ nullable, ok := tp.Nullable()
-+ if !ok {
-+ t.Errorf("nullable not ok %q", name)
-+ continue
-+ }
-+ if nullable != column.nullable {
-+ t.Errorf("nullable mismatch for column %q: %t != %t", name, nullable, column.nullable)
-+ }
-+
-+ // Length
-+ // length, ok := tp.Length()
-+ // if length != column.length {
-+ // if !ok {
-+ // t.Errorf("length not ok for column %q", name)
-+ // } else {
-+ // t.Errorf("length mismatch for column %q: %d != %d", name, length, column.length)
-+ // }
-+ // continue
-+ // }
-+
-+ // Precision and Scale
-+ precision, scale, ok := tp.DecimalSize()
-+ if precision != column.precision {
-+ if !ok {
-+ t.Errorf("precision not ok for column %q", name)
-+ } else {
-+ t.Errorf("precision mismatch for column %q: %d != %d", name, precision, column.precision)
-+ }
-+ continue
-+ }
-+ if scale != column.scale {
-+ if !ok {
-+ t.Errorf("scale not ok for column %q", name)
-+ } else {
-+ t.Errorf("scale mismatch for column %q: %d != %d", name, scale, column.scale)
-+ }
-+ continue
-+ }
-+ }
-+
-+ values := make([]interface{}, len(tt))
-+ for i := range values {
-+ values[i] = reflect.New(types[i]).Interface()
-+ }
-+ i := 0
-+ for rows.Next() {
-+ err = rows.Scan(values...)
-+ if err != nil {
-+ t.Fatalf("failed to scan values in %v", err)
-+ }
-+ for j := range values {
-+ value := reflect.ValueOf(values[j]).Elem().Interface()
-+ if !reflect.DeepEqual(value, columns[j].valuesOut[i]) {
-+ if columns[j].scanType == scanTypeRawBytes {
-+ t.Errorf("row %d, column %d: %v != %v", i, j, string(value.(sql.RawBytes)), string(columns[j].valuesOut[i].(sql.RawBytes)))
-+ } else {
-+ t.Errorf("row %d, column %d: %v != %v", i, j, value, columns[j].valuesOut[i])
-+ }
-+ }
-+ }
-+ i++
-+ }
-+ if i != 3 {
-+ t.Errorf("expected 3 rows, got %d", i)
-+ }
-+
-+ if err := rows.Close(); err != nil {
-+ t.Errorf("error closing rows: %s", err)
-+ }
-+ })
-+ }
-+}
-+
-+func TestValuerWithValueReceiverGivenNilValue(t *testing.T) {
-+ runTests(t, dsn, func(dbt *DBTest) {
-+ dbt.mustExec("CREATE TABLE test (value VARCHAR(255))")
-+ dbt.db.Exec("INSERT INTO test VALUES (?)", (*testValuer)(nil))
-+ // This test will panic on the INSERT if ConvertValue() does not check for typed nil before calling Value()
-+ })
-+}
-+
-+// TestRawBytesAreNotModified checks for a race condition that arises when a query context
-+// is canceled while a user is calling rows.Scan. This is a more stringent test than the one
-+// proposed in https://github.com/golang/go/issues/23519. Here we're explicitly using
-+// `sql.RawBytes` to check the contents of our internal buffers are not modified after an implicit
-+// call to `Rows.Close`, so Context cancellation should **not** invalidate the backing buffers.
-+func TestRawBytesAreNotModified(t *testing.T) {
-+ const blob = "abcdefghijklmnop"
-+ const contextRaceIterations = 20
-+ const blobSize = defaultBufSize * 3 / 4 // Second row overwrites first row.
-+ const insertRows = 4
-+
-+ var sqlBlobs = [2]string{
-+ strings.Repeat(blob, blobSize/len(blob)),
-+ strings.Repeat(strings.ToUpper(blob), blobSize/len(blob)),
-+ }
-+
-+ runTests(t, dsn, func(dbt *DBTest) {
-+ dbt.mustExec("CREATE TABLE test (id int, value BLOB) CHARACTER SET utf8")
-+ for i := 0; i < insertRows; i++ {
-+ dbt.mustExec("INSERT INTO test VALUES (?, ?)", i+1, sqlBlobs[i&1])
-+ }
-+
-+ for i := 0; i < contextRaceIterations; i++ {
-+ func() {
-+ ctx, cancel := context.WithCancel(context.Background())
-+ defer cancel()
-+
-+ rows, err := dbt.db.QueryContext(ctx, `SELECT id, value FROM test`)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ var b int
-+ var raw sql.RawBytes
-+ for rows.Next() {
-+ if err := rows.Scan(&b, &raw); err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ before := string(raw)
-+ // Ensure cancelling the query does not corrupt the contents of `raw`
-+ cancel()
-+ time.Sleep(time.Microsecond * 100)
-+ after := string(raw)
-+
-+ if before != after {
-+ t.Fatalf("the backing storage for sql.RawBytes has been modified (i=%v)", i)
-+ }
-+ }
-+ rows.Close()
-+ }()
-+ }
-+ })
-+}
-diff --git a/vendor/github.com/go-sql-driver/mysql/dsn.go b/vendor/github.com/go-sql-driver/mysql/dsn.go
-new file mode 100644
-index 00000000000..1d9b4ab0a21
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/dsn.go
-@@ -0,0 +1,636 @@
-+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-+//
-+// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved.
-+//
-+// This Source Code Form is subject to the terms of the Mozilla Public
-+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-+// You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+package mysql
-+
-+import (
-+ "bytes"
-+ "crypto/rsa"
-+ "crypto/tls"
-+ "errors"
-+ "fmt"
-+ "math/big"
-+ "net"
-+ "net/url"
-+ "sort"
-+ "strconv"
-+ "strings"
-+ "time"
-+)
-+
-+var (
-+ errInvalidDSNUnescaped = errors.New("invalid DSN: did you forget to escape a param value?")
-+ errInvalidDSNAddr = errors.New("invalid DSN: network address not terminated (missing closing brace)")
-+ errInvalidDSNNoSlash = errors.New("invalid DSN: missing the slash separating the database name")
-+ errInvalidDSNUnsafeCollation = errors.New("invalid DSN: interpolateParams can not be used with unsafe collations")
-+)
-+
-+// Config is a configuration parsed from a DSN string.
-+// If a new Config is created instead of being parsed from a DSN string,
-+// the NewConfig function should be used, which sets default values.
-+type Config struct {
-+ User string // Username
-+ Passwd string // Password (requires User)
-+ Net string // Network type
-+ Addr string // Network address (requires Net)
-+ DBName string // Database name
-+ Params map[string]string // Connection parameters
-+ Collation string // Connection collation
-+ Loc *time.Location // Location for time.Time values
-+ MaxAllowedPacket int // Max packet size allowed
-+ ServerPubKey string // Server public key name
-+ pubKey *rsa.PublicKey // Server public key
-+ TLSConfig string // TLS configuration name
-+ tls *tls.Config // TLS configuration
-+ Timeout time.Duration // Dial timeout
-+ ReadTimeout time.Duration // I/O read timeout
-+ WriteTimeout time.Duration // I/O write timeout
-+
-+ AllowAllFiles bool // Allow all files to be used with LOAD DATA LOCAL INFILE
-+ AllowCleartextPasswords bool // Allows the cleartext client side plugin
-+ AllowNativePasswords bool // Allows the native password authentication method
-+ AllowOldPasswords bool // Allows the old insecure password method
-+ ClientFoundRows bool // Return number of matching rows instead of rows changed
-+ ColumnsWithAlias bool // Prepend table alias to column names
-+ InterpolateParams bool // Interpolate placeholders into query string
-+ MultiStatements bool // Allow multiple statements in one query
-+ ParseTime bool // Parse time values to time.Time
-+ RejectReadOnly bool // Reject read-only connections
-+}
-+
-+// NewConfig creates a new Config and sets default values.
-+func NewConfig() *Config {
-+ return &Config{
-+ Collation: defaultCollation,
-+ Loc: time.UTC,
-+ MaxAllowedPacket: defaultMaxAllowedPacket,
-+ AllowNativePasswords: true,
-+ }
-+}
-+
-+func (cfg *Config) Clone() *Config {
-+ cp := *cfg
-+ if cp.tls != nil {
-+ cp.tls = cfg.tls.Clone()
-+ }
-+ if len(cp.Params) > 0 {
-+ cp.Params = make(map[string]string, len(cfg.Params))
-+ for k, v := range cfg.Params {
-+ cp.Params[k] = v
-+ }
-+ }
-+ if cfg.pubKey != nil {
-+ cp.pubKey = &rsa.PublicKey{
-+ N: new(big.Int).Set(cfg.pubKey.N),
-+ E: cfg.pubKey.E,
-+ }
-+ }
-+ return &cp
-+}
-+
-+func (cfg *Config) normalize() error {
-+ if cfg.InterpolateParams && unsafeCollations[cfg.Collation] {
-+ return errInvalidDSNUnsafeCollation
-+ }
-+
-+ // Set default network if empty
-+ if cfg.Net == "" {
-+ cfg.Net = "tcp"
-+ }
-+
-+ // Set default address if empty
-+ if cfg.Addr == "" {
-+ switch cfg.Net {
-+ case "tcp":
-+ cfg.Addr = "127.0.0.1:3306"
-+ case "unix":
-+ cfg.Addr = "/tmp/mysql.sock"
-+ default:
-+ return errors.New("default addr for network '" + cfg.Net + "' unknown")
-+ }
-+ } else if cfg.Net == "tcp" {
-+ cfg.Addr = ensureHavePort(cfg.Addr)
-+ }
-+
-+ switch cfg.TLSConfig {
-+ case "false", "":
-+ // don't set anything
-+ case "true":
-+ cfg.tls = &tls.Config{}
-+ case "skip-verify", "preferred":
-+ cfg.tls = &tls.Config{InsecureSkipVerify: true}
-+ default:
-+ cfg.tls = getTLSConfigClone(cfg.TLSConfig)
-+ if cfg.tls == nil {
-+ return errors.New("invalid value / unknown config name: " + cfg.TLSConfig)
-+ }
-+ }
-+
-+ if cfg.tls != nil && cfg.tls.ServerName == "" && !cfg.tls.InsecureSkipVerify {
-+ host, _, err := net.SplitHostPort(cfg.Addr)
-+ if err == nil {
-+ cfg.tls.ServerName = host
-+ }
-+ }
-+
-+ if cfg.ServerPubKey != "" {
-+ cfg.pubKey = getServerPubKey(cfg.ServerPubKey)
-+ if cfg.pubKey == nil {
-+ return errors.New("invalid value / unknown server pub key name: " + cfg.ServerPubKey)
-+ }
-+ }
-+
-+ return nil
-+}
-+
-+// FormatDSN formats the given Config into a DSN string which can be passed to
-+// the driver.
-+func (cfg *Config) FormatDSN() string {
-+ var buf bytes.Buffer
-+
-+ // [username[:password]@]
-+ if len(cfg.User) > 0 {
-+ buf.WriteString(cfg.User)
-+ if len(cfg.Passwd) > 0 {
-+ buf.WriteByte(':')
-+ buf.WriteString(cfg.Passwd)
-+ }
-+ buf.WriteByte('@')
-+ }
-+
-+ // [protocol[(address)]]
-+ if len(cfg.Net) > 0 {
-+ buf.WriteString(cfg.Net)
-+ if len(cfg.Addr) > 0 {
-+ buf.WriteByte('(')
-+ buf.WriteString(cfg.Addr)
-+ buf.WriteByte(')')
-+ }
-+ }
-+
-+ // /dbname
-+ buf.WriteByte('/')
-+ buf.WriteString(cfg.DBName)
-+
-+ // [?param1=value1&...¶mN=valueN]
-+ hasParam := false
-+
-+ if cfg.AllowAllFiles {
-+ hasParam = true
-+ buf.WriteString("?allowAllFiles=true")
-+ }
-+
-+ if cfg.AllowCleartextPasswords {
-+ if hasParam {
-+ buf.WriteString("&allowCleartextPasswords=true")
-+ } else {
-+ hasParam = true
-+ buf.WriteString("?allowCleartextPasswords=true")
-+ }
-+ }
-+
-+ if !cfg.AllowNativePasswords {
-+ if hasParam {
-+ buf.WriteString("&allowNativePasswords=false")
-+ } else {
-+ hasParam = true
-+ buf.WriteString("?allowNativePasswords=false")
-+ }
-+ }
-+
-+ if cfg.AllowOldPasswords {
-+ if hasParam {
-+ buf.WriteString("&allowOldPasswords=true")
-+ } else {
-+ hasParam = true
-+ buf.WriteString("?allowOldPasswords=true")
-+ }
-+ }
-+
-+ if cfg.ClientFoundRows {
-+ if hasParam {
-+ buf.WriteString("&clientFoundRows=true")
-+ } else {
-+ hasParam = true
-+ buf.WriteString("?clientFoundRows=true")
-+ }
-+ }
-+
-+ if col := cfg.Collation; col != defaultCollation && len(col) > 0 {
-+ if hasParam {
-+ buf.WriteString("&collation=")
-+ } else {
-+ hasParam = true
-+ buf.WriteString("?collation=")
-+ }
-+ buf.WriteString(col)
-+ }
-+
-+ if cfg.ColumnsWithAlias {
-+ if hasParam {
-+ buf.WriteString("&columnsWithAlias=true")
-+ } else {
-+ hasParam = true
-+ buf.WriteString("?columnsWithAlias=true")
-+ }
-+ }
-+
-+ if cfg.InterpolateParams {
-+ if hasParam {
-+ buf.WriteString("&interpolateParams=true")
-+ } else {
-+ hasParam = true
-+ buf.WriteString("?interpolateParams=true")
-+ }
-+ }
-+
-+ if cfg.Loc != time.UTC && cfg.Loc != nil {
-+ if hasParam {
-+ buf.WriteString("&loc=")
-+ } else {
-+ hasParam = true
-+ buf.WriteString("?loc=")
-+ }
-+ buf.WriteString(url.QueryEscape(cfg.Loc.String()))
-+ }
-+
-+ if cfg.MultiStatements {
-+ if hasParam {
-+ buf.WriteString("&multiStatements=true")
-+ } else {
-+ hasParam = true
-+ buf.WriteString("?multiStatements=true")
-+ }
-+ }
-+
-+ if cfg.ParseTime {
-+ if hasParam {
-+ buf.WriteString("&parseTime=true")
-+ } else {
-+ hasParam = true
-+ buf.WriteString("?parseTime=true")
-+ }
-+ }
-+
-+ if cfg.ReadTimeout > 0 {
-+ if hasParam {
-+ buf.WriteString("&readTimeout=")
-+ } else {
-+ hasParam = true
-+ buf.WriteString("?readTimeout=")
-+ }
-+ buf.WriteString(cfg.ReadTimeout.String())
-+ }
-+
-+ if cfg.RejectReadOnly {
-+ if hasParam {
-+ buf.WriteString("&rejectReadOnly=true")
-+ } else {
-+ hasParam = true
-+ buf.WriteString("?rejectReadOnly=true")
-+ }
-+ }
-+
-+ if len(cfg.ServerPubKey) > 0 {
-+ if hasParam {
-+ buf.WriteString("&serverPubKey=")
-+ } else {
-+ hasParam = true
-+ buf.WriteString("?serverPubKey=")
-+ }
-+ buf.WriteString(url.QueryEscape(cfg.ServerPubKey))
-+ }
-+
-+ if cfg.Timeout > 0 {
-+ if hasParam {
-+ buf.WriteString("&timeout=")
-+ } else {
-+ hasParam = true
-+ buf.WriteString("?timeout=")
-+ }
-+ buf.WriteString(cfg.Timeout.String())
-+ }
-+
-+ if len(cfg.TLSConfig) > 0 {
-+ if hasParam {
-+ buf.WriteString("&tls=")
-+ } else {
-+ hasParam = true
-+ buf.WriteString("?tls=")
-+ }
-+ buf.WriteString(url.QueryEscape(cfg.TLSConfig))
-+ }
-+
-+ if cfg.WriteTimeout > 0 {
-+ if hasParam {
-+ buf.WriteString("&writeTimeout=")
-+ } else {
-+ hasParam = true
-+ buf.WriteString("?writeTimeout=")
-+ }
-+ buf.WriteString(cfg.WriteTimeout.String())
-+ }
-+
-+ if cfg.MaxAllowedPacket != defaultMaxAllowedPacket {
-+ if hasParam {
-+ buf.WriteString("&maxAllowedPacket=")
-+ } else {
-+ hasParam = true
-+ buf.WriteString("?maxAllowedPacket=")
-+ }
-+ buf.WriteString(strconv.Itoa(cfg.MaxAllowedPacket))
-+
-+ }
-+
-+ // other params
-+ if cfg.Params != nil {
-+ var params []string
-+ for param := range cfg.Params {
-+ params = append(params, param)
-+ }
-+ sort.Strings(params)
-+ for _, param := range params {
-+ if hasParam {
-+ buf.WriteByte('&')
-+ } else {
-+ hasParam = true
-+ buf.WriteByte('?')
-+ }
-+
-+ buf.WriteString(param)
-+ buf.WriteByte('=')
-+ buf.WriteString(url.QueryEscape(cfg.Params[param]))
-+ }
-+ }
-+
-+ return buf.String()
-+}
-+
-+// ParseDSN parses the DSN string to a Config
-+func ParseDSN(dsn string) (cfg *Config, err error) {
-+ // New config with some default values
-+ cfg = NewConfig()
-+
-+ // [user[:password]@][net[(addr)]]/dbname[?param1=value1¶mN=valueN]
-+ // Find the last '/' (since the password or the net addr might contain a '/')
-+ foundSlash := false
-+ for i := len(dsn) - 1; i >= 0; i-- {
-+ if dsn[i] == '/' {
-+ foundSlash = true
-+ var j, k int
-+
-+ // left part is empty if i <= 0
-+ if i > 0 {
-+ // [username[:password]@][protocol[(address)]]
-+ // Find the last '@' in dsn[:i]
-+ for j = i; j >= 0; j-- {
-+ if dsn[j] == '@' {
-+ // username[:password]
-+ // Find the first ':' in dsn[:j]
-+ for k = 0; k < j; k++ {
-+ if dsn[k] == ':' {
-+ cfg.Passwd = dsn[k+1 : j]
-+ break
-+ }
-+ }
-+ cfg.User = dsn[:k]
-+
-+ break
-+ }
-+ }
-+
-+ // [protocol[(address)]]
-+ // Find the first '(' in dsn[j+1:i]
-+ for k = j + 1; k < i; k++ {
-+ if dsn[k] == '(' {
-+ // dsn[i-1] must be == ')' if an address is specified
-+ if dsn[i-1] != ')' {
-+ if strings.ContainsRune(dsn[k+1:i], ')') {
-+ return nil, errInvalidDSNUnescaped
-+ }
-+ return nil, errInvalidDSNAddr
-+ }
-+ cfg.Addr = dsn[k+1 : i-1]
-+ break
-+ }
-+ }
-+ cfg.Net = dsn[j+1 : k]
-+ }
-+
-+ // dbname[?param1=value1&...¶mN=valueN]
-+ // Find the first '?' in dsn[i+1:]
-+ for j = i + 1; j < len(dsn); j++ {
-+ if dsn[j] == '?' {
-+ if err = parseDSNParams(cfg, dsn[j+1:]); err != nil {
-+ return
-+ }
-+ break
-+ }
-+ }
-+ cfg.DBName = dsn[i+1 : j]
-+
-+ break
-+ }
-+ }
-+
-+ if !foundSlash && len(dsn) > 0 {
-+ return nil, errInvalidDSNNoSlash
-+ }
-+
-+ if err = cfg.normalize(); err != nil {
-+ return nil, err
-+ }
-+ return
-+}
-+
-+// parseDSNParams parses the DSN "query string"
-+// Values must be url.QueryEscape'ed
-+func parseDSNParams(cfg *Config, params string) (err error) {
-+ for _, v := range strings.Split(params, "&") {
-+ param := strings.SplitN(v, "=", 2)
-+ if len(param) != 2 {
-+ continue
-+ }
-+
-+ // cfg params
-+ switch value := param[1]; param[0] {
-+ // Disable INFILE whitelist / enable all files
-+ case "allowAllFiles":
-+ var isBool bool
-+ cfg.AllowAllFiles, isBool = readBool(value)
-+ if !isBool {
-+ return errors.New("invalid bool value: " + value)
-+ }
-+
-+ // Use cleartext authentication mode (MySQL 5.5.10+)
-+ case "allowCleartextPasswords":
-+ var isBool bool
-+ cfg.AllowCleartextPasswords, isBool = readBool(value)
-+ if !isBool {
-+ return errors.New("invalid bool value: " + value)
-+ }
-+
-+ // Use native password authentication
-+ case "allowNativePasswords":
-+ var isBool bool
-+ cfg.AllowNativePasswords, isBool = readBool(value)
-+ if !isBool {
-+ return errors.New("invalid bool value: " + value)
-+ }
-+
-+ // Use old authentication mode (pre MySQL 4.1)
-+ case "allowOldPasswords":
-+ var isBool bool
-+ cfg.AllowOldPasswords, isBool = readBool(value)
-+ if !isBool {
-+ return errors.New("invalid bool value: " + value)
-+ }
-+
-+ // Switch "rowsAffected" mode
-+ case "clientFoundRows":
-+ var isBool bool
-+ cfg.ClientFoundRows, isBool = readBool(value)
-+ if !isBool {
-+ return errors.New("invalid bool value: " + value)
-+ }
-+
-+ // Collation
-+ case "collation":
-+ cfg.Collation = value
-+ break
-+
-+ case "columnsWithAlias":
-+ var isBool bool
-+ cfg.ColumnsWithAlias, isBool = readBool(value)
-+ if !isBool {
-+ return errors.New("invalid bool value: " + value)
-+ }
-+
-+ // Compression
-+ case "compress":
-+ return errors.New("compression not implemented yet")
-+
-+ // Enable client side placeholder substitution
-+ case "interpolateParams":
-+ var isBool bool
-+ cfg.InterpolateParams, isBool = readBool(value)
-+ if !isBool {
-+ return errors.New("invalid bool value: " + value)
-+ }
-+
-+ // Time Location
-+ case "loc":
-+ if value, err = url.QueryUnescape(value); err != nil {
-+ return
-+ }
-+ cfg.Loc, err = time.LoadLocation(value)
-+ if err != nil {
-+ return
-+ }
-+
-+ // multiple statements in one query
-+ case "multiStatements":
-+ var isBool bool
-+ cfg.MultiStatements, isBool = readBool(value)
-+ if !isBool {
-+ return errors.New("invalid bool value: " + value)
-+ }
-+
-+ // time.Time parsing
-+ case "parseTime":
-+ var isBool bool
-+ cfg.ParseTime, isBool = readBool(value)
-+ if !isBool {
-+ return errors.New("invalid bool value: " + value)
-+ }
-+
-+ // I/O read Timeout
-+ case "readTimeout":
-+ cfg.ReadTimeout, err = time.ParseDuration(value)
-+ if err != nil {
-+ return
-+ }
-+
-+ // Reject read-only connections
-+ case "rejectReadOnly":
-+ var isBool bool
-+ cfg.RejectReadOnly, isBool = readBool(value)
-+ if !isBool {
-+ return errors.New("invalid bool value: " + value)
-+ }
-+
-+ // Server public key
-+ case "serverPubKey":
-+ name, err := url.QueryUnescape(value)
-+ if err != nil {
-+ return fmt.Errorf("invalid value for server pub key name: %v", err)
-+ }
-+ cfg.ServerPubKey = name
-+
-+ // Strict mode
-+ case "strict":
-+ panic("strict mode has been removed. See https://github.com/go-sql-driver/mysql/wiki/strict-mode")
-+
-+ // Dial Timeout
-+ case "timeout":
-+ cfg.Timeout, err = time.ParseDuration(value)
-+ if err != nil {
-+ return
-+ }
-+
-+ // TLS-Encryption
-+ case "tls":
-+ boolValue, isBool := readBool(value)
-+ if isBool {
-+ if boolValue {
-+ cfg.TLSConfig = "true"
-+ } else {
-+ cfg.TLSConfig = "false"
-+ }
-+ } else if vl := strings.ToLower(value); vl == "skip-verify" || vl == "preferred" {
-+ cfg.TLSConfig = vl
-+ } else {
-+ name, err := url.QueryUnescape(value)
-+ if err != nil {
-+ return fmt.Errorf("invalid value for TLS config name: %v", err)
-+ }
-+ cfg.TLSConfig = name
-+ }
-+
-+ // I/O write Timeout
-+ case "writeTimeout":
-+ cfg.WriteTimeout, err = time.ParseDuration(value)
-+ if err != nil {
-+ return
-+ }
-+ case "maxAllowedPacket":
-+ cfg.MaxAllowedPacket, err = strconv.Atoi(value)
-+ if err != nil {
-+ return
-+ }
-+ default:
-+ // lazy init
-+ if cfg.Params == nil {
-+ cfg.Params = make(map[string]string)
-+ }
-+
-+ if cfg.Params[param[0]], err = url.QueryUnescape(value); err != nil {
-+ return
-+ }
-+ }
-+ }
-+
-+ return
-+}
-+
-+func ensureHavePort(addr string) string {
-+ if _, _, err := net.SplitHostPort(addr); err != nil {
-+ return net.JoinHostPort(addr, "3306")
-+ }
-+ return addr
-+}
-diff --git a/vendor/github.com/go-sql-driver/mysql/dsn_test.go b/vendor/github.com/go-sql-driver/mysql/dsn_test.go
-new file mode 100644
-index 00000000000..50dc2932c19
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/dsn_test.go
-@@ -0,0 +1,415 @@
-+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-+//
-+// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved.
-+//
-+// This Source Code Form is subject to the terms of the Mozilla Public
-+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-+// You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+package mysql
-+
-+import (
-+ "crypto/tls"
-+ "fmt"
-+ "net/url"
-+ "reflect"
-+ "testing"
-+ "time"
-+)
-+
-+var testDSNs = []struct {
-+ in string
-+ out *Config
-+}{{
-+ "username:password@protocol(address)/dbname?param=value",
-+ &Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
-+}, {
-+ "username:password@protocol(address)/dbname?param=value&columnsWithAlias=true",
-+ &Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, ColumnsWithAlias: true},
-+}, {
-+ "username:password@protocol(address)/dbname?param=value&columnsWithAlias=true&multiStatements=true",
-+ &Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, ColumnsWithAlias: true, MultiStatements: true},
-+}, {
-+ "user@unix(/path/to/socket)/dbname?charset=utf8",
-+ &Config{User: "user", Net: "unix", Addr: "/path/to/socket", DBName: "dbname", Params: map[string]string{"charset": "utf8"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
-+}, {
-+ "user:password@tcp(localhost:5555)/dbname?charset=utf8&tls=true",
-+ &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "localhost:5555", DBName: "dbname", Params: map[string]string{"charset": "utf8"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, TLSConfig: "true"},
-+}, {
-+ "user:password@tcp(localhost:5555)/dbname?charset=utf8mb4,utf8&tls=skip-verify",
-+ &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "localhost:5555", DBName: "dbname", Params: map[string]string{"charset": "utf8mb4,utf8"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, TLSConfig: "skip-verify"},
-+}, {
-+ "user:password@/dbname?loc=UTC&timeout=30s&readTimeout=1s&writeTimeout=1s&allowAllFiles=1&clientFoundRows=true&allowOldPasswords=TRUE&collation=utf8mb4_unicode_ci&maxAllowedPacket=16777216&tls=false&allowCleartextPasswords=true&parseTime=true&rejectReadOnly=true",
-+ &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_unicode_ci", Loc: time.UTC, TLSConfig: "false", AllowCleartextPasswords: true, AllowNativePasswords: true, Timeout: 30 * time.Second, ReadTimeout: time.Second, WriteTimeout: time.Second, AllowAllFiles: true, AllowOldPasswords: true, ClientFoundRows: true, MaxAllowedPacket: 16777216, ParseTime: true, RejectReadOnly: true},
-+}, {
-+ "user:password@/dbname?allowNativePasswords=false&maxAllowedPacket=0",
-+ &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: 0, AllowNativePasswords: false},
-+}, {
-+ "user:p@ss(word)@tcp([de:ad:be:ef::ca:fe]:80)/dbname?loc=Local",
-+ &Config{User: "user", Passwd: "p@ss(word)", Net: "tcp", Addr: "[de:ad:be:ef::ca:fe]:80", DBName: "dbname", Collation: "utf8mb4_general_ci", Loc: time.Local, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
-+}, {
-+ "/dbname",
-+ &Config{Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
-+}, {
-+ "@/",
-+ &Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
-+}, {
-+ "/",
-+ &Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
-+}, {
-+ "",
-+ &Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
-+}, {
-+ "user:p@/ssword@/",
-+ &Config{User: "user", Passwd: "p@/ssword", Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
-+}, {
-+ "unix/?arg=%2Fsome%2Fpath.ext",
-+ &Config{Net: "unix", Addr: "/tmp/mysql.sock", Params: map[string]string{"arg": "/some/path.ext"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
-+}, {
-+ "tcp(127.0.0.1)/dbname",
-+ &Config{Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
-+}, {
-+ "tcp(de:ad:be:ef::ca:fe)/dbname",
-+ &Config{Net: "tcp", Addr: "[de:ad:be:ef::ca:fe]:3306", DBName: "dbname", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
-+},
-+}
-+
-+func TestDSNParser(t *testing.T) {
-+ for i, tst := range testDSNs {
-+ cfg, err := ParseDSN(tst.in)
-+ if err != nil {
-+ t.Error(err.Error())
-+ }
-+
-+ // pointer not static
-+ cfg.tls = nil
-+
-+ if !reflect.DeepEqual(cfg, tst.out) {
-+ t.Errorf("%d. ParseDSN(%q) mismatch:\ngot %+v\nwant %+v", i, tst.in, cfg, tst.out)
-+ }
-+ }
-+}
-+
-+func TestDSNParserInvalid(t *testing.T) {
-+ var invalidDSNs = []string{
-+ "@net(addr/", // no closing brace
-+ "@tcp(/", // no closing brace
-+ "tcp(/", // no closing brace
-+ "(/", // no closing brace
-+ "net(addr)//", // unescaped
-+ "User:pass@tcp(1.2.3.4:3306)", // no trailing slash
-+ "net()/", // unknown default addr
-+ //"/dbname?arg=/some/unescaped/path",
-+ }
-+
-+ for i, tst := range invalidDSNs {
-+ if _, err := ParseDSN(tst); err == nil {
-+ t.Errorf("invalid DSN #%d. (%s) didn't error!", i, tst)
-+ }
-+ }
-+}
-+
-+func TestDSNReformat(t *testing.T) {
-+ for i, tst := range testDSNs {
-+ dsn1 := tst.in
-+ cfg1, err := ParseDSN(dsn1)
-+ if err != nil {
-+ t.Error(err.Error())
-+ continue
-+ }
-+ cfg1.tls = nil // pointer not static
-+ res1 := fmt.Sprintf("%+v", cfg1)
-+
-+ dsn2 := cfg1.FormatDSN()
-+ cfg2, err := ParseDSN(dsn2)
-+ if err != nil {
-+ t.Error(err.Error())
-+ continue
-+ }
-+ cfg2.tls = nil // pointer not static
-+ res2 := fmt.Sprintf("%+v", cfg2)
-+
-+ if res1 != res2 {
-+ t.Errorf("%d. %q does not match %q", i, res2, res1)
-+ }
-+ }
-+}
-+
-+func TestDSNServerPubKey(t *testing.T) {
-+ baseDSN := "User:password@tcp(localhost:5555)/dbname?serverPubKey="
-+
-+ RegisterServerPubKey("testKey", testPubKeyRSA)
-+ defer DeregisterServerPubKey("testKey")
-+
-+ tst := baseDSN + "testKey"
-+ cfg, err := ParseDSN(tst)
-+ if err != nil {
-+ t.Error(err.Error())
-+ }
-+
-+ if cfg.ServerPubKey != "testKey" {
-+ t.Errorf("unexpected cfg.ServerPubKey value: %v", cfg.ServerPubKey)
-+ }
-+ if cfg.pubKey != testPubKeyRSA {
-+ t.Error("pub key pointer doesn't match")
-+ }
-+
-+ // Key is missing
-+ tst = baseDSN + "invalid_name"
-+ cfg, err = ParseDSN(tst)
-+ if err == nil {
-+ t.Errorf("invalid name in DSN (%s) but did not error. Got config: %#v", tst, cfg)
-+ }
-+}
-+
-+func TestDSNServerPubKeyQueryEscape(t *testing.T) {
-+ const name = "&%!:"
-+ dsn := "User:password@tcp(localhost:5555)/dbname?serverPubKey=" + url.QueryEscape(name)
-+
-+ RegisterServerPubKey(name, testPubKeyRSA)
-+ defer DeregisterServerPubKey(name)
-+
-+ cfg, err := ParseDSN(dsn)
-+ if err != nil {
-+ t.Error(err.Error())
-+ }
-+
-+ if cfg.pubKey != testPubKeyRSA {
-+ t.Error("pub key pointer doesn't match")
-+ }
-+}
-+
-+func TestDSNWithCustomTLS(t *testing.T) {
-+ baseDSN := "User:password@tcp(localhost:5555)/dbname?tls="
-+ tlsCfg := tls.Config{}
-+
-+ RegisterTLSConfig("utils_test", &tlsCfg)
-+ defer DeregisterTLSConfig("utils_test")
-+
-+ // Custom TLS is missing
-+ tst := baseDSN + "invalid_tls"
-+ cfg, err := ParseDSN(tst)
-+ if err == nil {
-+ t.Errorf("invalid custom TLS in DSN (%s) but did not error. Got config: %#v", tst, cfg)
-+ }
-+
-+ tst = baseDSN + "utils_test"
-+
-+ // Custom TLS with a server name
-+ name := "foohost"
-+ tlsCfg.ServerName = name
-+ cfg, err = ParseDSN(tst)
-+
-+ if err != nil {
-+ t.Error(err.Error())
-+ } else if cfg.tls.ServerName != name {
-+ t.Errorf("did not get the correct TLS ServerName (%s) parsing DSN (%s).", name, tst)
-+ }
-+
-+ // Custom TLS without a server name
-+ name = "localhost"
-+ tlsCfg.ServerName = ""
-+ cfg, err = ParseDSN(tst)
-+
-+ if err != nil {
-+ t.Error(err.Error())
-+ } else if cfg.tls.ServerName != name {
-+ t.Errorf("did not get the correct ServerName (%s) parsing DSN (%s).", name, tst)
-+ } else if tlsCfg.ServerName != "" {
-+ t.Errorf("tlsCfg was mutated ServerName (%s) should be empty parsing DSN (%s).", name, tst)
-+ }
-+}
-+
-+func TestDSNTLSConfig(t *testing.T) {
-+ expectedServerName := "example.com"
-+ dsn := "tcp(example.com:1234)/?tls=true"
-+
-+ cfg, err := ParseDSN(dsn)
-+ if err != nil {
-+ t.Error(err.Error())
-+ }
-+ if cfg.tls == nil {
-+ t.Error("cfg.tls should not be nil")
-+ }
-+ if cfg.tls.ServerName != expectedServerName {
-+ t.Errorf("cfg.tls.ServerName should be %q, got %q (host with port)", expectedServerName, cfg.tls.ServerName)
-+ }
-+
-+ dsn = "tcp(example.com)/?tls=true"
-+ cfg, err = ParseDSN(dsn)
-+ if err != nil {
-+ t.Error(err.Error())
-+ }
-+ if cfg.tls == nil {
-+ t.Error("cfg.tls should not be nil")
-+ }
-+ if cfg.tls.ServerName != expectedServerName {
-+ t.Errorf("cfg.tls.ServerName should be %q, got %q (host without port)", expectedServerName, cfg.tls.ServerName)
-+ }
-+}
-+
-+func TestDSNWithCustomTLSQueryEscape(t *testing.T) {
-+ const configKey = "&%!:"
-+ dsn := "User:password@tcp(localhost:5555)/dbname?tls=" + url.QueryEscape(configKey)
-+ name := "foohost"
-+ tlsCfg := tls.Config{ServerName: name}
-+
-+ RegisterTLSConfig(configKey, &tlsCfg)
-+ defer DeregisterTLSConfig(configKey)
-+
-+ cfg, err := ParseDSN(dsn)
-+
-+ if err != nil {
-+ t.Error(err.Error())
-+ } else if cfg.tls.ServerName != name {
-+ t.Errorf("did not get the correct TLS ServerName (%s) parsing DSN (%s).", name, dsn)
-+ }
-+}
-+
-+func TestDSNUnsafeCollation(t *testing.T) {
-+ _, err := ParseDSN("/dbname?collation=gbk_chinese_ci&interpolateParams=true")
-+ if err != errInvalidDSNUnsafeCollation {
-+ t.Errorf("expected %v, got %v", errInvalidDSNUnsafeCollation, err)
-+ }
-+
-+ _, err = ParseDSN("/dbname?collation=gbk_chinese_ci&interpolateParams=false")
-+ if err != nil {
-+ t.Errorf("expected %v, got %v", nil, err)
-+ }
-+
-+ _, err = ParseDSN("/dbname?collation=gbk_chinese_ci")
-+ if err != nil {
-+ t.Errorf("expected %v, got %v", nil, err)
-+ }
-+
-+ _, err = ParseDSN("/dbname?collation=ascii_bin&interpolateParams=true")
-+ if err != nil {
-+ t.Errorf("expected %v, got %v", nil, err)
-+ }
-+
-+ _, err = ParseDSN("/dbname?collation=latin1_german1_ci&interpolateParams=true")
-+ if err != nil {
-+ t.Errorf("expected %v, got %v", nil, err)
-+ }
-+
-+ _, err = ParseDSN("/dbname?collation=utf8_general_ci&interpolateParams=true")
-+ if err != nil {
-+ t.Errorf("expected %v, got %v", nil, err)
-+ }
-+
-+ _, err = ParseDSN("/dbname?collation=utf8mb4_general_ci&interpolateParams=true")
-+ if err != nil {
-+ t.Errorf("expected %v, got %v", nil, err)
-+ }
-+}
-+
-+func TestParamsAreSorted(t *testing.T) {
-+ expected := "/dbname?interpolateParams=true&foobar=baz&quux=loo"
-+ cfg := NewConfig()
-+ cfg.DBName = "dbname"
-+ cfg.InterpolateParams = true
-+ cfg.Params = map[string]string{
-+ "quux": "loo",
-+ "foobar": "baz",
-+ }
-+ actual := cfg.FormatDSN()
-+ if actual != expected {
-+ t.Errorf("generic Config.Params were not sorted: want %#v, got %#v", expected, actual)
-+ }
-+}
-+
-+func TestCloneConfig(t *testing.T) {
-+ RegisterServerPubKey("testKey", testPubKeyRSA)
-+ defer DeregisterServerPubKey("testKey")
-+
-+ expectedServerName := "example.com"
-+ dsn := "tcp(example.com:1234)/?tls=true&foobar=baz&serverPubKey=testKey"
-+ cfg, err := ParseDSN(dsn)
-+ if err != nil {
-+ t.Fatal(err.Error())
-+ }
-+
-+ cfg2 := cfg.Clone()
-+ if cfg == cfg2 {
-+ t.Errorf("Config.Clone did not create a separate config struct")
-+ }
-+
-+ if cfg2.tls.ServerName != expectedServerName {
-+ t.Errorf("cfg.tls.ServerName should be %q, got %q (host with port)", expectedServerName, cfg.tls.ServerName)
-+ }
-+
-+ cfg2.tls.ServerName = "example2.com"
-+ if cfg.tls.ServerName == cfg2.tls.ServerName {
-+ t.Errorf("changed cfg.tls.Server name should not propagate to original Config")
-+ }
-+
-+ if _, ok := cfg2.Params["foobar"]; !ok {
-+ t.Errorf("cloned Config is missing custom params")
-+ }
-+
-+ delete(cfg2.Params, "foobar")
-+
-+ if _, ok := cfg.Params["foobar"]; !ok {
-+ t.Errorf("custom params in cloned Config should not propagate to original Config")
-+ }
-+
-+ if !reflect.DeepEqual(cfg.pubKey, cfg2.pubKey) {
-+ t.Errorf("public key in Config should be identical")
-+ }
-+}
-+
-+func TestNormalizeTLSConfig(t *testing.T) {
-+ tt := []struct {
-+ tlsConfig string
-+ want *tls.Config
-+ }{
-+ {"", nil},
-+ {"false", nil},
-+ {"true", &tls.Config{ServerName: "myserver"}},
-+ {"skip-verify", &tls.Config{InsecureSkipVerify: true}},
-+ {"preferred", &tls.Config{InsecureSkipVerify: true}},
-+ {"test_tls_config", &tls.Config{ServerName: "myServerName"}},
-+ }
-+
-+ RegisterTLSConfig("test_tls_config", &tls.Config{ServerName: "myServerName"})
-+ defer func() { DeregisterTLSConfig("test_tls_config") }()
-+
-+ for _, tc := range tt {
-+ t.Run(tc.tlsConfig, func(t *testing.T) {
-+ cfg := &Config{
-+ Addr: "myserver:3306",
-+ TLSConfig: tc.tlsConfig,
-+ }
-+
-+ cfg.normalize()
-+
-+ if cfg.tls == nil {
-+ if tc.want != nil {
-+ t.Fatal("wanted a tls config but got nil instead")
-+ }
-+ return
-+ }
-+
-+ if cfg.tls.ServerName != tc.want.ServerName {
-+ t.Errorf("tls.ServerName doesn't match (want: '%s', got: '%s')",
-+ tc.want.ServerName, cfg.tls.ServerName)
-+ }
-+ if cfg.tls.InsecureSkipVerify != tc.want.InsecureSkipVerify {
-+ t.Errorf("tls.InsecureSkipVerify doesn't match (want: %T, got :%T)",
-+ tc.want.InsecureSkipVerify, cfg.tls.InsecureSkipVerify)
-+ }
-+ })
-+ }
-+}
-+
-+func BenchmarkParseDSN(b *testing.B) {
-+ b.ReportAllocs()
-+
-+ for i := 0; i < b.N; i++ {
-+ for _, tst := range testDSNs {
-+ if _, err := ParseDSN(tst.in); err != nil {
-+ b.Error(err.Error())
-+ }
-+ }
-+ }
-+}
-diff --git a/vendor/github.com/go-sql-driver/mysql/errors.go b/vendor/github.com/go-sql-driver/mysql/errors.go
-new file mode 100644
-index 00000000000..760782ff2fb
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/errors.go
-@@ -0,0 +1,65 @@
-+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-+//
-+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
-+//
-+// This Source Code Form is subject to the terms of the Mozilla Public
-+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-+// You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+package mysql
-+
-+import (
-+ "errors"
-+ "fmt"
-+ "log"
-+ "os"
-+)
-+
-+// Various errors the driver might return. Can change between driver versions.
-+var (
-+ ErrInvalidConn = errors.New("invalid connection")
-+ ErrMalformPkt = errors.New("malformed packet")
-+ ErrNoTLS = errors.New("TLS requested but server does not support TLS")
-+ ErrCleartextPassword = errors.New("this user requires clear text authentication. If you still want to use it, please add 'allowCleartextPasswords=1' to your DSN")
-+ ErrNativePassword = errors.New("this user requires mysql native password authentication.")
-+ ErrOldPassword = errors.New("this user requires old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords")
-+ ErrUnknownPlugin = errors.New("this authentication plugin is not supported")
-+ ErrOldProtocol = errors.New("MySQL server does not support required protocol 41+")
-+ ErrPktSync = errors.New("commands out of sync. You can't run this command now")
-+ ErrPktSyncMul = errors.New("commands out of sync. Did you run multiple statements at once?")
-+ ErrPktTooLarge = errors.New("packet for query is too large. Try adjusting the 'max_allowed_packet' variable on the server")
-+ ErrBusyBuffer = errors.New("busy buffer")
-+
-+ // errBadConnNoWrite is used for connection errors where nothing was sent to the database yet.
-+ // If this happens first in a function starting a database interaction, it should be replaced by driver.ErrBadConn
-+ // to trigger a resend.
-+ // See https://github.com/go-sql-driver/mysql/pull/302
-+ errBadConnNoWrite = errors.New("bad connection")
-+)
-+
-+var errLog = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime|log.Lshortfile))
-+
-+// Logger is used to log critical error messages.
-+type Logger interface {
-+ Print(v ...interface{})
-+}
-+
-+// SetLogger is used to set the logger for critical errors.
-+// The initial logger is os.Stderr.
-+func SetLogger(logger Logger) error {
-+ if logger == nil {
-+ return errors.New("logger is nil")
-+ }
-+ errLog = logger
-+ return nil
-+}
-+
-+// MySQLError is an error type which represents a single MySQL error
-+type MySQLError struct {
-+ Number uint16
-+ Message string
-+}
-+
-+func (me *MySQLError) Error() string {
-+ return fmt.Sprintf("Error %d: %s", me.Number, me.Message)
-+}
-diff --git a/vendor/github.com/go-sql-driver/mysql/errors_test.go b/vendor/github.com/go-sql-driver/mysql/errors_test.go
-new file mode 100644
-index 00000000000..96f9126d679
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/errors_test.go
-@@ -0,0 +1,42 @@
-+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-+//
-+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
-+//
-+// This Source Code Form is subject to the terms of the Mozilla Public
-+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-+// You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+package mysql
-+
-+import (
-+ "bytes"
-+ "log"
-+ "testing"
-+)
-+
-+func TestErrorsSetLogger(t *testing.T) {
-+ previous := errLog
-+ defer func() {
-+ errLog = previous
-+ }()
-+
-+ // set up logger
-+ const expected = "prefix: test\n"
-+ buffer := bytes.NewBuffer(make([]byte, 0, 64))
-+ logger := log.New(buffer, "prefix: ", 0)
-+
-+ // print
-+ SetLogger(logger)
-+ errLog.Print("test")
-+
-+ // check result
-+ if actual := buffer.String(); actual != expected {
-+ t.Errorf("expected %q, got %q", expected, actual)
-+ }
-+}
-+
-+func TestErrorsStrictIgnoreNotes(t *testing.T) {
-+ runTests(t, dsn+"&sql_notes=false", func(dbt *DBTest) {
-+ dbt.mustExec("DROP TABLE IF EXISTS does_not_exist")
-+ })
-+}
-diff --git a/vendor/github.com/go-sql-driver/mysql/fields.go b/vendor/github.com/go-sql-driver/mysql/fields.go
-new file mode 100644
-index 00000000000..e1e2ece4b16
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/fields.go
-@@ -0,0 +1,194 @@
-+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-+//
-+// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
-+//
-+// This Source Code Form is subject to the terms of the Mozilla Public
-+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-+// You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+package mysql
-+
-+import (
-+ "database/sql"
-+ "reflect"
-+)
-+
-+func (mf *mysqlField) typeDatabaseName() string {
-+ switch mf.fieldType {
-+ case fieldTypeBit:
-+ return "BIT"
-+ case fieldTypeBLOB:
-+ if mf.charSet != collations[binaryCollation] {
-+ return "TEXT"
-+ }
-+ return "BLOB"
-+ case fieldTypeDate:
-+ return "DATE"
-+ case fieldTypeDateTime:
-+ return "DATETIME"
-+ case fieldTypeDecimal:
-+ return "DECIMAL"
-+ case fieldTypeDouble:
-+ return "DOUBLE"
-+ case fieldTypeEnum:
-+ return "ENUM"
-+ case fieldTypeFloat:
-+ return "FLOAT"
-+ case fieldTypeGeometry:
-+ return "GEOMETRY"
-+ case fieldTypeInt24:
-+ return "MEDIUMINT"
-+ case fieldTypeJSON:
-+ return "JSON"
-+ case fieldTypeLong:
-+ return "INT"
-+ case fieldTypeLongBLOB:
-+ if mf.charSet != collations[binaryCollation] {
-+ return "LONGTEXT"
-+ }
-+ return "LONGBLOB"
-+ case fieldTypeLongLong:
-+ return "BIGINT"
-+ case fieldTypeMediumBLOB:
-+ if mf.charSet != collations[binaryCollation] {
-+ return "MEDIUMTEXT"
-+ }
-+ return "MEDIUMBLOB"
-+ case fieldTypeNewDate:
-+ return "DATE"
-+ case fieldTypeNewDecimal:
-+ return "DECIMAL"
-+ case fieldTypeNULL:
-+ return "NULL"
-+ case fieldTypeSet:
-+ return "SET"
-+ case fieldTypeShort:
-+ return "SMALLINT"
-+ case fieldTypeString:
-+ if mf.charSet == collations[binaryCollation] {
-+ return "BINARY"
-+ }
-+ return "CHAR"
-+ case fieldTypeTime:
-+ return "TIME"
-+ case fieldTypeTimestamp:
-+ return "TIMESTAMP"
-+ case fieldTypeTiny:
-+ return "TINYINT"
-+ case fieldTypeTinyBLOB:
-+ if mf.charSet != collations[binaryCollation] {
-+ return "TINYTEXT"
-+ }
-+ return "TINYBLOB"
-+ case fieldTypeVarChar:
-+ if mf.charSet == collations[binaryCollation] {
-+ return "VARBINARY"
-+ }
-+ return "VARCHAR"
-+ case fieldTypeVarString:
-+ if mf.charSet == collations[binaryCollation] {
-+ return "VARBINARY"
-+ }
-+ return "VARCHAR"
-+ case fieldTypeYear:
-+ return "YEAR"
-+ default:
-+ return ""
-+ }
-+}
-+
-+var (
-+ scanTypeFloat32 = reflect.TypeOf(float32(0))
-+ scanTypeFloat64 = reflect.TypeOf(float64(0))
-+ scanTypeInt8 = reflect.TypeOf(int8(0))
-+ scanTypeInt16 = reflect.TypeOf(int16(0))
-+ scanTypeInt32 = reflect.TypeOf(int32(0))
-+ scanTypeInt64 = reflect.TypeOf(int64(0))
-+ scanTypeNullFloat = reflect.TypeOf(sql.NullFloat64{})
-+ scanTypeNullInt = reflect.TypeOf(sql.NullInt64{})
-+ scanTypeNullTime = reflect.TypeOf(NullTime{})
-+ scanTypeUint8 = reflect.TypeOf(uint8(0))
-+ scanTypeUint16 = reflect.TypeOf(uint16(0))
-+ scanTypeUint32 = reflect.TypeOf(uint32(0))
-+ scanTypeUint64 = reflect.TypeOf(uint64(0))
-+ scanTypeRawBytes = reflect.TypeOf(sql.RawBytes{})
-+ scanTypeUnknown = reflect.TypeOf(new(interface{}))
-+)
-+
-+type mysqlField struct {
-+ tableName string
-+ name string
-+ length uint32
-+ flags fieldFlag
-+ fieldType fieldType
-+ decimals byte
-+ charSet uint8
-+}
-+
-+func (mf *mysqlField) scanType() reflect.Type {
-+ switch mf.fieldType {
-+ case fieldTypeTiny:
-+ if mf.flags&flagNotNULL != 0 {
-+ if mf.flags&flagUnsigned != 0 {
-+ return scanTypeUint8
-+ }
-+ return scanTypeInt8
-+ }
-+ return scanTypeNullInt
-+
-+ case fieldTypeShort, fieldTypeYear:
-+ if mf.flags&flagNotNULL != 0 {
-+ if mf.flags&flagUnsigned != 0 {
-+ return scanTypeUint16
-+ }
-+ return scanTypeInt16
-+ }
-+ return scanTypeNullInt
-+
-+ case fieldTypeInt24, fieldTypeLong:
-+ if mf.flags&flagNotNULL != 0 {
-+ if mf.flags&flagUnsigned != 0 {
-+ return scanTypeUint32
-+ }
-+ return scanTypeInt32
-+ }
-+ return scanTypeNullInt
-+
-+ case fieldTypeLongLong:
-+ if mf.flags&flagNotNULL != 0 {
-+ if mf.flags&flagUnsigned != 0 {
-+ return scanTypeUint64
-+ }
-+ return scanTypeInt64
-+ }
-+ return scanTypeNullInt
-+
-+ case fieldTypeFloat:
-+ if mf.flags&flagNotNULL != 0 {
-+ return scanTypeFloat32
-+ }
-+ return scanTypeNullFloat
-+
-+ case fieldTypeDouble:
-+ if mf.flags&flagNotNULL != 0 {
-+ return scanTypeFloat64
-+ }
-+ return scanTypeNullFloat
-+
-+ case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar,
-+ fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB,
-+ fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB,
-+ fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON,
-+ fieldTypeTime:
-+ return scanTypeRawBytes
-+
-+ case fieldTypeDate, fieldTypeNewDate,
-+ fieldTypeTimestamp, fieldTypeDateTime:
-+ // NullTime is always returned for more consistent behavior as it can
-+ // handle both cases of parseTime regardless if the field is nullable.
-+ return scanTypeNullTime
-+
-+ default:
-+ return scanTypeUnknown
-+ }
-+}
-diff --git a/vendor/github.com/go-sql-driver/mysql/go.mod b/vendor/github.com/go-sql-driver/mysql/go.mod
-new file mode 100644
-index 00000000000..29e5d800d48
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/go.mod
-@@ -0,0 +1,3 @@
-+module github.com/go-sql-driver/mysql
-+
-+go 1.9
-diff --git a/vendor/github.com/go-sql-driver/mysql/infile.go b/vendor/github.com/go-sql-driver/mysql/infile.go
-new file mode 100644
-index 00000000000..273cb0ba500
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/infile.go
-@@ -0,0 +1,182 @@
-+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-+//
-+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
-+//
-+// This Source Code Form is subject to the terms of the Mozilla Public
-+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-+// You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+package mysql
-+
-+import (
-+ "fmt"
-+ "io"
-+ "os"
-+ "strings"
-+ "sync"
-+)
-+
-+var (
-+ fileRegister map[string]bool
-+ fileRegisterLock sync.RWMutex
-+ readerRegister map[string]func() io.Reader
-+ readerRegisterLock sync.RWMutex
-+)
-+
-+// RegisterLocalFile adds the given file to the file whitelist,
-+// so that it can be used by "LOAD DATA LOCAL INFILE ".
-+// Alternatively you can allow the use of all local files with
-+// the DSN parameter 'allowAllFiles=true'
-+//
-+// filePath := "/home/gopher/data.csv"
-+// mysql.RegisterLocalFile(filePath)
-+// err := db.Exec("LOAD DATA LOCAL INFILE '" + filePath + "' INTO TABLE foo")
-+// if err != nil {
-+// ...
-+//
-+func RegisterLocalFile(filePath string) {
-+ fileRegisterLock.Lock()
-+ // lazy map init
-+ if fileRegister == nil {
-+ fileRegister = make(map[string]bool)
-+ }
-+
-+ fileRegister[strings.Trim(filePath, `"`)] = true
-+ fileRegisterLock.Unlock()
-+}
-+
-+// DeregisterLocalFile removes the given filepath from the whitelist.
-+func DeregisterLocalFile(filePath string) {
-+ fileRegisterLock.Lock()
-+ delete(fileRegister, strings.Trim(filePath, `"`))
-+ fileRegisterLock.Unlock()
-+}
-+
-+// RegisterReaderHandler registers a handler function which is used
-+// to receive a io.Reader.
-+// The Reader can be used by "LOAD DATA LOCAL INFILE Reader::".
-+// If the handler returns a io.ReadCloser Close() is called when the
-+// request is finished.
-+//
-+// mysql.RegisterReaderHandler("data", func() io.Reader {
-+// var csvReader io.Reader // Some Reader that returns CSV data
-+// ... // Open Reader here
-+// return csvReader
-+// })
-+// err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE foo")
-+// if err != nil {
-+// ...
-+//
-+func RegisterReaderHandler(name string, handler func() io.Reader) {
-+ readerRegisterLock.Lock()
-+ // lazy map init
-+ if readerRegister == nil {
-+ readerRegister = make(map[string]func() io.Reader)
-+ }
-+
-+ readerRegister[name] = handler
-+ readerRegisterLock.Unlock()
-+}
-+
-+// DeregisterReaderHandler removes the ReaderHandler function with
-+// the given name from the registry.
-+func DeregisterReaderHandler(name string) {
-+ readerRegisterLock.Lock()
-+ delete(readerRegister, name)
-+ readerRegisterLock.Unlock()
-+}
-+
-+func deferredClose(err *error, closer io.Closer) {
-+ closeErr := closer.Close()
-+ if *err == nil {
-+ *err = closeErr
-+ }
-+}
-+
-+func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
-+ var rdr io.Reader
-+ var data []byte
-+ packetSize := 16 * 1024 // 16KB is small enough for disk readahead and large enough for TCP
-+ if mc.maxWriteSize < packetSize {
-+ packetSize = mc.maxWriteSize
-+ }
-+
-+ if idx := strings.Index(name, "Reader::"); idx == 0 || (idx > 0 && name[idx-1] == '/') { // io.Reader
-+ // The server might return an an absolute path. See issue #355.
-+ name = name[idx+8:]
-+
-+ readerRegisterLock.RLock()
-+ handler, inMap := readerRegister[name]
-+ readerRegisterLock.RUnlock()
-+
-+ if inMap {
-+ rdr = handler()
-+ if rdr != nil {
-+ if cl, ok := rdr.(io.Closer); ok {
-+ defer deferredClose(&err, cl)
-+ }
-+ } else {
-+ err = fmt.Errorf("Reader '%s' is ", name)
-+ }
-+ } else {
-+ err = fmt.Errorf("Reader '%s' is not registered", name)
-+ }
-+ } else { // File
-+ name = strings.Trim(name, `"`)
-+ fileRegisterLock.RLock()
-+ fr := fileRegister[name]
-+ fileRegisterLock.RUnlock()
-+ if mc.cfg.AllowAllFiles || fr {
-+ var file *os.File
-+ var fi os.FileInfo
-+
-+ if file, err = os.Open(name); err == nil {
-+ defer deferredClose(&err, file)
-+
-+ // get file size
-+ if fi, err = file.Stat(); err == nil {
-+ rdr = file
-+ if fileSize := int(fi.Size()); fileSize < packetSize {
-+ packetSize = fileSize
-+ }
-+ }
-+ }
-+ } else {
-+ err = fmt.Errorf("local file '%s' is not registered", name)
-+ }
-+ }
-+
-+ // send content packets
-+ // if packetSize == 0, the Reader contains no data
-+ if err == nil && packetSize > 0 {
-+ data := make([]byte, 4+packetSize)
-+ var n int
-+ for err == nil {
-+ n, err = rdr.Read(data[4:])
-+ if n > 0 {
-+ if ioErr := mc.writePacket(data[:4+n]); ioErr != nil {
-+ return ioErr
-+ }
-+ }
-+ }
-+ if err == io.EOF {
-+ err = nil
-+ }
-+ }
-+
-+ // send empty packet (termination)
-+ if data == nil {
-+ data = make([]byte, 4)
-+ }
-+ if ioErr := mc.writePacket(data[:4]); ioErr != nil {
-+ return ioErr
-+ }
-+
-+ // read OK packet
-+ if err == nil {
-+ return mc.readResultOK()
-+ }
-+
-+ mc.readPacket()
-+ return err
-+}
-diff --git a/vendor/github.com/go-sql-driver/mysql/nulltime.go b/vendor/github.com/go-sql-driver/mysql/nulltime.go
-new file mode 100644
-index 00000000000..afa8a89e9ae
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/nulltime.go
-@@ -0,0 +1,50 @@
-+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-+//
-+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
-+//
-+// This Source Code Form is subject to the terms of the Mozilla Public
-+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-+// You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+package mysql
-+
-+import (
-+ "database/sql/driver"
-+ "fmt"
-+ "time"
-+)
-+
-+// Scan implements the Scanner interface.
-+// The value type must be time.Time or string / []byte (formatted time-string),
-+// otherwise Scan fails.
-+func (nt *NullTime) Scan(value interface{}) (err error) {
-+ if value == nil {
-+ nt.Time, nt.Valid = time.Time{}, false
-+ return
-+ }
-+
-+ switch v := value.(type) {
-+ case time.Time:
-+ nt.Time, nt.Valid = v, true
-+ return
-+ case []byte:
-+ nt.Time, err = parseDateTime(string(v), time.UTC)
-+ nt.Valid = (err == nil)
-+ return
-+ case string:
-+ nt.Time, err = parseDateTime(v, time.UTC)
-+ nt.Valid = (err == nil)
-+ return
-+ }
-+
-+ nt.Valid = false
-+ return fmt.Errorf("Can't convert %T to time.Time", value)
-+}
-+
-+// Value implements the driver Valuer interface.
-+func (nt NullTime) Value() (driver.Value, error) {
-+ if !nt.Valid {
-+ return nil, nil
-+ }
-+ return nt.Time, nil
-+}
-diff --git a/vendor/github.com/go-sql-driver/mysql/nulltime_go113.go b/vendor/github.com/go-sql-driver/mysql/nulltime_go113.go
-new file mode 100644
-index 00000000000..c392594dd44
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/nulltime_go113.go
-@@ -0,0 +1,31 @@
-+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-+//
-+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
-+//
-+// This Source Code Form is subject to the terms of the Mozilla Public
-+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-+// You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+// +build go1.13
-+
-+package mysql
-+
-+import (
-+ "database/sql"
-+)
-+
-+// NullTime represents a time.Time that may be NULL.
-+// NullTime implements the Scanner interface so
-+// it can be used as a scan destination:
-+//
-+// var nt NullTime
-+// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt)
-+// ...
-+// if nt.Valid {
-+// // use nt.Time
-+// } else {
-+// // NULL value
-+// }
-+//
-+// This NullTime implementation is not driver-specific
-+type NullTime sql.NullTime
-diff --git a/vendor/github.com/go-sql-driver/mysql/nulltime_legacy.go b/vendor/github.com/go-sql-driver/mysql/nulltime_legacy.go
-new file mode 100644
-index 00000000000..86d159d4416
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/nulltime_legacy.go
-@@ -0,0 +1,34 @@
-+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-+//
-+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
-+//
-+// This Source Code Form is subject to the terms of the Mozilla Public
-+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-+// You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+// +build !go1.13
-+
-+package mysql
-+
-+import (
-+ "time"
-+)
-+
-+// NullTime represents a time.Time that may be NULL.
-+// NullTime implements the Scanner interface so
-+// it can be used as a scan destination:
-+//
-+// var nt NullTime
-+// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt)
-+// ...
-+// if nt.Valid {
-+// // use nt.Time
-+// } else {
-+// // NULL value
-+// }
-+//
-+// This NullTime implementation is not driver-specific
-+type NullTime struct {
-+ Time time.Time
-+ Valid bool // Valid is true if Time is not NULL
-+}
-diff --git a/vendor/github.com/go-sql-driver/mysql/nulltime_test.go b/vendor/github.com/go-sql-driver/mysql/nulltime_test.go
-new file mode 100644
-index 00000000000..a14ec06072d
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/nulltime_test.go
-@@ -0,0 +1,62 @@
-+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-+//
-+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
-+//
-+// This Source Code Form is subject to the terms of the Mozilla Public
-+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-+// You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+package mysql
-+
-+import (
-+ "database/sql"
-+ "database/sql/driver"
-+ "testing"
-+ "time"
-+)
-+
-+var (
-+ // Check implementation of interfaces
-+ _ driver.Valuer = NullTime{}
-+ _ sql.Scanner = (*NullTime)(nil)
-+)
-+
-+func TestScanNullTime(t *testing.T) {
-+ var scanTests = []struct {
-+ in interface{}
-+ error bool
-+ valid bool
-+ time time.Time
-+ }{
-+ {tDate, false, true, tDate},
-+ {sDate, false, true, tDate},
-+ {[]byte(sDate), false, true, tDate},
-+ {tDateTime, false, true, tDateTime},
-+ {sDateTime, false, true, tDateTime},
-+ {[]byte(sDateTime), false, true, tDateTime},
-+ {tDate0, false, true, tDate0},
-+ {sDate0, false, true, tDate0},
-+ {[]byte(sDate0), false, true, tDate0},
-+ {sDateTime0, false, true, tDate0},
-+ {[]byte(sDateTime0), false, true, tDate0},
-+ {"", true, false, tDate0},
-+ {"1234", true, false, tDate0},
-+ {0, true, false, tDate0},
-+ }
-+
-+ var nt = NullTime{}
-+ var err error
-+
-+ for _, tst := range scanTests {
-+ err = nt.Scan(tst.in)
-+ if (err != nil) != tst.error {
-+ t.Errorf("%v: expected error status %t, got %t", tst.in, tst.error, (err != nil))
-+ }
-+ if nt.Valid != tst.valid {
-+ t.Errorf("%v: expected valid status %t, got %t", tst.in, tst.valid, nt.Valid)
-+ }
-+ if nt.Time != tst.time {
-+ t.Errorf("%v: expected time %v, got %v", tst.in, tst.time, nt.Time)
-+ }
-+ }
-+}
-diff --git a/vendor/github.com/go-sql-driver/mysql/packets.go b/vendor/github.com/go-sql-driver/mysql/packets.go
-new file mode 100644
-index 00000000000..30b3352c273
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/packets.go
-@@ -0,0 +1,1342 @@
-+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-+//
-+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
-+//
-+// This Source Code Form is subject to the terms of the Mozilla Public
-+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-+// You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+package mysql
-+
-+import (
-+ "bytes"
-+ "crypto/tls"
-+ "database/sql/driver"
-+ "encoding/binary"
-+ "errors"
-+ "fmt"
-+ "io"
-+ "math"
-+ "time"
-+)
-+
-+// Packets documentation:
-+// http://dev.mysql.com/doc/internals/en/client-server-protocol.html
-+
-+// Read packet to buffer 'data'
-+func (mc *mysqlConn) readPacket() ([]byte, error) {
-+ var prevData []byte
-+ for {
-+ // read packet header
-+ data, err := mc.buf.readNext(4)
-+ if err != nil {
-+ if cerr := mc.canceled.Value(); cerr != nil {
-+ return nil, cerr
-+ }
-+ errLog.Print(err)
-+ mc.Close()
-+ return nil, ErrInvalidConn
-+ }
-+
-+ // packet length [24 bit]
-+ pktLen := int(uint32(data[0]) | uint32(data[1])<<8 | uint32(data[2])<<16)
-+
-+ // check packet sync [8 bit]
-+ if data[3] != mc.sequence {
-+ if data[3] > mc.sequence {
-+ return nil, ErrPktSyncMul
-+ }
-+ return nil, ErrPktSync
-+ }
-+ mc.sequence++
-+
-+ // packets with length 0 terminate a previous packet which is a
-+ // multiple of (2^24)-1 bytes long
-+ if pktLen == 0 {
-+ // there was no previous packet
-+ if prevData == nil {
-+ errLog.Print(ErrMalformPkt)
-+ mc.Close()
-+ return nil, ErrInvalidConn
-+ }
-+
-+ return prevData, nil
-+ }
-+
-+ // read packet body [pktLen bytes]
-+ data, err = mc.buf.readNext(pktLen)
-+ if err != nil {
-+ if cerr := mc.canceled.Value(); cerr != nil {
-+ return nil, cerr
-+ }
-+ errLog.Print(err)
-+ mc.Close()
-+ return nil, ErrInvalidConn
-+ }
-+
-+ // return data if this was the last packet
-+ if pktLen < maxPacketSize {
-+ // zero allocations for non-split packets
-+ if prevData == nil {
-+ return data, nil
-+ }
-+
-+ return append(prevData, data...), nil
-+ }
-+
-+ prevData = append(prevData, data...)
-+ }
-+}
-+
-+// Write packet buffer 'data'
-+func (mc *mysqlConn) writePacket(data []byte) error {
-+ pktLen := len(data) - 4
-+
-+ if pktLen > mc.maxAllowedPacket {
-+ return ErrPktTooLarge
-+ }
-+
-+ // Perform a stale connection check. We only perform this check for
-+ // the first query on a connection that has been checked out of the
-+ // connection pool: a fresh connection from the pool is more likely
-+ // to be stale, and it has not performed any previous writes that
-+ // could cause data corruption, so it's safe to return ErrBadConn
-+ // if the check fails.
-+ if mc.reset {
-+ mc.reset = false
-+ conn := mc.netConn
-+ if mc.rawConn != nil {
-+ conn = mc.rawConn
-+ }
-+ var err error
-+ // If this connection has a ReadTimeout which we've been setting on
-+ // reads, reset it to its default value before we attempt a non-blocking
-+ // read, otherwise the scheduler will just time us out before we can read
-+ if mc.cfg.ReadTimeout != 0 {
-+ err = conn.SetReadDeadline(time.Time{})
-+ }
-+ if err == nil {
-+ err = connCheck(conn)
-+ }
-+ if err != nil {
-+ errLog.Print("closing bad idle connection: ", err)
-+ mc.Close()
-+ return driver.ErrBadConn
-+ }
-+ }
-+
-+ for {
-+ var size int
-+ if pktLen >= maxPacketSize {
-+ data[0] = 0xff
-+ data[1] = 0xff
-+ data[2] = 0xff
-+ size = maxPacketSize
-+ } else {
-+ data[0] = byte(pktLen)
-+ data[1] = byte(pktLen >> 8)
-+ data[2] = byte(pktLen >> 16)
-+ size = pktLen
-+ }
-+ data[3] = mc.sequence
-+
-+ // Write packet
-+ if mc.writeTimeout > 0 {
-+ if err := mc.netConn.SetWriteDeadline(time.Now().Add(mc.writeTimeout)); err != nil {
-+ return err
-+ }
-+ }
-+
-+ n, err := mc.netConn.Write(data[:4+size])
-+ if err == nil && n == 4+size {
-+ mc.sequence++
-+ if size != maxPacketSize {
-+ return nil
-+ }
-+ pktLen -= size
-+ data = data[size:]
-+ continue
-+ }
-+
-+ // Handle error
-+ if err == nil { // n != len(data)
-+ mc.cleanup()
-+ errLog.Print(ErrMalformPkt)
-+ } else {
-+ if cerr := mc.canceled.Value(); cerr != nil {
-+ return cerr
-+ }
-+ if n == 0 && pktLen == len(data)-4 {
-+ // only for the first loop iteration when nothing was written yet
-+ return errBadConnNoWrite
-+ }
-+ mc.cleanup()
-+ errLog.Print(err)
-+ }
-+ return ErrInvalidConn
-+ }
-+}
-+
-+/******************************************************************************
-+* Initialization Process *
-+******************************************************************************/
-+
-+// Handshake Initialization Packet
-+// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake
-+func (mc *mysqlConn) readHandshakePacket() (data []byte, plugin string, err error) {
-+ data, err = mc.readPacket()
-+ if err != nil {
-+ // for init we can rewrite this to ErrBadConn for sql.Driver to retry, since
-+ // in connection initialization we don't risk retrying non-idempotent actions.
-+ if err == ErrInvalidConn {
-+ return nil, "", driver.ErrBadConn
-+ }
-+ return
-+ }
-+
-+ if data[0] == iERR {
-+ return nil, "", mc.handleErrorPacket(data)
-+ }
-+
-+ // protocol version [1 byte]
-+ if data[0] < minProtocolVersion {
-+ return nil, "", fmt.Errorf(
-+ "unsupported protocol version %d. Version %d or higher is required",
-+ data[0],
-+ minProtocolVersion,
-+ )
-+ }
-+
-+ // server version [null terminated string]
-+ // connection id [4 bytes]
-+ pos := 1 + bytes.IndexByte(data[1:], 0x00) + 1 + 4
-+
-+ // first part of the password cipher [8 bytes]
-+ authData := data[pos : pos+8]
-+
-+ // (filler) always 0x00 [1 byte]
-+ pos += 8 + 1
-+
-+ // capability flags (lower 2 bytes) [2 bytes]
-+ mc.flags = clientFlag(binary.LittleEndian.Uint16(data[pos : pos+2]))
-+ if mc.flags&clientProtocol41 == 0 {
-+ return nil, "", ErrOldProtocol
-+ }
-+ if mc.flags&clientSSL == 0 && mc.cfg.tls != nil {
-+ if mc.cfg.TLSConfig == "preferred" {
-+ mc.cfg.tls = nil
-+ } else {
-+ return nil, "", ErrNoTLS
-+ }
-+ }
-+ pos += 2
-+
-+ if len(data) > pos {
-+ // character set [1 byte]
-+ // status flags [2 bytes]
-+ // capability flags (upper 2 bytes) [2 bytes]
-+ // length of auth-plugin-data [1 byte]
-+ // reserved (all [00]) [10 bytes]
-+ pos += 1 + 2 + 2 + 1 + 10
-+
-+ // second part of the password cipher [mininum 13 bytes],
-+ // where len=MAX(13, length of auth-plugin-data - 8)
-+ //
-+ // The web documentation is ambiguous about the length. However,
-+ // according to mysql-5.7/sql/auth/sql_authentication.cc line 538,
-+ // the 13th byte is "\0 byte, terminating the second part of
-+ // a scramble". So the second part of the password cipher is
-+ // a NULL terminated string that's at least 13 bytes with the
-+ // last byte being NULL.
-+ //
-+ // The official Python library uses the fixed length 12
-+ // which seems to work but technically could have a hidden bug.
-+ authData = append(authData, data[pos:pos+12]...)
-+ pos += 13
-+
-+ // EOF if version (>= 5.5.7 and < 5.5.10) or (>= 5.6.0 and < 5.6.2)
-+ // \NUL otherwise
-+ if end := bytes.IndexByte(data[pos:], 0x00); end != -1 {
-+ plugin = string(data[pos : pos+end])
-+ } else {
-+ plugin = string(data[pos:])
-+ }
-+
-+ // make a memory safe copy of the cipher slice
-+ var b [20]byte
-+ copy(b[:], authData)
-+ return b[:], plugin, nil
-+ }
-+
-+ // make a memory safe copy of the cipher slice
-+ var b [8]byte
-+ copy(b[:], authData)
-+ return b[:], plugin, nil
-+}
-+
-+// Client Authentication Packet
-+// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse
-+func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string) error {
-+ // Adjust client flags based on server support
-+ clientFlags := clientProtocol41 |
-+ clientSecureConn |
-+ clientLongPassword |
-+ clientTransactions |
-+ clientLocalFiles |
-+ clientPluginAuth |
-+ clientMultiResults |
-+ mc.flags&clientLongFlag
-+
-+ if mc.cfg.ClientFoundRows {
-+ clientFlags |= clientFoundRows
-+ }
-+
-+ // To enable TLS / SSL
-+ if mc.cfg.tls != nil {
-+ clientFlags |= clientSSL
-+ }
-+
-+ if mc.cfg.MultiStatements {
-+ clientFlags |= clientMultiStatements
-+ }
-+
-+ // encode length of the auth plugin data
-+ var authRespLEIBuf [9]byte
-+ authRespLen := len(authResp)
-+ authRespLEI := appendLengthEncodedInteger(authRespLEIBuf[:0], uint64(authRespLen))
-+ if len(authRespLEI) > 1 {
-+ // if the length can not be written in 1 byte, it must be written as a
-+ // length encoded integer
-+ clientFlags |= clientPluginAuthLenEncClientData
-+ }
-+
-+ pktLen := 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1 + len(authRespLEI) + len(authResp) + 21 + 1
-+
-+ // To specify a db name
-+ if n := len(mc.cfg.DBName); n > 0 {
-+ clientFlags |= clientConnectWithDB
-+ pktLen += n + 1
-+ }
-+
-+ // Calculate packet length and get buffer with that size
-+ data, err := mc.buf.takeSmallBuffer(pktLen + 4)
-+ if err != nil {
-+ // cannot take the buffer. Something must be wrong with the connection
-+ errLog.Print(err)
-+ return errBadConnNoWrite
-+ }
-+
-+ // ClientFlags [32 bit]
-+ data[4] = byte(clientFlags)
-+ data[5] = byte(clientFlags >> 8)
-+ data[6] = byte(clientFlags >> 16)
-+ data[7] = byte(clientFlags >> 24)
-+
-+ // MaxPacketSize [32 bit] (none)
-+ data[8] = 0x00
-+ data[9] = 0x00
-+ data[10] = 0x00
-+ data[11] = 0x00
-+
-+ // Charset [1 byte]
-+ var found bool
-+ data[12], found = collations[mc.cfg.Collation]
-+ if !found {
-+ // Note possibility for false negatives:
-+ // could be triggered although the collation is valid if the
-+ // collations map does not contain entries the server supports.
-+ return errors.New("unknown collation")
-+ }
-+
-+ // SSL Connection Request Packet
-+ // http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::SSLRequest
-+ if mc.cfg.tls != nil {
-+ // Send TLS / SSL request packet
-+ if err := mc.writePacket(data[:(4+4+1+23)+4]); err != nil {
-+ return err
-+ }
-+
-+ // Switch to TLS
-+ tlsConn := tls.Client(mc.netConn, mc.cfg.tls)
-+ if err := tlsConn.Handshake(); err != nil {
-+ return err
-+ }
-+ mc.rawConn = mc.netConn
-+ mc.netConn = tlsConn
-+ mc.buf.nc = tlsConn
-+ }
-+
-+ // Filler [23 bytes] (all 0x00)
-+ pos := 13
-+ for ; pos < 13+23; pos++ {
-+ data[pos] = 0
-+ }
-+
-+ // User [null terminated string]
-+ if len(mc.cfg.User) > 0 {
-+ pos += copy(data[pos:], mc.cfg.User)
-+ }
-+ data[pos] = 0x00
-+ pos++
-+
-+ // Auth Data [length encoded integer]
-+ pos += copy(data[pos:], authRespLEI)
-+ pos += copy(data[pos:], authResp)
-+
-+ // Databasename [null terminated string]
-+ if len(mc.cfg.DBName) > 0 {
-+ pos += copy(data[pos:], mc.cfg.DBName)
-+ data[pos] = 0x00
-+ pos++
-+ }
-+
-+ pos += copy(data[pos:], plugin)
-+ data[pos] = 0x00
-+ pos++
-+
-+ // Send Auth packet
-+ return mc.writePacket(data[:pos])
-+}
-+
-+// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
-+func (mc *mysqlConn) writeAuthSwitchPacket(authData []byte) error {
-+ pktLen := 4 + len(authData)
-+ data, err := mc.buf.takeSmallBuffer(pktLen)
-+ if err != nil {
-+ // cannot take the buffer. Something must be wrong with the connection
-+ errLog.Print(err)
-+ return errBadConnNoWrite
-+ }
-+
-+ // Add the auth data [EOF]
-+ copy(data[4:], authData)
-+ return mc.writePacket(data)
-+}
-+
-+/******************************************************************************
-+* Command Packets *
-+******************************************************************************/
-+
-+func (mc *mysqlConn) writeCommandPacket(command byte) error {
-+ // Reset Packet Sequence
-+ mc.sequence = 0
-+
-+ data, err := mc.buf.takeSmallBuffer(4 + 1)
-+ if err != nil {
-+ // cannot take the buffer. Something must be wrong with the connection
-+ errLog.Print(err)
-+ return errBadConnNoWrite
-+ }
-+
-+ // Add command byte
-+ data[4] = command
-+
-+ // Send CMD packet
-+ return mc.writePacket(data)
-+}
-+
-+func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error {
-+ // Reset Packet Sequence
-+ mc.sequence = 0
-+
-+ pktLen := 1 + len(arg)
-+ data, err := mc.buf.takeBuffer(pktLen + 4)
-+ if err != nil {
-+ // cannot take the buffer. Something must be wrong with the connection
-+ errLog.Print(err)
-+ return errBadConnNoWrite
-+ }
-+
-+ // Add command byte
-+ data[4] = command
-+
-+ // Add arg
-+ copy(data[5:], arg)
-+
-+ // Send CMD packet
-+ return mc.writePacket(data)
-+}
-+
-+func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error {
-+ // Reset Packet Sequence
-+ mc.sequence = 0
-+
-+ data, err := mc.buf.takeSmallBuffer(4 + 1 + 4)
-+ if err != nil {
-+ // cannot take the buffer. Something must be wrong with the connection
-+ errLog.Print(err)
-+ return errBadConnNoWrite
-+ }
-+
-+ // Add command byte
-+ data[4] = command
-+
-+ // Add arg [32 bit]
-+ data[5] = byte(arg)
-+ data[6] = byte(arg >> 8)
-+ data[7] = byte(arg >> 16)
-+ data[8] = byte(arg >> 24)
-+
-+ // Send CMD packet
-+ return mc.writePacket(data)
-+}
-+
-+/******************************************************************************
-+* Result Packets *
-+******************************************************************************/
-+
-+func (mc *mysqlConn) readAuthResult() ([]byte, string, error) {
-+ data, err := mc.readPacket()
-+ if err != nil {
-+ return nil, "", err
-+ }
-+
-+ // packet indicator
-+ switch data[0] {
-+
-+ case iOK:
-+ return nil, "", mc.handleOkPacket(data)
-+
-+ case iAuthMoreData:
-+ return data[1:], "", err
-+
-+ case iEOF:
-+ if len(data) == 1 {
-+ // https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::OldAuthSwitchRequest
-+ return nil, "mysql_old_password", nil
-+ }
-+ pluginEndIndex := bytes.IndexByte(data, 0x00)
-+ if pluginEndIndex < 0 {
-+ return nil, "", ErrMalformPkt
-+ }
-+ plugin := string(data[1:pluginEndIndex])
-+ authData := data[pluginEndIndex+1:]
-+ return authData, plugin, nil
-+
-+ default: // Error otherwise
-+ return nil, "", mc.handleErrorPacket(data)
-+ }
-+}
-+
-+// Returns error if Packet is not an 'Result OK'-Packet
-+func (mc *mysqlConn) readResultOK() error {
-+ data, err := mc.readPacket()
-+ if err != nil {
-+ return err
-+ }
-+
-+ if data[0] == iOK {
-+ return mc.handleOkPacket(data)
-+ }
-+ return mc.handleErrorPacket(data)
-+}
-+
-+// Result Set Header Packet
-+// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::Resultset
-+func (mc *mysqlConn) readResultSetHeaderPacket() (int, error) {
-+ data, err := mc.readPacket()
-+ if err == nil {
-+ switch data[0] {
-+
-+ case iOK:
-+ return 0, mc.handleOkPacket(data)
-+
-+ case iERR:
-+ return 0, mc.handleErrorPacket(data)
-+
-+ case iLocalInFile:
-+ return 0, mc.handleInFileRequest(string(data[1:]))
-+ }
-+
-+ // column count
-+ num, _, n := readLengthEncodedInteger(data)
-+ if n-len(data) == 0 {
-+ return int(num), nil
-+ }
-+
-+ return 0, ErrMalformPkt
-+ }
-+ return 0, err
-+}
-+
-+// Error Packet
-+// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-ERR_Packet
-+func (mc *mysqlConn) handleErrorPacket(data []byte) error {
-+ if data[0] != iERR {
-+ return ErrMalformPkt
-+ }
-+
-+ // 0xff [1 byte]
-+
-+ // Error Number [16 bit uint]
-+ errno := binary.LittleEndian.Uint16(data[1:3])
-+
-+ // 1792: ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION
-+ // 1290: ER_OPTION_PREVENTS_STATEMENT (returned by Aurora during failover)
-+ if (errno == 1792 || errno == 1290) && mc.cfg.RejectReadOnly {
-+ // Oops; we are connected to a read-only connection, and won't be able
-+ // to issue any write statements. Since RejectReadOnly is configured,
-+ // we throw away this connection hoping this one would have write
-+ // permission. This is specifically for a possible race condition
-+ // during failover (e.g. on AWS Aurora). See README.md for more.
-+ //
-+ // We explicitly close the connection before returning
-+ // driver.ErrBadConn to ensure that `database/sql` purges this
-+ // connection and initiates a new one for next statement next time.
-+ mc.Close()
-+ return driver.ErrBadConn
-+ }
-+
-+ pos := 3
-+
-+ // SQL State [optional: # + 5bytes string]
-+ if data[3] == 0x23 {
-+ //sqlstate := string(data[4 : 4+5])
-+ pos = 9
-+ }
-+
-+ // Error Message [string]
-+ return &MySQLError{
-+ Number: errno,
-+ Message: string(data[pos:]),
-+ }
-+}
-+
-+func readStatus(b []byte) statusFlag {
-+ return statusFlag(b[0]) | statusFlag(b[1])<<8
-+}
-+
-+// Ok Packet
-+// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-OK_Packet
-+func (mc *mysqlConn) handleOkPacket(data []byte) error {
-+ var n, m int
-+
-+ // 0x00 [1 byte]
-+
-+ // Affected rows [Length Coded Binary]
-+ mc.affectedRows, _, n = readLengthEncodedInteger(data[1:])
-+
-+ // Insert id [Length Coded Binary]
-+ mc.insertId, _, m = readLengthEncodedInteger(data[1+n:])
-+
-+ // server_status [2 bytes]
-+ mc.status = readStatus(data[1+n+m : 1+n+m+2])
-+ if mc.status&statusMoreResultsExists != 0 {
-+ return nil
-+ }
-+
-+ // warning count [2 bytes]
-+
-+ return nil
-+}
-+
-+// Read Packets as Field Packets until EOF-Packet or an Error appears
-+// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnDefinition41
-+func (mc *mysqlConn) readColumns(count int) ([]mysqlField, error) {
-+ columns := make([]mysqlField, count)
-+
-+ for i := 0; ; i++ {
-+ data, err := mc.readPacket()
-+ if err != nil {
-+ return nil, err
-+ }
-+
-+ // EOF Packet
-+ if data[0] == iEOF && (len(data) == 5 || len(data) == 1) {
-+ if i == count {
-+ return columns, nil
-+ }
-+ return nil, fmt.Errorf("column count mismatch n:%d len:%d", count, len(columns))
-+ }
-+
-+ // Catalog
-+ pos, err := skipLengthEncodedString(data)
-+ if err != nil {
-+ return nil, err
-+ }
-+
-+ // Database [len coded string]
-+ n, err := skipLengthEncodedString(data[pos:])
-+ if err != nil {
-+ return nil, err
-+ }
-+ pos += n
-+
-+ // Table [len coded string]
-+ if mc.cfg.ColumnsWithAlias {
-+ tableName, _, n, err := readLengthEncodedString(data[pos:])
-+ if err != nil {
-+ return nil, err
-+ }
-+ pos += n
-+ columns[i].tableName = string(tableName)
-+ } else {
-+ n, err = skipLengthEncodedString(data[pos:])
-+ if err != nil {
-+ return nil, err
-+ }
-+ pos += n
-+ }
-+
-+ // Original table [len coded string]
-+ n, err = skipLengthEncodedString(data[pos:])
-+ if err != nil {
-+ return nil, err
-+ }
-+ pos += n
-+
-+ // Name [len coded string]
-+ name, _, n, err := readLengthEncodedString(data[pos:])
-+ if err != nil {
-+ return nil, err
-+ }
-+ columns[i].name = string(name)
-+ pos += n
-+
-+ // Original name [len coded string]
-+ n, err = skipLengthEncodedString(data[pos:])
-+ if err != nil {
-+ return nil, err
-+ }
-+ pos += n
-+
-+ // Filler [uint8]
-+ pos++
-+
-+ // Charset [charset, collation uint8]
-+ columns[i].charSet = data[pos]
-+ pos += 2
-+
-+ // Length [uint32]
-+ columns[i].length = binary.LittleEndian.Uint32(data[pos : pos+4])
-+ pos += 4
-+
-+ // Field type [uint8]
-+ columns[i].fieldType = fieldType(data[pos])
-+ pos++
-+
-+ // Flags [uint16]
-+ columns[i].flags = fieldFlag(binary.LittleEndian.Uint16(data[pos : pos+2]))
-+ pos += 2
-+
-+ // Decimals [uint8]
-+ columns[i].decimals = data[pos]
-+ //pos++
-+
-+ // Default value [len coded binary]
-+ //if pos < len(data) {
-+ // defaultVal, _, err = bytesToLengthCodedBinary(data[pos:])
-+ //}
-+ }
-+}
-+
-+// Read Packets as Field Packets until EOF-Packet or an Error appears
-+// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::ResultsetRow
-+func (rows *textRows) readRow(dest []driver.Value) error {
-+ mc := rows.mc
-+
-+ if rows.rs.done {
-+ return io.EOF
-+ }
-+
-+ data, err := mc.readPacket()
-+ if err != nil {
-+ return err
-+ }
-+
-+ // EOF Packet
-+ if data[0] == iEOF && len(data) == 5 {
-+ // server_status [2 bytes]
-+ rows.mc.status = readStatus(data[3:])
-+ rows.rs.done = true
-+ if !rows.HasNextResultSet() {
-+ rows.mc = nil
-+ }
-+ return io.EOF
-+ }
-+ if data[0] == iERR {
-+ rows.mc = nil
-+ return mc.handleErrorPacket(data)
-+ }
-+
-+ // RowSet Packet
-+ var n int
-+ var isNull bool
-+ pos := 0
-+
-+ for i := range dest {
-+ // Read bytes and convert to string
-+ dest[i], isNull, n, err = readLengthEncodedString(data[pos:])
-+ pos += n
-+ if err == nil {
-+ if !isNull {
-+ if !mc.parseTime {
-+ continue
-+ } else {
-+ switch rows.rs.columns[i].fieldType {
-+ case fieldTypeTimestamp, fieldTypeDateTime,
-+ fieldTypeDate, fieldTypeNewDate:
-+ dest[i], err = parseDateTime(
-+ string(dest[i].([]byte)),
-+ mc.cfg.Loc,
-+ )
-+ if err == nil {
-+ continue
-+ }
-+ default:
-+ continue
-+ }
-+ }
-+
-+ } else {
-+ dest[i] = nil
-+ continue
-+ }
-+ }
-+ return err // err != nil
-+ }
-+
-+ return nil
-+}
-+
-+// Reads Packets until EOF-Packet or an Error appears. Returns count of Packets read
-+func (mc *mysqlConn) readUntilEOF() error {
-+ for {
-+ data, err := mc.readPacket()
-+ if err != nil {
-+ return err
-+ }
-+
-+ switch data[0] {
-+ case iERR:
-+ return mc.handleErrorPacket(data)
-+ case iEOF:
-+ if len(data) == 5 {
-+ mc.status = readStatus(data[3:])
-+ }
-+ return nil
-+ }
-+ }
-+}
-+
-+/******************************************************************************
-+* Prepared Statements *
-+******************************************************************************/
-+
-+// Prepare Result Packets
-+// http://dev.mysql.com/doc/internals/en/com-stmt-prepare-response.html
-+func (stmt *mysqlStmt) readPrepareResultPacket() (uint16, error) {
-+ data, err := stmt.mc.readPacket()
-+ if err == nil {
-+ // packet indicator [1 byte]
-+ if data[0] != iOK {
-+ return 0, stmt.mc.handleErrorPacket(data)
-+ }
-+
-+ // statement id [4 bytes]
-+ stmt.id = binary.LittleEndian.Uint32(data[1:5])
-+
-+ // Column count [16 bit uint]
-+ columnCount := binary.LittleEndian.Uint16(data[5:7])
-+
-+ // Param count [16 bit uint]
-+ stmt.paramCount = int(binary.LittleEndian.Uint16(data[7:9]))
-+
-+ // Reserved [8 bit]
-+
-+ // Warning count [16 bit uint]
-+
-+ return columnCount, nil
-+ }
-+ return 0, err
-+}
-+
-+// http://dev.mysql.com/doc/internals/en/com-stmt-send-long-data.html
-+func (stmt *mysqlStmt) writeCommandLongData(paramID int, arg []byte) error {
-+ maxLen := stmt.mc.maxAllowedPacket - 1
-+ pktLen := maxLen
-+
-+ // After the header (bytes 0-3) follows before the data:
-+ // 1 byte command
-+ // 4 bytes stmtID
-+ // 2 bytes paramID
-+ const dataOffset = 1 + 4 + 2
-+
-+ // Cannot use the write buffer since
-+ // a) the buffer is too small
-+ // b) it is in use
-+ data := make([]byte, 4+1+4+2+len(arg))
-+
-+ copy(data[4+dataOffset:], arg)
-+
-+ for argLen := len(arg); argLen > 0; argLen -= pktLen - dataOffset {
-+ if dataOffset+argLen < maxLen {
-+ pktLen = dataOffset + argLen
-+ }
-+
-+ stmt.mc.sequence = 0
-+ // Add command byte [1 byte]
-+ data[4] = comStmtSendLongData
-+
-+ // Add stmtID [32 bit]
-+ data[5] = byte(stmt.id)
-+ data[6] = byte(stmt.id >> 8)
-+ data[7] = byte(stmt.id >> 16)
-+ data[8] = byte(stmt.id >> 24)
-+
-+ // Add paramID [16 bit]
-+ data[9] = byte(paramID)
-+ data[10] = byte(paramID >> 8)
-+
-+ // Send CMD packet
-+ err := stmt.mc.writePacket(data[:4+pktLen])
-+ if err == nil {
-+ data = data[pktLen-dataOffset:]
-+ continue
-+ }
-+ return err
-+
-+ }
-+
-+ // Reset Packet Sequence
-+ stmt.mc.sequence = 0
-+ return nil
-+}
-+
-+// Execute Prepared Statement
-+// http://dev.mysql.com/doc/internals/en/com-stmt-execute.html
-+func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
-+ if len(args) != stmt.paramCount {
-+ return fmt.Errorf(
-+ "argument count mismatch (got: %d; has: %d)",
-+ len(args),
-+ stmt.paramCount,
-+ )
-+ }
-+
-+ const minPktLen = 4 + 1 + 4 + 1 + 4
-+ mc := stmt.mc
-+
-+ // Determine threshold dynamically to avoid packet size shortage.
-+ longDataSize := mc.maxAllowedPacket / (stmt.paramCount + 1)
-+ if longDataSize < 64 {
-+ longDataSize = 64
-+ }
-+
-+ // Reset packet-sequence
-+ mc.sequence = 0
-+
-+ var data []byte
-+ var err error
-+
-+ if len(args) == 0 {
-+ data, err = mc.buf.takeBuffer(minPktLen)
-+ } else {
-+ data, err = mc.buf.takeCompleteBuffer()
-+ // In this case the len(data) == cap(data) which is used to optimise the flow below.
-+ }
-+ if err != nil {
-+ // cannot take the buffer. Something must be wrong with the connection
-+ errLog.Print(err)
-+ return errBadConnNoWrite
-+ }
-+
-+ // command [1 byte]
-+ data[4] = comStmtExecute
-+
-+ // statement_id [4 bytes]
-+ data[5] = byte(stmt.id)
-+ data[6] = byte(stmt.id >> 8)
-+ data[7] = byte(stmt.id >> 16)
-+ data[8] = byte(stmt.id >> 24)
-+
-+ // flags (0: CURSOR_TYPE_NO_CURSOR) [1 byte]
-+ data[9] = 0x00
-+
-+ // iteration_count (uint32(1)) [4 bytes]
-+ data[10] = 0x01
-+ data[11] = 0x00
-+ data[12] = 0x00
-+ data[13] = 0x00
-+
-+ if len(args) > 0 {
-+ pos := minPktLen
-+
-+ var nullMask []byte
-+ if maskLen, typesLen := (len(args)+7)/8, 1+2*len(args); pos+maskLen+typesLen >= cap(data) {
-+ // buffer has to be extended but we don't know by how much so
-+ // we depend on append after all data with known sizes fit.
-+ // We stop at that because we deal with a lot of columns here
-+ // which makes the required allocation size hard to guess.
-+ tmp := make([]byte, pos+maskLen+typesLen)
-+ copy(tmp[:pos], data[:pos])
-+ data = tmp
-+ nullMask = data[pos : pos+maskLen]
-+ // No need to clean nullMask as make ensures that.
-+ pos += maskLen
-+ } else {
-+ nullMask = data[pos : pos+maskLen]
-+ for i := range nullMask {
-+ nullMask[i] = 0
-+ }
-+ pos += maskLen
-+ }
-+
-+ // newParameterBoundFlag 1 [1 byte]
-+ data[pos] = 0x01
-+ pos++
-+
-+ // type of each parameter [len(args)*2 bytes]
-+ paramTypes := data[pos:]
-+ pos += len(args) * 2
-+
-+ // value of each parameter [n bytes]
-+ paramValues := data[pos:pos]
-+ valuesCap := cap(paramValues)
-+
-+ for i, arg := range args {
-+ // build NULL-bitmap
-+ if arg == nil {
-+ nullMask[i/8] |= 1 << (uint(i) & 7)
-+ paramTypes[i+i] = byte(fieldTypeNULL)
-+ paramTypes[i+i+1] = 0x00
-+ continue
-+ }
-+
-+ // cache types and values
-+ switch v := arg.(type) {
-+ case int64:
-+ paramTypes[i+i] = byte(fieldTypeLongLong)
-+ paramTypes[i+i+1] = 0x00
-+
-+ if cap(paramValues)-len(paramValues)-8 >= 0 {
-+ paramValues = paramValues[:len(paramValues)+8]
-+ binary.LittleEndian.PutUint64(
-+ paramValues[len(paramValues)-8:],
-+ uint64(v),
-+ )
-+ } else {
-+ paramValues = append(paramValues,
-+ uint64ToBytes(uint64(v))...,
-+ )
-+ }
-+
-+ case uint64:
-+ paramTypes[i+i] = byte(fieldTypeLongLong)
-+ paramTypes[i+i+1] = 0x80 // type is unsigned
-+
-+ if cap(paramValues)-len(paramValues)-8 >= 0 {
-+ paramValues = paramValues[:len(paramValues)+8]
-+ binary.LittleEndian.PutUint64(
-+ paramValues[len(paramValues)-8:],
-+ uint64(v),
-+ )
-+ } else {
-+ paramValues = append(paramValues,
-+ uint64ToBytes(uint64(v))...,
-+ )
-+ }
-+
-+ case float64:
-+ paramTypes[i+i] = byte(fieldTypeDouble)
-+ paramTypes[i+i+1] = 0x00
-+
-+ if cap(paramValues)-len(paramValues)-8 >= 0 {
-+ paramValues = paramValues[:len(paramValues)+8]
-+ binary.LittleEndian.PutUint64(
-+ paramValues[len(paramValues)-8:],
-+ math.Float64bits(v),
-+ )
-+ } else {
-+ paramValues = append(paramValues,
-+ uint64ToBytes(math.Float64bits(v))...,
-+ )
-+ }
-+
-+ case bool:
-+ paramTypes[i+i] = byte(fieldTypeTiny)
-+ paramTypes[i+i+1] = 0x00
-+
-+ if v {
-+ paramValues = append(paramValues, 0x01)
-+ } else {
-+ paramValues = append(paramValues, 0x00)
-+ }
-+
-+ case []byte:
-+ // Common case (non-nil value) first
-+ if v != nil {
-+ paramTypes[i+i] = byte(fieldTypeString)
-+ paramTypes[i+i+1] = 0x00
-+
-+ if len(v) < longDataSize {
-+ paramValues = appendLengthEncodedInteger(paramValues,
-+ uint64(len(v)),
-+ )
-+ paramValues = append(paramValues, v...)
-+ } else {
-+ if err := stmt.writeCommandLongData(i, v); err != nil {
-+ return err
-+ }
-+ }
-+ continue
-+ }
-+
-+ // Handle []byte(nil) as a NULL value
-+ nullMask[i/8] |= 1 << (uint(i) & 7)
-+ paramTypes[i+i] = byte(fieldTypeNULL)
-+ paramTypes[i+i+1] = 0x00
-+
-+ case string:
-+ paramTypes[i+i] = byte(fieldTypeString)
-+ paramTypes[i+i+1] = 0x00
-+
-+ if len(v) < longDataSize {
-+ paramValues = appendLengthEncodedInteger(paramValues,
-+ uint64(len(v)),
-+ )
-+ paramValues = append(paramValues, v...)
-+ } else {
-+ if err := stmt.writeCommandLongData(i, []byte(v)); err != nil {
-+ return err
-+ }
-+ }
-+
-+ case time.Time:
-+ paramTypes[i+i] = byte(fieldTypeString)
-+ paramTypes[i+i+1] = 0x00
-+
-+ var a [64]byte
-+ var b = a[:0]
-+
-+ if v.IsZero() {
-+ b = append(b, "0000-00-00"...)
-+ } else {
-+ b = v.In(mc.cfg.Loc).AppendFormat(b, timeFormat)
-+ }
-+
-+ paramValues = appendLengthEncodedInteger(paramValues,
-+ uint64(len(b)),
-+ )
-+ paramValues = append(paramValues, b...)
-+
-+ default:
-+ return fmt.Errorf("cannot convert type: %T", arg)
-+ }
-+ }
-+
-+ // Check if param values exceeded the available buffer
-+ // In that case we must build the data packet with the new values buffer
-+ if valuesCap != cap(paramValues) {
-+ data = append(data[:pos], paramValues...)
-+ if err = mc.buf.store(data); err != nil {
-+ errLog.Print(err)
-+ return errBadConnNoWrite
-+ }
-+ }
-+
-+ pos += len(paramValues)
-+ data = data[:pos]
-+ }
-+
-+ return mc.writePacket(data)
-+}
-+
-+func (mc *mysqlConn) discardResults() error {
-+ for mc.status&statusMoreResultsExists != 0 {
-+ resLen, err := mc.readResultSetHeaderPacket()
-+ if err != nil {
-+ return err
-+ }
-+ if resLen > 0 {
-+ // columns
-+ if err := mc.readUntilEOF(); err != nil {
-+ return err
-+ }
-+ // rows
-+ if err := mc.readUntilEOF(); err != nil {
-+ return err
-+ }
-+ }
-+ }
-+ return nil
-+}
-+
-+// http://dev.mysql.com/doc/internals/en/binary-protocol-resultset-row.html
-+func (rows *binaryRows) readRow(dest []driver.Value) error {
-+ data, err := rows.mc.readPacket()
-+ if err != nil {
-+ return err
-+ }
-+
-+ // packet indicator [1 byte]
-+ if data[0] != iOK {
-+ // EOF Packet
-+ if data[0] == iEOF && len(data) == 5 {
-+ rows.mc.status = readStatus(data[3:])
-+ rows.rs.done = true
-+ if !rows.HasNextResultSet() {
-+ rows.mc = nil
-+ }
-+ return io.EOF
-+ }
-+ mc := rows.mc
-+ rows.mc = nil
-+
-+ // Error otherwise
-+ return mc.handleErrorPacket(data)
-+ }
-+
-+ // NULL-bitmap, [(column-count + 7 + 2) / 8 bytes]
-+ pos := 1 + (len(dest)+7+2)>>3
-+ nullMask := data[1:pos]
-+
-+ for i := range dest {
-+ // Field is NULL
-+ // (byte >> bit-pos) % 2 == 1
-+ if ((nullMask[(i+2)>>3] >> uint((i+2)&7)) & 1) == 1 {
-+ dest[i] = nil
-+ continue
-+ }
-+
-+ // Convert to byte-coded string
-+ switch rows.rs.columns[i].fieldType {
-+ case fieldTypeNULL:
-+ dest[i] = nil
-+ continue
-+
-+ // Numeric Types
-+ case fieldTypeTiny:
-+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
-+ dest[i] = int64(data[pos])
-+ } else {
-+ dest[i] = int64(int8(data[pos]))
-+ }
-+ pos++
-+ continue
-+
-+ case fieldTypeShort, fieldTypeYear:
-+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
-+ dest[i] = int64(binary.LittleEndian.Uint16(data[pos : pos+2]))
-+ } else {
-+ dest[i] = int64(int16(binary.LittleEndian.Uint16(data[pos : pos+2])))
-+ }
-+ pos += 2
-+ continue
-+
-+ case fieldTypeInt24, fieldTypeLong:
-+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
-+ dest[i] = int64(binary.LittleEndian.Uint32(data[pos : pos+4]))
-+ } else {
-+ dest[i] = int64(int32(binary.LittleEndian.Uint32(data[pos : pos+4])))
-+ }
-+ pos += 4
-+ continue
-+
-+ case fieldTypeLongLong:
-+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
-+ val := binary.LittleEndian.Uint64(data[pos : pos+8])
-+ if val > math.MaxInt64 {
-+ dest[i] = uint64ToString(val)
-+ } else {
-+ dest[i] = int64(val)
-+ }
-+ } else {
-+ dest[i] = int64(binary.LittleEndian.Uint64(data[pos : pos+8]))
-+ }
-+ pos += 8
-+ continue
-+
-+ case fieldTypeFloat:
-+ dest[i] = math.Float32frombits(binary.LittleEndian.Uint32(data[pos : pos+4]))
-+ pos += 4
-+ continue
-+
-+ case fieldTypeDouble:
-+ dest[i] = math.Float64frombits(binary.LittleEndian.Uint64(data[pos : pos+8]))
-+ pos += 8
-+ continue
-+
-+ // Length coded Binary Strings
-+ case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar,
-+ fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB,
-+ fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB,
-+ fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON:
-+ var isNull bool
-+ var n int
-+ dest[i], isNull, n, err = readLengthEncodedString(data[pos:])
-+ pos += n
-+ if err == nil {
-+ if !isNull {
-+ continue
-+ } else {
-+ dest[i] = nil
-+ continue
-+ }
-+ }
-+ return err
-+
-+ case
-+ fieldTypeDate, fieldTypeNewDate, // Date YYYY-MM-DD
-+ fieldTypeTime, // Time [-][H]HH:MM:SS[.fractal]
-+ fieldTypeTimestamp, fieldTypeDateTime: // Timestamp YYYY-MM-DD HH:MM:SS[.fractal]
-+
-+ num, isNull, n := readLengthEncodedInteger(data[pos:])
-+ pos += n
-+
-+ switch {
-+ case isNull:
-+ dest[i] = nil
-+ continue
-+ case rows.rs.columns[i].fieldType == fieldTypeTime:
-+ // database/sql does not support an equivalent to TIME, return a string
-+ var dstlen uint8
-+ switch decimals := rows.rs.columns[i].decimals; decimals {
-+ case 0x00, 0x1f:
-+ dstlen = 8
-+ case 1, 2, 3, 4, 5, 6:
-+ dstlen = 8 + 1 + decimals
-+ default:
-+ return fmt.Errorf(
-+ "protocol error, illegal decimals value %d",
-+ rows.rs.columns[i].decimals,
-+ )
-+ }
-+ dest[i], err = formatBinaryTime(data[pos:pos+int(num)], dstlen)
-+ case rows.mc.parseTime:
-+ dest[i], err = parseBinaryDateTime(num, data[pos:], rows.mc.cfg.Loc)
-+ default:
-+ var dstlen uint8
-+ if rows.rs.columns[i].fieldType == fieldTypeDate {
-+ dstlen = 10
-+ } else {
-+ switch decimals := rows.rs.columns[i].decimals; decimals {
-+ case 0x00, 0x1f:
-+ dstlen = 19
-+ case 1, 2, 3, 4, 5, 6:
-+ dstlen = 19 + 1 + decimals
-+ default:
-+ return fmt.Errorf(
-+ "protocol error, illegal decimals value %d",
-+ rows.rs.columns[i].decimals,
-+ )
-+ }
-+ }
-+ dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen)
-+ }
-+
-+ if err == nil {
-+ pos += int(num)
-+ continue
-+ } else {
-+ return err
-+ }
-+
-+ // Please report if this happens!
-+ default:
-+ return fmt.Errorf("unknown field type %d", rows.rs.columns[i].fieldType)
-+ }
-+ }
-+
-+ return nil
-+}
-diff --git a/vendor/github.com/go-sql-driver/mysql/packets_test.go b/vendor/github.com/go-sql-driver/mysql/packets_test.go
-new file mode 100644
-index 00000000000..b61e4dbf777
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/packets_test.go
-@@ -0,0 +1,336 @@
-+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-+//
-+// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved.
-+//
-+// This Source Code Form is subject to the terms of the Mozilla Public
-+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-+// You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+package mysql
-+
-+import (
-+ "bytes"
-+ "errors"
-+ "net"
-+ "testing"
-+ "time"
-+)
-+
-+var (
-+ errConnClosed = errors.New("connection is closed")
-+ errConnTooManyReads = errors.New("too many reads")
-+ errConnTooManyWrites = errors.New("too many writes")
-+)
-+
-+// struct to mock a net.Conn for testing purposes
-+type mockConn struct {
-+ laddr net.Addr
-+ raddr net.Addr
-+ data []byte
-+ written []byte
-+ queuedReplies [][]byte
-+ closed bool
-+ read int
-+ reads int
-+ writes int
-+ maxReads int
-+ maxWrites int
-+}
-+
-+func (m *mockConn) Read(b []byte) (n int, err error) {
-+ if m.closed {
-+ return 0, errConnClosed
-+ }
-+
-+ m.reads++
-+ if m.maxReads > 0 && m.reads > m.maxReads {
-+ return 0, errConnTooManyReads
-+ }
-+
-+ n = copy(b, m.data)
-+ m.read += n
-+ m.data = m.data[n:]
-+ return
-+}
-+func (m *mockConn) Write(b []byte) (n int, err error) {
-+ if m.closed {
-+ return 0, errConnClosed
-+ }
-+
-+ m.writes++
-+ if m.maxWrites > 0 && m.writes > m.maxWrites {
-+ return 0, errConnTooManyWrites
-+ }
-+
-+ n = len(b)
-+ m.written = append(m.written, b...)
-+
-+ if n > 0 && len(m.queuedReplies) > 0 {
-+ m.data = m.queuedReplies[0]
-+ m.queuedReplies = m.queuedReplies[1:]
-+ }
-+ return
-+}
-+func (m *mockConn) Close() error {
-+ m.closed = true
-+ return nil
-+}
-+func (m *mockConn) LocalAddr() net.Addr {
-+ return m.laddr
-+}
-+func (m *mockConn) RemoteAddr() net.Addr {
-+ return m.raddr
-+}
-+func (m *mockConn) SetDeadline(t time.Time) error {
-+ return nil
-+}
-+func (m *mockConn) SetReadDeadline(t time.Time) error {
-+ return nil
-+}
-+func (m *mockConn) SetWriteDeadline(t time.Time) error {
-+ return nil
-+}
-+
-+// make sure mockConn implements the net.Conn interface
-+var _ net.Conn = new(mockConn)
-+
-+func newRWMockConn(sequence uint8) (*mockConn, *mysqlConn) {
-+ conn := new(mockConn)
-+ mc := &mysqlConn{
-+ buf: newBuffer(conn),
-+ cfg: NewConfig(),
-+ netConn: conn,
-+ closech: make(chan struct{}),
-+ maxAllowedPacket: defaultMaxAllowedPacket,
-+ sequence: sequence,
-+ }
-+ return conn, mc
-+}
-+
-+func TestReadPacketSingleByte(t *testing.T) {
-+ conn := new(mockConn)
-+ mc := &mysqlConn{
-+ buf: newBuffer(conn),
-+ }
-+
-+ conn.data = []byte{0x01, 0x00, 0x00, 0x00, 0xff}
-+ conn.maxReads = 1
-+ packet, err := mc.readPacket()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ if len(packet) != 1 {
-+ t.Fatalf("unexpected packet length: expected %d, got %d", 1, len(packet))
-+ }
-+ if packet[0] != 0xff {
-+ t.Fatalf("unexpected packet content: expected %x, got %x", 0xff, packet[0])
-+ }
-+}
-+
-+func TestReadPacketWrongSequenceID(t *testing.T) {
-+ conn := new(mockConn)
-+ mc := &mysqlConn{
-+ buf: newBuffer(conn),
-+ }
-+
-+ // too low sequence id
-+ conn.data = []byte{0x01, 0x00, 0x00, 0x00, 0xff}
-+ conn.maxReads = 1
-+ mc.sequence = 1
-+ _, err := mc.readPacket()
-+ if err != ErrPktSync {
-+ t.Errorf("expected ErrPktSync, got %v", err)
-+ }
-+
-+ // reset
-+ conn.reads = 0
-+ mc.sequence = 0
-+ mc.buf = newBuffer(conn)
-+
-+ // too high sequence id
-+ conn.data = []byte{0x01, 0x00, 0x00, 0x42, 0xff}
-+ _, err = mc.readPacket()
-+ if err != ErrPktSyncMul {
-+ t.Errorf("expected ErrPktSyncMul, got %v", err)
-+ }
-+}
-+
-+func TestReadPacketSplit(t *testing.T) {
-+ conn := new(mockConn)
-+ mc := &mysqlConn{
-+ buf: newBuffer(conn),
-+ }
-+
-+ data := make([]byte, maxPacketSize*2+4*3)
-+ const pkt2ofs = maxPacketSize + 4
-+ const pkt3ofs = 2 * (maxPacketSize + 4)
-+
-+ // case 1: payload has length maxPacketSize
-+ data = data[:pkt2ofs+4]
-+
-+ // 1st packet has maxPacketSize length and sequence id 0
-+ // ff ff ff 00 ...
-+ data[0] = 0xff
-+ data[1] = 0xff
-+ data[2] = 0xff
-+
-+ // mark the payload start and end of 1st packet so that we can check if the
-+ // content was correctly appended
-+ data[4] = 0x11
-+ data[maxPacketSize+3] = 0x22
-+
-+ // 2nd packet has payload length 0 and squence id 1
-+ // 00 00 00 01
-+ data[pkt2ofs+3] = 0x01
-+
-+ conn.data = data
-+ conn.maxReads = 3
-+ packet, err := mc.readPacket()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ if len(packet) != maxPacketSize {
-+ t.Fatalf("unexpected packet length: expected %d, got %d", maxPacketSize, len(packet))
-+ }
-+ if packet[0] != 0x11 {
-+ t.Fatalf("unexpected payload start: expected %x, got %x", 0x11, packet[0])
-+ }
-+ if packet[maxPacketSize-1] != 0x22 {
-+ t.Fatalf("unexpected payload end: expected %x, got %x", 0x22, packet[maxPacketSize-1])
-+ }
-+
-+ // case 2: payload has length which is a multiple of maxPacketSize
-+ data = data[:cap(data)]
-+
-+ // 2nd packet now has maxPacketSize length
-+ data[pkt2ofs] = 0xff
-+ data[pkt2ofs+1] = 0xff
-+ data[pkt2ofs+2] = 0xff
-+
-+ // mark the payload start and end of the 2nd packet
-+ data[pkt2ofs+4] = 0x33
-+ data[pkt2ofs+maxPacketSize+3] = 0x44
-+
-+ // 3rd packet has payload length 0 and squence id 2
-+ // 00 00 00 02
-+ data[pkt3ofs+3] = 0x02
-+
-+ conn.data = data
-+ conn.reads = 0
-+ conn.maxReads = 5
-+ mc.sequence = 0
-+ packet, err = mc.readPacket()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ if len(packet) != 2*maxPacketSize {
-+ t.Fatalf("unexpected packet length: expected %d, got %d", 2*maxPacketSize, len(packet))
-+ }
-+ if packet[0] != 0x11 {
-+ t.Fatalf("unexpected payload start: expected %x, got %x", 0x11, packet[0])
-+ }
-+ if packet[2*maxPacketSize-1] != 0x44 {
-+ t.Fatalf("unexpected payload end: expected %x, got %x", 0x44, packet[2*maxPacketSize-1])
-+ }
-+
-+ // case 3: payload has a length larger maxPacketSize, which is not an exact
-+ // multiple of it
-+ data = data[:pkt2ofs+4+42]
-+ data[pkt2ofs] = 0x2a
-+ data[pkt2ofs+1] = 0x00
-+ data[pkt2ofs+2] = 0x00
-+ data[pkt2ofs+4+41] = 0x44
-+
-+ conn.data = data
-+ conn.reads = 0
-+ conn.maxReads = 4
-+ mc.sequence = 0
-+ packet, err = mc.readPacket()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ if len(packet) != maxPacketSize+42 {
-+ t.Fatalf("unexpected packet length: expected %d, got %d", maxPacketSize+42, len(packet))
-+ }
-+ if packet[0] != 0x11 {
-+ t.Fatalf("unexpected payload start: expected %x, got %x", 0x11, packet[0])
-+ }
-+ if packet[maxPacketSize+41] != 0x44 {
-+ t.Fatalf("unexpected payload end: expected %x, got %x", 0x44, packet[maxPacketSize+41])
-+ }
-+}
-+
-+func TestReadPacketFail(t *testing.T) {
-+ conn := new(mockConn)
-+ mc := &mysqlConn{
-+ buf: newBuffer(conn),
-+ closech: make(chan struct{}),
-+ }
-+
-+ // illegal empty (stand-alone) packet
-+ conn.data = []byte{0x00, 0x00, 0x00, 0x00}
-+ conn.maxReads = 1
-+ _, err := mc.readPacket()
-+ if err != ErrInvalidConn {
-+ t.Errorf("expected ErrInvalidConn, got %v", err)
-+ }
-+
-+ // reset
-+ conn.reads = 0
-+ mc.sequence = 0
-+ mc.buf = newBuffer(conn)
-+
-+ // fail to read header
-+ conn.closed = true
-+ _, err = mc.readPacket()
-+ if err != ErrInvalidConn {
-+ t.Errorf("expected ErrInvalidConn, got %v", err)
-+ }
-+
-+ // reset
-+ conn.closed = false
-+ conn.reads = 0
-+ mc.sequence = 0
-+ mc.buf = newBuffer(conn)
-+
-+ // fail to read body
-+ conn.maxReads = 1
-+ _, err = mc.readPacket()
-+ if err != ErrInvalidConn {
-+ t.Errorf("expected ErrInvalidConn, got %v", err)
-+ }
-+}
-+
-+// https://github.com/go-sql-driver/mysql/pull/801
-+// not-NUL terminated plugin_name in init packet
-+func TestRegression801(t *testing.T) {
-+ conn := new(mockConn)
-+ mc := &mysqlConn{
-+ buf: newBuffer(conn),
-+ cfg: new(Config),
-+ sequence: 42,
-+ closech: make(chan struct{}),
-+ }
-+
-+ conn.data = []byte{72, 0, 0, 42, 10, 53, 46, 53, 46, 56, 0, 165, 0, 0, 0,
-+ 60, 70, 63, 58, 68, 104, 34, 97, 0, 223, 247, 33, 2, 0, 15, 128, 21, 0,
-+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 98, 120, 114, 47, 85, 75, 109, 99, 51, 77,
-+ 50, 64, 0, 109, 121, 115, 113, 108, 95, 110, 97, 116, 105, 118, 101, 95,
-+ 112, 97, 115, 115, 119, 111, 114, 100}
-+ conn.maxReads = 1
-+
-+ authData, pluginName, err := mc.readHandshakePacket()
-+ if err != nil {
-+ t.Fatalf("got error: %v", err)
-+ }
-+
-+ if pluginName != "mysql_native_password" {
-+ t.Errorf("expected plugin name 'mysql_native_password', got '%s'", pluginName)
-+ }
-+
-+ expectedAuthData := []byte{60, 70, 63, 58, 68, 104, 34, 97, 98, 120, 114,
-+ 47, 85, 75, 109, 99, 51, 77, 50, 64}
-+ if !bytes.Equal(authData, expectedAuthData) {
-+ t.Errorf("expected authData '%v', got '%v'", expectedAuthData, authData)
-+ }
-+}
-diff --git a/vendor/github.com/go-sql-driver/mysql/result.go b/vendor/github.com/go-sql-driver/mysql/result.go
-new file mode 100644
-index 00000000000..c6438d0347d
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/result.go
-@@ -0,0 +1,22 @@
-+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-+//
-+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
-+//
-+// This Source Code Form is subject to the terms of the Mozilla Public
-+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-+// You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+package mysql
-+
-+type mysqlResult struct {
-+ affectedRows int64
-+ insertId int64
-+}
-+
-+func (res *mysqlResult) LastInsertId() (int64, error) {
-+ return res.insertId, nil
-+}
-+
-+func (res *mysqlResult) RowsAffected() (int64, error) {
-+ return res.affectedRows, nil
-+}
-diff --git a/vendor/github.com/go-sql-driver/mysql/rows.go b/vendor/github.com/go-sql-driver/mysql/rows.go
-new file mode 100644
-index 00000000000..888bdb5f0ad
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/rows.go
-@@ -0,0 +1,223 @@
-+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-+//
-+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
-+//
-+// This Source Code Form is subject to the terms of the Mozilla Public
-+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-+// You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+package mysql
-+
-+import (
-+ "database/sql/driver"
-+ "io"
-+ "math"
-+ "reflect"
-+)
-+
-+type resultSet struct {
-+ columns []mysqlField
-+ columnNames []string
-+ done bool
-+}
-+
-+type mysqlRows struct {
-+ mc *mysqlConn
-+ rs resultSet
-+ finish func()
-+}
-+
-+type binaryRows struct {
-+ mysqlRows
-+}
-+
-+type textRows struct {
-+ mysqlRows
-+}
-+
-+func (rows *mysqlRows) Columns() []string {
-+ if rows.rs.columnNames != nil {
-+ return rows.rs.columnNames
-+ }
-+
-+ columns := make([]string, len(rows.rs.columns))
-+ if rows.mc != nil && rows.mc.cfg.ColumnsWithAlias {
-+ for i := range columns {
-+ if tableName := rows.rs.columns[i].tableName; len(tableName) > 0 {
-+ columns[i] = tableName + "." + rows.rs.columns[i].name
-+ } else {
-+ columns[i] = rows.rs.columns[i].name
-+ }
-+ }
-+ } else {
-+ for i := range columns {
-+ columns[i] = rows.rs.columns[i].name
-+ }
-+ }
-+
-+ rows.rs.columnNames = columns
-+ return columns
-+}
-+
-+func (rows *mysqlRows) ColumnTypeDatabaseTypeName(i int) string {
-+ return rows.rs.columns[i].typeDatabaseName()
-+}
-+
-+// func (rows *mysqlRows) ColumnTypeLength(i int) (length int64, ok bool) {
-+// return int64(rows.rs.columns[i].length), true
-+// }
-+
-+func (rows *mysqlRows) ColumnTypeNullable(i int) (nullable, ok bool) {
-+ return rows.rs.columns[i].flags&flagNotNULL == 0, true
-+}
-+
-+func (rows *mysqlRows) ColumnTypePrecisionScale(i int) (int64, int64, bool) {
-+ column := rows.rs.columns[i]
-+ decimals := int64(column.decimals)
-+
-+ switch column.fieldType {
-+ case fieldTypeDecimal, fieldTypeNewDecimal:
-+ if decimals > 0 {
-+ return int64(column.length) - 2, decimals, true
-+ }
-+ return int64(column.length) - 1, decimals, true
-+ case fieldTypeTimestamp, fieldTypeDateTime, fieldTypeTime:
-+ return decimals, decimals, true
-+ case fieldTypeFloat, fieldTypeDouble:
-+ if decimals == 0x1f {
-+ return math.MaxInt64, math.MaxInt64, true
-+ }
-+ return math.MaxInt64, decimals, true
-+ }
-+
-+ return 0, 0, false
-+}
-+
-+func (rows *mysqlRows) ColumnTypeScanType(i int) reflect.Type {
-+ return rows.rs.columns[i].scanType()
-+}
-+
-+func (rows *mysqlRows) Close() (err error) {
-+ if f := rows.finish; f != nil {
-+ f()
-+ rows.finish = nil
-+ }
-+
-+ mc := rows.mc
-+ if mc == nil {
-+ return nil
-+ }
-+ if err := mc.error(); err != nil {
-+ return err
-+ }
-+
-+ // flip the buffer for this connection if we need to drain it.
-+ // note that for a successful query (i.e. one where rows.next()
-+ // has been called until it returns false), `rows.mc` will be nil
-+ // by the time the user calls `(*Rows).Close`, so we won't reach this
-+ // see: https://github.com/golang/go/commit/651ddbdb5056ded455f47f9c494c67b389622a47
-+ mc.buf.flip()
-+
-+ // Remove unread packets from stream
-+ if !rows.rs.done {
-+ err = mc.readUntilEOF()
-+ }
-+ if err == nil {
-+ if err = mc.discardResults(); err != nil {
-+ return err
-+ }
-+ }
-+
-+ rows.mc = nil
-+ return err
-+}
-+
-+func (rows *mysqlRows) HasNextResultSet() (b bool) {
-+ if rows.mc == nil {
-+ return false
-+ }
-+ return rows.mc.status&statusMoreResultsExists != 0
-+}
-+
-+func (rows *mysqlRows) nextResultSet() (int, error) {
-+ if rows.mc == nil {
-+ return 0, io.EOF
-+ }
-+ if err := rows.mc.error(); err != nil {
-+ return 0, err
-+ }
-+
-+ // Remove unread packets from stream
-+ if !rows.rs.done {
-+ if err := rows.mc.readUntilEOF(); err != nil {
-+ return 0, err
-+ }
-+ rows.rs.done = true
-+ }
-+
-+ if !rows.HasNextResultSet() {
-+ rows.mc = nil
-+ return 0, io.EOF
-+ }
-+ rows.rs = resultSet{}
-+ return rows.mc.readResultSetHeaderPacket()
-+}
-+
-+func (rows *mysqlRows) nextNotEmptyResultSet() (int, error) {
-+ for {
-+ resLen, err := rows.nextResultSet()
-+ if err != nil {
-+ return 0, err
-+ }
-+
-+ if resLen > 0 {
-+ return resLen, nil
-+ }
-+
-+ rows.rs.done = true
-+ }
-+}
-+
-+func (rows *binaryRows) NextResultSet() error {
-+ resLen, err := rows.nextNotEmptyResultSet()
-+ if err != nil {
-+ return err
-+ }
-+
-+ rows.rs.columns, err = rows.mc.readColumns(resLen)
-+ return err
-+}
-+
-+func (rows *binaryRows) Next(dest []driver.Value) error {
-+ if mc := rows.mc; mc != nil {
-+ if err := mc.error(); err != nil {
-+ return err
-+ }
-+
-+ // Fetch next row from stream
-+ return rows.readRow(dest)
-+ }
-+ return io.EOF
-+}
-+
-+func (rows *textRows) NextResultSet() (err error) {
-+ resLen, err := rows.nextNotEmptyResultSet()
-+ if err != nil {
-+ return err
-+ }
-+
-+ rows.rs.columns, err = rows.mc.readColumns(resLen)
-+ return err
-+}
-+
-+func (rows *textRows) Next(dest []driver.Value) error {
-+ if mc := rows.mc; mc != nil {
-+ if err := mc.error(); err != nil {
-+ return err
-+ }
-+
-+ // Fetch next row from stream
-+ return rows.readRow(dest)
-+ }
-+ return io.EOF
-+}
-diff --git a/vendor/github.com/go-sql-driver/mysql/statement.go b/vendor/github.com/go-sql-driver/mysql/statement.go
-new file mode 100644
-index 00000000000..f7e370939a1
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/statement.go
-@@ -0,0 +1,204 @@
-+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-+//
-+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
-+//
-+// This Source Code Form is subject to the terms of the Mozilla Public
-+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-+// You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+package mysql
-+
-+import (
-+ "database/sql/driver"
-+ "fmt"
-+ "io"
-+ "reflect"
-+)
-+
-+type mysqlStmt struct {
-+ mc *mysqlConn
-+ id uint32
-+ paramCount int
-+}
-+
-+func (stmt *mysqlStmt) Close() error {
-+ if stmt.mc == nil || stmt.mc.closed.IsSet() {
-+ // driver.Stmt.Close can be called more than once, thus this function
-+ // has to be idempotent.
-+ // See also Issue #450 and golang/go#16019.
-+ //errLog.Print(ErrInvalidConn)
-+ return driver.ErrBadConn
-+ }
-+
-+ err := stmt.mc.writeCommandPacketUint32(comStmtClose, stmt.id)
-+ stmt.mc = nil
-+ return err
-+}
-+
-+func (stmt *mysqlStmt) NumInput() int {
-+ return stmt.paramCount
-+}
-+
-+func (stmt *mysqlStmt) ColumnConverter(idx int) driver.ValueConverter {
-+ return converter{}
-+}
-+
-+func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
-+ if stmt.mc.closed.IsSet() {
-+ errLog.Print(ErrInvalidConn)
-+ return nil, driver.ErrBadConn
-+ }
-+ // Send command
-+ err := stmt.writeExecutePacket(args)
-+ if err != nil {
-+ return nil, stmt.mc.markBadConn(err)
-+ }
-+
-+ mc := stmt.mc
-+
-+ mc.affectedRows = 0
-+ mc.insertId = 0
-+
-+ // Read Result
-+ resLen, err := mc.readResultSetHeaderPacket()
-+ if err != nil {
-+ return nil, err
-+ }
-+
-+ if resLen > 0 {
-+ // Columns
-+ if err = mc.readUntilEOF(); err != nil {
-+ return nil, err
-+ }
-+
-+ // Rows
-+ if err := mc.readUntilEOF(); err != nil {
-+ return nil, err
-+ }
-+ }
-+
-+ if err := mc.discardResults(); err != nil {
-+ return nil, err
-+ }
-+
-+ return &mysqlResult{
-+ affectedRows: int64(mc.affectedRows),
-+ insertId: int64(mc.insertId),
-+ }, nil
-+}
-+
-+func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
-+ return stmt.query(args)
-+}
-+
-+func (stmt *mysqlStmt) query(args []driver.Value) (*binaryRows, error) {
-+ if stmt.mc.closed.IsSet() {
-+ errLog.Print(ErrInvalidConn)
-+ return nil, driver.ErrBadConn
-+ }
-+ // Send command
-+ err := stmt.writeExecutePacket(args)
-+ if err != nil {
-+ return nil, stmt.mc.markBadConn(err)
-+ }
-+
-+ mc := stmt.mc
-+
-+ // Read Result
-+ resLen, err := mc.readResultSetHeaderPacket()
-+ if err != nil {
-+ return nil, err
-+ }
-+
-+ rows := new(binaryRows)
-+
-+ if resLen > 0 {
-+ rows.mc = mc
-+ rows.rs.columns, err = mc.readColumns(resLen)
-+ } else {
-+ rows.rs.done = true
-+
-+ switch err := rows.NextResultSet(); err {
-+ case nil, io.EOF:
-+ return rows, nil
-+ default:
-+ return nil, err
-+ }
-+ }
-+
-+ return rows, err
-+}
-+
-+type converter struct{}
-+
-+// ConvertValue mirrors the reference/default converter in database/sql/driver
-+// with _one_ exception. We support uint64 with their high bit and the default
-+// implementation does not. This function should be kept in sync with
-+// database/sql/driver defaultConverter.ConvertValue() except for that
-+// deliberate difference.
-+func (c converter) ConvertValue(v interface{}) (driver.Value, error) {
-+ if driver.IsValue(v) {
-+ return v, nil
-+ }
-+
-+ if vr, ok := v.(driver.Valuer); ok {
-+ sv, err := callValuerValue(vr)
-+ if err != nil {
-+ return nil, err
-+ }
-+ if !driver.IsValue(sv) {
-+ return nil, fmt.Errorf("non-Value type %T returned from Value", sv)
-+ }
-+ return sv, nil
-+ }
-+
-+ rv := reflect.ValueOf(v)
-+ switch rv.Kind() {
-+ case reflect.Ptr:
-+ // indirect pointers
-+ if rv.IsNil() {
-+ return nil, nil
-+ } else {
-+ return c.ConvertValue(rv.Elem().Interface())
-+ }
-+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-+ return rv.Int(), nil
-+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
-+ return rv.Uint(), nil
-+ case reflect.Float32, reflect.Float64:
-+ return rv.Float(), nil
-+ case reflect.Bool:
-+ return rv.Bool(), nil
-+ case reflect.Slice:
-+ ek := rv.Type().Elem().Kind()
-+ if ek == reflect.Uint8 {
-+ return rv.Bytes(), nil
-+ }
-+ return nil, fmt.Errorf("unsupported type %T, a slice of %s", v, ek)
-+ case reflect.String:
-+ return rv.String(), nil
-+ }
-+ return nil, fmt.Errorf("unsupported type %T, a %s", v, rv.Kind())
-+}
-+
-+var valuerReflectType = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
-+
-+// callValuerValue returns vr.Value(), with one exception:
-+// If vr.Value is an auto-generated method on a pointer type and the
-+// pointer is nil, it would panic at runtime in the panicwrap
-+// method. Treat it like nil instead.
-+//
-+// This is so people can implement driver.Value on value types and
-+// still use nil pointers to those types to mean nil/NULL, just like
-+// string/*string.
-+//
-+// This is an exact copy of the same-named unexported function from the
-+// database/sql package.
-+func callValuerValue(vr driver.Valuer) (v driver.Value, err error) {
-+ if rv := reflect.ValueOf(vr); rv.Kind() == reflect.Ptr &&
-+ rv.IsNil() &&
-+ rv.Type().Elem().Implements(valuerReflectType) {
-+ return nil, nil
-+ }
-+ return vr.Value()
-+}
-diff --git a/vendor/github.com/go-sql-driver/mysql/statement_test.go b/vendor/github.com/go-sql-driver/mysql/statement_test.go
-new file mode 100644
-index 00000000000..4b9914f8ec4
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/statement_test.go
-@@ -0,0 +1,126 @@
-+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-+//
-+// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
-+//
-+// This Source Code Form is subject to the terms of the Mozilla Public
-+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-+// You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+package mysql
-+
-+import (
-+ "bytes"
-+ "testing"
-+)
-+
-+func TestConvertDerivedString(t *testing.T) {
-+ type derived string
-+
-+ output, err := converter{}.ConvertValue(derived("value"))
-+ if err != nil {
-+ t.Fatal("Derived string type not convertible", err)
-+ }
-+
-+ if output != "value" {
-+ t.Fatalf("Derived string type not converted, got %#v %T", output, output)
-+ }
-+}
-+
-+func TestConvertDerivedByteSlice(t *testing.T) {
-+ type derived []uint8
-+
-+ output, err := converter{}.ConvertValue(derived("value"))
-+ if err != nil {
-+ t.Fatal("Byte slice not convertible", err)
-+ }
-+
-+ if bytes.Compare(output.([]byte), []byte("value")) != 0 {
-+ t.Fatalf("Byte slice not converted, got %#v %T", output, output)
-+ }
-+}
-+
-+func TestConvertDerivedUnsupportedSlice(t *testing.T) {
-+ type derived []int
-+
-+ _, err := converter{}.ConvertValue(derived{1})
-+ if err == nil || err.Error() != "unsupported type mysql.derived, a slice of int" {
-+ t.Fatal("Unexpected error", err)
-+ }
-+}
-+
-+func TestConvertDerivedBool(t *testing.T) {
-+ type derived bool
-+
-+ output, err := converter{}.ConvertValue(derived(true))
-+ if err != nil {
-+ t.Fatal("Derived bool type not convertible", err)
-+ }
-+
-+ if output != true {
-+ t.Fatalf("Derived bool type not converted, got %#v %T", output, output)
-+ }
-+}
-+
-+func TestConvertPointer(t *testing.T) {
-+ str := "value"
-+
-+ output, err := converter{}.ConvertValue(&str)
-+ if err != nil {
-+ t.Fatal("Pointer type not convertible", err)
-+ }
-+
-+ if output != "value" {
-+ t.Fatalf("Pointer type not converted, got %#v %T", output, output)
-+ }
-+}
-+
-+func TestConvertSignedIntegers(t *testing.T) {
-+ values := []interface{}{
-+ int8(-42),
-+ int16(-42),
-+ int32(-42),
-+ int64(-42),
-+ int(-42),
-+ }
-+
-+ for _, value := range values {
-+ output, err := converter{}.ConvertValue(value)
-+ if err != nil {
-+ t.Fatalf("%T type not convertible %s", value, err)
-+ }
-+
-+ if output != int64(-42) {
-+ t.Fatalf("%T type not converted, got %#v %T", value, output, output)
-+ }
-+ }
-+}
-+
-+func TestConvertUnsignedIntegers(t *testing.T) {
-+ values := []interface{}{
-+ uint8(42),
-+ uint16(42),
-+ uint32(42),
-+ uint64(42),
-+ uint(42),
-+ }
-+
-+ for _, value := range values {
-+ output, err := converter{}.ConvertValue(value)
-+ if err != nil {
-+ t.Fatalf("%T type not convertible %s", value, err)
-+ }
-+
-+ if output != uint64(42) {
-+ t.Fatalf("%T type not converted, got %#v %T", value, output, output)
-+ }
-+ }
-+
-+ output, err := converter{}.ConvertValue(^uint64(0))
-+ if err != nil {
-+ t.Fatal("uint64 high-bit not convertible", err)
-+ }
-+
-+ if output != ^uint64(0) {
-+ t.Fatalf("uint64 high-bit converted, got %#v %T", output, output)
-+ }
-+}
-diff --git a/vendor/github.com/go-sql-driver/mysql/transaction.go b/vendor/github.com/go-sql-driver/mysql/transaction.go
-new file mode 100644
-index 00000000000..417d72793b1
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/transaction.go
-@@ -0,0 +1,31 @@
-+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-+//
-+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
-+//
-+// This Source Code Form is subject to the terms of the Mozilla Public
-+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-+// You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+package mysql
-+
-+type mysqlTx struct {
-+ mc *mysqlConn
-+}
-+
-+func (tx *mysqlTx) Commit() (err error) {
-+ if tx.mc == nil || tx.mc.closed.IsSet() {
-+ return ErrInvalidConn
-+ }
-+ err = tx.mc.exec("COMMIT")
-+ tx.mc = nil
-+ return
-+}
-+
-+func (tx *mysqlTx) Rollback() (err error) {
-+ if tx.mc == nil || tx.mc.closed.IsSet() {
-+ return ErrInvalidConn
-+ }
-+ err = tx.mc.exec("ROLLBACK")
-+ tx.mc = nil
-+ return
-+}
-diff --git a/vendor/github.com/go-sql-driver/mysql/utils.go b/vendor/github.com/go-sql-driver/mysql/utils.go
-new file mode 100644
-index 00000000000..9552e80b5ac
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/utils.go
-@@ -0,0 +1,701 @@
-+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-+//
-+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
-+//
-+// This Source Code Form is subject to the terms of the Mozilla Public
-+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-+// You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+package mysql
-+
-+import (
-+ "crypto/tls"
-+ "database/sql"
-+ "database/sql/driver"
-+ "encoding/binary"
-+ "errors"
-+ "fmt"
-+ "io"
-+ "strconv"
-+ "strings"
-+ "sync"
-+ "sync/atomic"
-+ "time"
-+)
-+
-+// Registry for custom tls.Configs
-+var (
-+ tlsConfigLock sync.RWMutex
-+ tlsConfigRegistry map[string]*tls.Config
-+)
-+
-+// RegisterTLSConfig registers a custom tls.Config to be used with sql.Open.
-+// Use the key as a value in the DSN where tls=value.
-+//
-+// Note: The provided tls.Config is exclusively owned by the driver after
-+// registering it.
-+//
-+// rootCertPool := x509.NewCertPool()
-+// pem, err := ioutil.ReadFile("/path/ca-cert.pem")
-+// if err != nil {
-+// log.Fatal(err)
-+// }
-+// if ok := rootCertPool.AppendCertsFromPEM(pem); !ok {
-+// log.Fatal("Failed to append PEM.")
-+// }
-+// clientCert := make([]tls.Certificate, 0, 1)
-+// certs, err := tls.LoadX509KeyPair("/path/client-cert.pem", "/path/client-key.pem")
-+// if err != nil {
-+// log.Fatal(err)
-+// }
-+// clientCert = append(clientCert, certs)
-+// mysql.RegisterTLSConfig("custom", &tls.Config{
-+// RootCAs: rootCertPool,
-+// Certificates: clientCert,
-+// })
-+// db, err := sql.Open("mysql", "user@tcp(localhost:3306)/test?tls=custom")
-+//
-+func RegisterTLSConfig(key string, config *tls.Config) error {
-+ if _, isBool := readBool(key); isBool || strings.ToLower(key) == "skip-verify" || strings.ToLower(key) == "preferred" {
-+ return fmt.Errorf("key '%s' is reserved", key)
-+ }
-+
-+ tlsConfigLock.Lock()
-+ if tlsConfigRegistry == nil {
-+ tlsConfigRegistry = make(map[string]*tls.Config)
-+ }
-+
-+ tlsConfigRegistry[key] = config
-+ tlsConfigLock.Unlock()
-+ return nil
-+}
-+
-+// DeregisterTLSConfig removes the tls.Config associated with key.
-+func DeregisterTLSConfig(key string) {
-+ tlsConfigLock.Lock()
-+ if tlsConfigRegistry != nil {
-+ delete(tlsConfigRegistry, key)
-+ }
-+ tlsConfigLock.Unlock()
-+}
-+
-+func getTLSConfigClone(key string) (config *tls.Config) {
-+ tlsConfigLock.RLock()
-+ if v, ok := tlsConfigRegistry[key]; ok {
-+ config = v.Clone()
-+ }
-+ tlsConfigLock.RUnlock()
-+ return
-+}
-+
-+// Returns the bool value of the input.
-+// The 2nd return value indicates if the input was a valid bool value
-+func readBool(input string) (value bool, valid bool) {
-+ switch input {
-+ case "1", "true", "TRUE", "True":
-+ return true, true
-+ case "0", "false", "FALSE", "False":
-+ return false, true
-+ }
-+
-+ // Not a valid bool value
-+ return
-+}
-+
-+/******************************************************************************
-+* Time related utils *
-+******************************************************************************/
-+
-+func parseDateTime(str string, loc *time.Location) (t time.Time, err error) {
-+ base := "0000-00-00 00:00:00.0000000"
-+ switch len(str) {
-+ case 10, 19, 21, 22, 23, 24, 25, 26: // up to "YYYY-MM-DD HH:MM:SS.MMMMMM"
-+ if str == base[:len(str)] {
-+ return
-+ }
-+ t, err = time.Parse(timeFormat[:len(str)], str)
-+ default:
-+ err = fmt.Errorf("invalid time string: %s", str)
-+ return
-+ }
-+
-+ // Adjust location
-+ if err == nil && loc != time.UTC {
-+ y, mo, d := t.Date()
-+ h, mi, s := t.Clock()
-+ t, err = time.Date(y, mo, d, h, mi, s, t.Nanosecond(), loc), nil
-+ }
-+
-+ return
-+}
-+
-+func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Value, error) {
-+ switch num {
-+ case 0:
-+ return time.Time{}, nil
-+ case 4:
-+ return time.Date(
-+ int(binary.LittleEndian.Uint16(data[:2])), // year
-+ time.Month(data[2]), // month
-+ int(data[3]), // day
-+ 0, 0, 0, 0,
-+ loc,
-+ ), nil
-+ case 7:
-+ return time.Date(
-+ int(binary.LittleEndian.Uint16(data[:2])), // year
-+ time.Month(data[2]), // month
-+ int(data[3]), // day
-+ int(data[4]), // hour
-+ int(data[5]), // minutes
-+ int(data[6]), // seconds
-+ 0,
-+ loc,
-+ ), nil
-+ case 11:
-+ return time.Date(
-+ int(binary.LittleEndian.Uint16(data[:2])), // year
-+ time.Month(data[2]), // month
-+ int(data[3]), // day
-+ int(data[4]), // hour
-+ int(data[5]), // minutes
-+ int(data[6]), // seconds
-+ int(binary.LittleEndian.Uint32(data[7:11]))*1000, // nanoseconds
-+ loc,
-+ ), nil
-+ }
-+ return nil, fmt.Errorf("invalid DATETIME packet length %d", num)
-+}
-+
-+// zeroDateTime is used in formatBinaryDateTime to avoid an allocation
-+// if the DATE or DATETIME has the zero value.
-+// It must never be changed.
-+// The current behavior depends on database/sql copying the result.
-+var zeroDateTime = []byte("0000-00-00 00:00:00.000000")
-+
-+const digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"
-+const digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999"
-+
-+func appendMicrosecs(dst, src []byte, decimals int) []byte {
-+ if decimals <= 0 {
-+ return dst
-+ }
-+ if len(src) == 0 {
-+ return append(dst, ".000000"[:decimals+1]...)
-+ }
-+
-+ microsecs := binary.LittleEndian.Uint32(src[:4])
-+ p1 := byte(microsecs / 10000)
-+ microsecs -= 10000 * uint32(p1)
-+ p2 := byte(microsecs / 100)
-+ microsecs -= 100 * uint32(p2)
-+ p3 := byte(microsecs)
-+
-+ switch decimals {
-+ default:
-+ return append(dst, '.',
-+ digits10[p1], digits01[p1],
-+ digits10[p2], digits01[p2],
-+ digits10[p3], digits01[p3],
-+ )
-+ case 1:
-+ return append(dst, '.',
-+ digits10[p1],
-+ )
-+ case 2:
-+ return append(dst, '.',
-+ digits10[p1], digits01[p1],
-+ )
-+ case 3:
-+ return append(dst, '.',
-+ digits10[p1], digits01[p1],
-+ digits10[p2],
-+ )
-+ case 4:
-+ return append(dst, '.',
-+ digits10[p1], digits01[p1],
-+ digits10[p2], digits01[p2],
-+ )
-+ case 5:
-+ return append(dst, '.',
-+ digits10[p1], digits01[p1],
-+ digits10[p2], digits01[p2],
-+ digits10[p3],
-+ )
-+ }
-+}
-+
-+func formatBinaryDateTime(src []byte, length uint8) (driver.Value, error) {
-+ // length expects the deterministic length of the zero value,
-+ // negative time and 100+ hours are automatically added if needed
-+ if len(src) == 0 {
-+ return zeroDateTime[:length], nil
-+ }
-+ var dst []byte // return value
-+ var p1, p2, p3 byte // current digit pair
-+
-+ switch length {
-+ case 10, 19, 21, 22, 23, 24, 25, 26:
-+ default:
-+ t := "DATE"
-+ if length > 10 {
-+ t += "TIME"
-+ }
-+ return nil, fmt.Errorf("illegal %s length %d", t, length)
-+ }
-+ switch len(src) {
-+ case 4, 7, 11:
-+ default:
-+ t := "DATE"
-+ if length > 10 {
-+ t += "TIME"
-+ }
-+ return nil, fmt.Errorf("illegal %s packet length %d", t, len(src))
-+ }
-+ dst = make([]byte, 0, length)
-+ // start with the date
-+ year := binary.LittleEndian.Uint16(src[:2])
-+ pt := year / 100
-+ p1 = byte(year - 100*uint16(pt))
-+ p2, p3 = src[2], src[3]
-+ dst = append(dst,
-+ digits10[pt], digits01[pt],
-+ digits10[p1], digits01[p1], '-',
-+ digits10[p2], digits01[p2], '-',
-+ digits10[p3], digits01[p3],
-+ )
-+ if length == 10 {
-+ return dst, nil
-+ }
-+ if len(src) == 4 {
-+ return append(dst, zeroDateTime[10:length]...), nil
-+ }
-+ dst = append(dst, ' ')
-+ p1 = src[4] // hour
-+ src = src[5:]
-+
-+ // p1 is 2-digit hour, src is after hour
-+ p2, p3 = src[0], src[1]
-+ dst = append(dst,
-+ digits10[p1], digits01[p1], ':',
-+ digits10[p2], digits01[p2], ':',
-+ digits10[p3], digits01[p3],
-+ )
-+ return appendMicrosecs(dst, src[2:], int(length)-20), nil
-+}
-+
-+func formatBinaryTime(src []byte, length uint8) (driver.Value, error) {
-+ // length expects the deterministic length of the zero value,
-+ // negative time and 100+ hours are automatically added if needed
-+ if len(src) == 0 {
-+ return zeroDateTime[11 : 11+length], nil
-+ }
-+ var dst []byte // return value
-+
-+ switch length {
-+ case
-+ 8, // time (can be up to 10 when negative and 100+ hours)
-+ 10, 11, 12, 13, 14, 15: // time with fractional seconds
-+ default:
-+ return nil, fmt.Errorf("illegal TIME length %d", length)
-+ }
-+ switch len(src) {
-+ case 8, 12:
-+ default:
-+ return nil, fmt.Errorf("invalid TIME packet length %d", len(src))
-+ }
-+ // +2 to enable negative time and 100+ hours
-+ dst = make([]byte, 0, length+2)
-+ if src[0] == 1 {
-+ dst = append(dst, '-')
-+ }
-+ days := binary.LittleEndian.Uint32(src[1:5])
-+ hours := int64(days)*24 + int64(src[5])
-+
-+ if hours >= 100 {
-+ dst = strconv.AppendInt(dst, hours, 10)
-+ } else {
-+ dst = append(dst, digits10[hours], digits01[hours])
-+ }
-+
-+ min, sec := src[6], src[7]
-+ dst = append(dst, ':',
-+ digits10[min], digits01[min], ':',
-+ digits10[sec], digits01[sec],
-+ )
-+ return appendMicrosecs(dst, src[8:], int(length)-9), nil
-+}
-+
-+/******************************************************************************
-+* Convert from and to bytes *
-+******************************************************************************/
-+
-+func uint64ToBytes(n uint64) []byte {
-+ return []byte{
-+ byte(n),
-+ byte(n >> 8),
-+ byte(n >> 16),
-+ byte(n >> 24),
-+ byte(n >> 32),
-+ byte(n >> 40),
-+ byte(n >> 48),
-+ byte(n >> 56),
-+ }
-+}
-+
-+func uint64ToString(n uint64) []byte {
-+ var a [20]byte
-+ i := 20
-+
-+ // U+0030 = 0
-+ // ...
-+ // U+0039 = 9
-+
-+ var q uint64
-+ for n >= 10 {
-+ i--
-+ q = n / 10
-+ a[i] = uint8(n-q*10) + 0x30
-+ n = q
-+ }
-+
-+ i--
-+ a[i] = uint8(n) + 0x30
-+
-+ return a[i:]
-+}
-+
-+// treats string value as unsigned integer representation
-+func stringToInt(b []byte) int {
-+ val := 0
-+ for i := range b {
-+ val *= 10
-+ val += int(b[i] - 0x30)
-+ }
-+ return val
-+}
-+
-+// returns the string read as a bytes slice, wheter the value is NULL,
-+// the number of bytes read and an error, in case the string is longer than
-+// the input slice
-+func readLengthEncodedString(b []byte) ([]byte, bool, int, error) {
-+ // Get length
-+ num, isNull, n := readLengthEncodedInteger(b)
-+ if num < 1 {
-+ return b[n:n], isNull, n, nil
-+ }
-+
-+ n += int(num)
-+
-+ // Check data length
-+ if len(b) >= n {
-+ return b[n-int(num) : n : n], false, n, nil
-+ }
-+ return nil, false, n, io.EOF
-+}
-+
-+// returns the number of bytes skipped and an error, in case the string is
-+// longer than the input slice
-+func skipLengthEncodedString(b []byte) (int, error) {
-+ // Get length
-+ num, _, n := readLengthEncodedInteger(b)
-+ if num < 1 {
-+ return n, nil
-+ }
-+
-+ n += int(num)
-+
-+ // Check data length
-+ if len(b) >= n {
-+ return n, nil
-+ }
-+ return n, io.EOF
-+}
-+
-+// returns the number read, whether the value is NULL and the number of bytes read
-+func readLengthEncodedInteger(b []byte) (uint64, bool, int) {
-+ // See issue #349
-+ if len(b) == 0 {
-+ return 0, true, 1
-+ }
-+
-+ switch b[0] {
-+ // 251: NULL
-+ case 0xfb:
-+ return 0, true, 1
-+
-+ // 252: value of following 2
-+ case 0xfc:
-+ return uint64(b[1]) | uint64(b[2])<<8, false, 3
-+
-+ // 253: value of following 3
-+ case 0xfd:
-+ return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16, false, 4
-+
-+ // 254: value of following 8
-+ case 0xfe:
-+ return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 |
-+ uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 |
-+ uint64(b[7])<<48 | uint64(b[8])<<56,
-+ false, 9
-+ }
-+
-+ // 0-250: value of first byte
-+ return uint64(b[0]), false, 1
-+}
-+
-+// encodes a uint64 value and appends it to the given bytes slice
-+func appendLengthEncodedInteger(b []byte, n uint64) []byte {
-+ switch {
-+ case n <= 250:
-+ return append(b, byte(n))
-+
-+ case n <= 0xffff:
-+ return append(b, 0xfc, byte(n), byte(n>>8))
-+
-+ case n <= 0xffffff:
-+ return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16))
-+ }
-+ return append(b, 0xfe, byte(n), byte(n>>8), byte(n>>16), byte(n>>24),
-+ byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56))
-+}
-+
-+// reserveBuffer checks cap(buf) and expand buffer to len(buf) + appendSize.
-+// If cap(buf) is not enough, reallocate new buffer.
-+func reserveBuffer(buf []byte, appendSize int) []byte {
-+ newSize := len(buf) + appendSize
-+ if cap(buf) < newSize {
-+ // Grow buffer exponentially
-+ newBuf := make([]byte, len(buf)*2+appendSize)
-+ copy(newBuf, buf)
-+ buf = newBuf
-+ }
-+ return buf[:newSize]
-+}
-+
-+// escapeBytesBackslash escapes []byte with backslashes (\)
-+// This escapes the contents of a string (provided as []byte) by adding backslashes before special
-+// characters, and turning others into specific escape sequences, such as
-+// turning newlines into \n and null bytes into \0.
-+// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L823-L932
-+func escapeBytesBackslash(buf, v []byte) []byte {
-+ pos := len(buf)
-+ buf = reserveBuffer(buf, len(v)*2)
-+
-+ for _, c := range v {
-+ switch c {
-+ case '\x00':
-+ buf[pos] = '\\'
-+ buf[pos+1] = '0'
-+ pos += 2
-+ case '\n':
-+ buf[pos] = '\\'
-+ buf[pos+1] = 'n'
-+ pos += 2
-+ case '\r':
-+ buf[pos] = '\\'
-+ buf[pos+1] = 'r'
-+ pos += 2
-+ case '\x1a':
-+ buf[pos] = '\\'
-+ buf[pos+1] = 'Z'
-+ pos += 2
-+ case '\'':
-+ buf[pos] = '\\'
-+ buf[pos+1] = '\''
-+ pos += 2
-+ case '"':
-+ buf[pos] = '\\'
-+ buf[pos+1] = '"'
-+ pos += 2
-+ case '\\':
-+ buf[pos] = '\\'
-+ buf[pos+1] = '\\'
-+ pos += 2
-+ default:
-+ buf[pos] = c
-+ pos++
-+ }
-+ }
-+
-+ return buf[:pos]
-+}
-+
-+// escapeStringBackslash is similar to escapeBytesBackslash but for string.
-+func escapeStringBackslash(buf []byte, v string) []byte {
-+ pos := len(buf)
-+ buf = reserveBuffer(buf, len(v)*2)
-+
-+ for i := 0; i < len(v); i++ {
-+ c := v[i]
-+ switch c {
-+ case '\x00':
-+ buf[pos] = '\\'
-+ buf[pos+1] = '0'
-+ pos += 2
-+ case '\n':
-+ buf[pos] = '\\'
-+ buf[pos+1] = 'n'
-+ pos += 2
-+ case '\r':
-+ buf[pos] = '\\'
-+ buf[pos+1] = 'r'
-+ pos += 2
-+ case '\x1a':
-+ buf[pos] = '\\'
-+ buf[pos+1] = 'Z'
-+ pos += 2
-+ case '\'':
-+ buf[pos] = '\\'
-+ buf[pos+1] = '\''
-+ pos += 2
-+ case '"':
-+ buf[pos] = '\\'
-+ buf[pos+1] = '"'
-+ pos += 2
-+ case '\\':
-+ buf[pos] = '\\'
-+ buf[pos+1] = '\\'
-+ pos += 2
-+ default:
-+ buf[pos] = c
-+ pos++
-+ }
-+ }
-+
-+ return buf[:pos]
-+}
-+
-+// escapeBytesQuotes escapes apostrophes in []byte by doubling them up.
-+// This escapes the contents of a string by doubling up any apostrophes that
-+// it contains. This is used when the NO_BACKSLASH_ESCAPES SQL_MODE is in
-+// effect on the server.
-+// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L963-L1038
-+func escapeBytesQuotes(buf, v []byte) []byte {
-+ pos := len(buf)
-+ buf = reserveBuffer(buf, len(v)*2)
-+
-+ for _, c := range v {
-+ if c == '\'' {
-+ buf[pos] = '\''
-+ buf[pos+1] = '\''
-+ pos += 2
-+ } else {
-+ buf[pos] = c
-+ pos++
-+ }
-+ }
-+
-+ return buf[:pos]
-+}
-+
-+// escapeStringQuotes is similar to escapeBytesQuotes but for string.
-+func escapeStringQuotes(buf []byte, v string) []byte {
-+ pos := len(buf)
-+ buf = reserveBuffer(buf, len(v)*2)
-+
-+ for i := 0; i < len(v); i++ {
-+ c := v[i]
-+ if c == '\'' {
-+ buf[pos] = '\''
-+ buf[pos+1] = '\''
-+ pos += 2
-+ } else {
-+ buf[pos] = c
-+ pos++
-+ }
-+ }
-+
-+ return buf[:pos]
-+}
-+
-+/******************************************************************************
-+* Sync utils *
-+******************************************************************************/
-+
-+// noCopy may be embedded into structs which must not be copied
-+// after the first use.
-+//
-+// See https://github.com/golang/go/issues/8005#issuecomment-190753527
-+// for details.
-+type noCopy struct{}
-+
-+// Lock is a no-op used by -copylocks checker from `go vet`.
-+func (*noCopy) Lock() {}
-+
-+// atomicBool is a wrapper around uint32 for usage as a boolean value with
-+// atomic access.
-+type atomicBool struct {
-+ _noCopy noCopy
-+ value uint32
-+}
-+
-+// IsSet returns whether the current boolean value is true
-+func (ab *atomicBool) IsSet() bool {
-+ return atomic.LoadUint32(&ab.value) > 0
-+}
-+
-+// Set sets the value of the bool regardless of the previous value
-+func (ab *atomicBool) Set(value bool) {
-+ if value {
-+ atomic.StoreUint32(&ab.value, 1)
-+ } else {
-+ atomic.StoreUint32(&ab.value, 0)
-+ }
-+}
-+
-+// TrySet sets the value of the bool and returns whether the value changed
-+func (ab *atomicBool) TrySet(value bool) bool {
-+ if value {
-+ return atomic.SwapUint32(&ab.value, 1) == 0
-+ }
-+ return atomic.SwapUint32(&ab.value, 0) > 0
-+}
-+
-+// atomicError is a wrapper for atomically accessed error values
-+type atomicError struct {
-+ _noCopy noCopy
-+ value atomic.Value
-+}
-+
-+// Set sets the error value regardless of the previous value.
-+// The value must not be nil
-+func (ae *atomicError) Set(value error) {
-+ ae.value.Store(value)
-+}
-+
-+// Value returns the current error value
-+func (ae *atomicError) Value() error {
-+ if v := ae.value.Load(); v != nil {
-+ // this will panic if the value doesn't implement the error interface
-+ return v.(error)
-+ }
-+ return nil
-+}
-+
-+func namedValueToValue(named []driver.NamedValue) ([]driver.Value, error) {
-+ dargs := make([]driver.Value, len(named))
-+ for n, param := range named {
-+ if len(param.Name) > 0 {
-+ // TODO: support the use of Named Parameters #561
-+ return nil, errors.New("mysql: driver does not support the use of Named Parameters")
-+ }
-+ dargs[n] = param.Value
-+ }
-+ return dargs, nil
-+}
-+
-+func mapIsolationLevel(level driver.IsolationLevel) (string, error) {
-+ switch sql.IsolationLevel(level) {
-+ case sql.LevelRepeatableRead:
-+ return "REPEATABLE READ", nil
-+ case sql.LevelReadCommitted:
-+ return "READ COMMITTED", nil
-+ case sql.LevelReadUncommitted:
-+ return "READ UNCOMMITTED", nil
-+ case sql.LevelSerializable:
-+ return "SERIALIZABLE", nil
-+ default:
-+ return "", fmt.Errorf("mysql: unsupported isolation level: %v", level)
-+ }
-+}
-diff --git a/vendor/github.com/go-sql-driver/mysql/utils_test.go b/vendor/github.com/go-sql-driver/mysql/utils_test.go
-new file mode 100644
-index 00000000000..10a60c2d0b0
---- /dev/null
-+++ b/vendor/github.com/go-sql-driver/mysql/utils_test.go
-@@ -0,0 +1,293 @@
-+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-+//
-+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
-+//
-+// This Source Code Form is subject to the terms of the Mozilla Public
-+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-+// You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+package mysql
-+
-+import (
-+ "bytes"
-+ "database/sql"
-+ "database/sql/driver"
-+ "encoding/binary"
-+ "testing"
-+)
-+
-+func TestLengthEncodedInteger(t *testing.T) {
-+ var integerTests = []struct {
-+ num uint64
-+ encoded []byte
-+ }{
-+ {0x0000000000000000, []byte{0x00}},
-+ {0x0000000000000012, []byte{0x12}},
-+ {0x00000000000000fa, []byte{0xfa}},
-+ {0x0000000000000100, []byte{0xfc, 0x00, 0x01}},
-+ {0x0000000000001234, []byte{0xfc, 0x34, 0x12}},
-+ {0x000000000000ffff, []byte{0xfc, 0xff, 0xff}},
-+ {0x0000000000010000, []byte{0xfd, 0x00, 0x00, 0x01}},
-+ {0x0000000000123456, []byte{0xfd, 0x56, 0x34, 0x12}},
-+ {0x0000000000ffffff, []byte{0xfd, 0xff, 0xff, 0xff}},
-+ {0x0000000001000000, []byte{0xfe, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00}},
-+ {0x123456789abcdef0, []byte{0xfe, 0xf0, 0xde, 0xbc, 0x9a, 0x78, 0x56, 0x34, 0x12}},
-+ {0xffffffffffffffff, []byte{0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}},
-+ }
-+
-+ for _, tst := range integerTests {
-+ num, isNull, numLen := readLengthEncodedInteger(tst.encoded)
-+ if isNull {
-+ t.Errorf("%x: expected %d, got NULL", tst.encoded, tst.num)
-+ }
-+ if num != tst.num {
-+ t.Errorf("%x: expected %d, got %d", tst.encoded, tst.num, num)
-+ }
-+ if numLen != len(tst.encoded) {
-+ t.Errorf("%x: expected size %d, got %d", tst.encoded, len(tst.encoded), numLen)
-+ }
-+ encoded := appendLengthEncodedInteger(nil, num)
-+ if !bytes.Equal(encoded, tst.encoded) {
-+ t.Errorf("%v: expected %x, got %x", num, tst.encoded, encoded)
-+ }
-+ }
-+}
-+
-+func TestFormatBinaryDateTime(t *testing.T) {
-+ rawDate := [11]byte{}
-+ binary.LittleEndian.PutUint16(rawDate[:2], 1978) // years
-+ rawDate[2] = 12 // months
-+ rawDate[3] = 30 // days
-+ rawDate[4] = 15 // hours
-+ rawDate[5] = 46 // minutes
-+ rawDate[6] = 23 // seconds
-+ binary.LittleEndian.PutUint32(rawDate[7:], 987654) // microseconds
-+ expect := func(expected string, inlen, outlen uint8) {
-+ actual, _ := formatBinaryDateTime(rawDate[:inlen], outlen)
-+ bytes, ok := actual.([]byte)
-+ if !ok {
-+ t.Errorf("formatBinaryDateTime must return []byte, was %T", actual)
-+ }
-+ if string(bytes) != expected {
-+ t.Errorf(
-+ "expected %q, got %q for length in %d, out %d",
-+ expected, actual, inlen, outlen,
-+ )
-+ }
-+ }
-+ expect("0000-00-00", 0, 10)
-+ expect("0000-00-00 00:00:00", 0, 19)
-+ expect("1978-12-30", 4, 10)
-+ expect("1978-12-30 15:46:23", 7, 19)
-+ expect("1978-12-30 15:46:23.987654", 11, 26)
-+}
-+
-+func TestFormatBinaryTime(t *testing.T) {
-+ expect := func(expected string, src []byte, outlen uint8) {
-+ actual, _ := formatBinaryTime(src, outlen)
-+ bytes, ok := actual.([]byte)
-+ if !ok {
-+ t.Errorf("formatBinaryDateTime must return []byte, was %T", actual)
-+ }
-+ if string(bytes) != expected {
-+ t.Errorf(
-+ "expected %q, got %q for src=%q and outlen=%d",
-+ expected, actual, src, outlen)
-+ }
-+ }
-+
-+ // binary format:
-+ // sign (0: positive, 1: negative), days(4), hours, minutes, seconds, micro(4)
-+
-+ // Zeros
-+ expect("00:00:00", []byte{}, 8)
-+ expect("00:00:00.0", []byte{}, 10)
-+ expect("00:00:00.000000", []byte{}, 15)
-+
-+ // Without micro(4)
-+ expect("12:34:56", []byte{0, 0, 0, 0, 0, 12, 34, 56}, 8)
-+ expect("-12:34:56", []byte{1, 0, 0, 0, 0, 12, 34, 56}, 8)
-+ expect("12:34:56.00", []byte{0, 0, 0, 0, 0, 12, 34, 56}, 11)
-+ expect("24:34:56", []byte{0, 1, 0, 0, 0, 0, 34, 56}, 8)
-+ expect("-99:34:56", []byte{1, 4, 0, 0, 0, 3, 34, 56}, 8)
-+ expect("103079215103:34:56", []byte{0, 255, 255, 255, 255, 23, 34, 56}, 8)
-+
-+ // With micro(4)
-+ expect("12:34:56.00", []byte{0, 0, 0, 0, 0, 12, 34, 56, 99, 0, 0, 0}, 11)
-+ expect("12:34:56.000099", []byte{0, 0, 0, 0, 0, 12, 34, 56, 99, 0, 0, 0}, 15)
-+}
-+
-+func TestEscapeBackslash(t *testing.T) {
-+ expect := func(expected, value string) {
-+ actual := string(escapeBytesBackslash([]byte{}, []byte(value)))
-+ if actual != expected {
-+ t.Errorf(
-+ "expected %s, got %s",
-+ expected, actual,
-+ )
-+ }
-+
-+ actual = string(escapeStringBackslash([]byte{}, value))
-+ if actual != expected {
-+ t.Errorf(
-+ "expected %s, got %s",
-+ expected, actual,
-+ )
-+ }
-+ }
-+
-+ expect("foo\\0bar", "foo\x00bar")
-+ expect("foo\\nbar", "foo\nbar")
-+ expect("foo\\rbar", "foo\rbar")
-+ expect("foo\\Zbar", "foo\x1abar")
-+ expect("foo\\\"bar", "foo\"bar")
-+ expect("foo\\\\bar", "foo\\bar")
-+ expect("foo\\'bar", "foo'bar")
-+}
-+
-+func TestEscapeQuotes(t *testing.T) {
-+ expect := func(expected, value string) {
-+ actual := string(escapeBytesQuotes([]byte{}, []byte(value)))
-+ if actual != expected {
-+ t.Errorf(
-+ "expected %s, got %s",
-+ expected, actual,
-+ )
-+ }
-+
-+ actual = string(escapeStringQuotes([]byte{}, value))
-+ if actual != expected {
-+ t.Errorf(
-+ "expected %s, got %s",
-+ expected, actual,
-+ )
-+ }
-+ }
-+
-+ expect("foo\x00bar", "foo\x00bar") // not affected
-+ expect("foo\nbar", "foo\nbar") // not affected
-+ expect("foo\rbar", "foo\rbar") // not affected
-+ expect("foo\x1abar", "foo\x1abar") // not affected
-+ expect("foo''bar", "foo'bar") // affected
-+ expect("foo\"bar", "foo\"bar") // not affected
-+}
-+
-+func TestAtomicBool(t *testing.T) {
-+ var ab atomicBool
-+ if ab.IsSet() {
-+ t.Fatal("Expected value to be false")
-+ }
-+
-+ ab.Set(true)
-+ if ab.value != 1 {
-+ t.Fatal("Set(true) did not set value to 1")
-+ }
-+ if !ab.IsSet() {
-+ t.Fatal("Expected value to be true")
-+ }
-+
-+ ab.Set(true)
-+ if !ab.IsSet() {
-+ t.Fatal("Expected value to be true")
-+ }
-+
-+ ab.Set(false)
-+ if ab.value != 0 {
-+ t.Fatal("Set(false) did not set value to 0")
-+ }
-+ if ab.IsSet() {
-+ t.Fatal("Expected value to be false")
-+ }
-+
-+ ab.Set(false)
-+ if ab.IsSet() {
-+ t.Fatal("Expected value to be false")
-+ }
-+ if ab.TrySet(false) {
-+ t.Fatal("Expected TrySet(false) to fail")
-+ }
-+ if !ab.TrySet(true) {
-+ t.Fatal("Expected TrySet(true) to succeed")
-+ }
-+ if !ab.IsSet() {
-+ t.Fatal("Expected value to be true")
-+ }
-+
-+ ab.Set(true)
-+ if !ab.IsSet() {
-+ t.Fatal("Expected value to be true")
-+ }
-+ if ab.TrySet(true) {
-+ t.Fatal("Expected TrySet(true) to fail")
-+ }
-+ if !ab.TrySet(false) {
-+ t.Fatal("Expected TrySet(false) to succeed")
-+ }
-+ if ab.IsSet() {
-+ t.Fatal("Expected value to be false")
-+ }
-+
-+ ab._noCopy.Lock() // we've "tested" it ¯\_(ツ)_/¯
-+}
-+
-+func TestAtomicError(t *testing.T) {
-+ var ae atomicError
-+ if ae.Value() != nil {
-+ t.Fatal("Expected value to be nil")
-+ }
-+
-+ ae.Set(ErrMalformPkt)
-+ if v := ae.Value(); v != ErrMalformPkt {
-+ if v == nil {
-+ t.Fatal("Value is still nil")
-+ }
-+ t.Fatal("Error did not match")
-+ }
-+ ae.Set(ErrPktSync)
-+ if ae.Value() == ErrMalformPkt {
-+ t.Fatal("Error still matches old error")
-+ }
-+ if v := ae.Value(); v != ErrPktSync {
-+ t.Fatal("Error did not match")
-+ }
-+}
-+
-+func TestIsolationLevelMapping(t *testing.T) {
-+ data := []struct {
-+ level driver.IsolationLevel
-+ expected string
-+ }{
-+ {
-+ level: driver.IsolationLevel(sql.LevelReadCommitted),
-+ expected: "READ COMMITTED",
-+ },
-+ {
-+ level: driver.IsolationLevel(sql.LevelRepeatableRead),
-+ expected: "REPEATABLE READ",
-+ },
-+ {
-+ level: driver.IsolationLevel(sql.LevelReadUncommitted),
-+ expected: "READ UNCOMMITTED",
-+ },
-+ {
-+ level: driver.IsolationLevel(sql.LevelSerializable),
-+ expected: "SERIALIZABLE",
-+ },
-+ }
-+
-+ for i, td := range data {
-+ if actual, err := mapIsolationLevel(td.level); actual != td.expected || err != nil {
-+ t.Fatal(i, td.expected, actual, err)
-+ }
-+ }
-+
-+ // check unsupported mapping
-+ expectedErr := "mysql: unsupported isolation level: 7"
-+ actual, err := mapIsolationLevel(driver.IsolationLevel(sql.LevelLinearizable))
-+ if actual != "" || err == nil {
-+ t.Fatal("Expected error on unsupported isolation level")
-+ }
-+ if err.Error() != expectedErr {
-+ t.Fatalf("Expected error to be %q, got %q", expectedErr, err)
-+ }
-+}
-diff --git a/vendor/github.com/gxed/GoEndian/.gitignore b/vendor/github.com/gxed/GoEndian/.gitignore
-new file mode 100644
-index 00000000000..836562412fe
---- /dev/null
-+++ b/vendor/github.com/gxed/GoEndian/.gitignore
-@@ -0,0 +1,23 @@
-+# Compiled Object files, Static and Dynamic libs (Shared Objects)
-+*.o
-+*.a
-+*.so
-+
-+# Folders
-+_obj
-+_test
-+
-+# Architecture specific extensions/prefixes
-+*.[568vq]
-+[568vq].out
-+
-+*.cgo1.go
-+*.cgo2.c
-+_cgo_defun.c
-+_cgo_gotypes.go
-+_cgo_export.*
-+
-+_testmain.go
-+
-+*.exe
-+*.test
-diff --git a/vendor/github.com/gxed/GoEndian/.gx/lastpubver b/vendor/github.com/gxed/GoEndian/.gx/lastpubver
-new file mode 100644
-index 00000000000..35d60ca8e5a
---- /dev/null
-+++ b/vendor/github.com/gxed/GoEndian/.gx/lastpubver
-@@ -0,0 +1 @@
-+1.0.0: QmeFPvhFJGXGiXAc9zunNxZjCaWgYQpcsdwip2NWLcccyw
-diff --git a/vendor/github.com/gxed/GoEndian/LICENSE b/vendor/github.com/gxed/GoEndian/LICENSE
-new file mode 100644
-index 00000000000..ad410e11302
---- /dev/null
-+++ b/vendor/github.com/gxed/GoEndian/LICENSE
-@@ -0,0 +1,201 @@
-+Apache License
-+ Version 2.0, January 2004
-+ http://www.apache.org/licenses/
-+
-+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-+
-+ 1. Definitions.
-+
-+ "License" shall mean the terms and conditions for use, reproduction,
-+ and distribution as defined by Sections 1 through 9 of this document.
-+
-+ "Licensor" shall mean the copyright owner or entity authorized by
-+ the copyright owner that is granting the License.
-+
-+ "Legal Entity" shall mean the union of the acting entity and all
-+ other entities that control, are controlled by, or are under common
-+ control with that entity. For the purposes of this definition,
-+ "control" means (i) the power, direct or indirect, to cause the
-+ direction or management of such entity, whether by contract or
-+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
-+ outstanding shares, or (iii) beneficial ownership of such entity.
-+
-+ "You" (or "Your") shall mean an individual or Legal Entity
-+ exercising permissions granted by this License.
-+
-+ "Source" form shall mean the preferred form for making modifications,
-+ including but not limited to software source code, documentation
-+ source, and configuration files.
-+
-+ "Object" form shall mean any form resulting from mechanical
-+ transformation or translation of a Source form, including but
-+ not limited to compiled object code, generated documentation,
-+ and conversions to other media types.
-+
-+ "Work" shall mean the work of authorship, whether in Source or
-+ Object form, made available under the License, as indicated by a
-+ copyright notice that is included in or attached to the work
-+ (an example is provided in the Appendix below).
-+
-+ "Derivative Works" shall mean any work, whether in Source or Object
-+ form, that is based on (or derived from) the Work and for which the
-+ editorial revisions, annotations, elaborations, or other modifications
-+ represent, as a whole, an original work of authorship. For the purposes
-+ of this License, Derivative Works shall not include works that remain
-+ separable from, or merely link (or bind by name) to the interfaces of,
-+ the Work and Derivative Works thereof.
-+
-+ "Contribution" shall mean any work of authorship, including
-+ the original version of the Work and any modifications or additions
-+ to that Work or Derivative Works thereof, that is intentionally
-+ submitted to Licensor for inclusion in the Work by the copyright owner
-+ or by an individual or Legal Entity authorized to submit on behalf of
-+ the copyright owner. For the purposes of this definition, "submitted"
-+ means any form of electronic, verbal, or written communication sent
-+ to the Licensor or its representatives, including but not limited to
-+ communication on electronic mailing lists, source code control systems,
-+ and issue tracking systems that are managed by, or on behalf of, the
-+ Licensor for the purpose of discussing and improving the Work, but
-+ excluding communication that is conspicuously marked or otherwise
-+ designated in writing by the copyright owner as "Not a Contribution."
-+
-+ "Contributor" shall mean Licensor and any individual or Legal Entity
-+ on behalf of whom a Contribution has been received by Licensor and
-+ subsequently incorporated within the Work.
-+
-+ 2. Grant of Copyright License. Subject to the terms and conditions of
-+ this License, each Contributor hereby grants to You a perpetual,
-+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-+ copyright license to reproduce, prepare Derivative Works of,
-+ publicly display, publicly perform, sublicense, and distribute the
-+ Work and such Derivative Works in Source or Object form.
-+
-+ 3. Grant of Patent License. Subject to the terms and conditions of
-+ this License, each Contributor hereby grants to You a perpetual,
-+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-+ (except as stated in this section) patent license to make, have made,
-+ use, offer to sell, sell, import, and otherwise transfer the Work,
-+ where such license applies only to those patent claims licensable
-+ by such Contributor that are necessarily infringed by their
-+ Contribution(s) alone or by combination of their Contribution(s)
-+ with the Work to which such Contribution(s) was submitted. If You
-+ institute patent litigation against any entity (including a
-+ cross-claim or counterclaim in a lawsuit) alleging that the Work
-+ or a Contribution incorporated within the Work constitutes direct
-+ or contributory patent infringement, then any patent licenses
-+ granted to You under this License for that Work shall terminate
-+ as of the date such litigation is filed.
-+
-+ 4. Redistribution. You may reproduce and distribute copies of the
-+ Work or Derivative Works thereof in any medium, with or without
-+ modifications, and in Source or Object form, provided that You
-+ meet the following conditions:
-+
-+ (a) You must give any other recipients of the Work or
-+ Derivative Works a copy of this License; and
-+
-+ (b) You must cause any modified files to carry prominent notices
-+ stating that You changed the files; and
-+
-+ (c) You must retain, in the Source form of any Derivative Works
-+ that You distribute, all copyright, patent, trademark, and
-+ attribution notices from the Source form of the Work,
-+ excluding those notices that do not pertain to any part of
-+ the Derivative Works; and
-+
-+ (d) If the Work includes a "NOTICE" text file as part of its
-+ distribution, then any Derivative Works that You distribute must
-+ include a readable copy of the attribution notices contained
-+ within such NOTICE file, excluding those notices that do not
-+ pertain to any part of the Derivative Works, in at least one
-+ of the following places: within a NOTICE text file distributed
-+ as part of the Derivative Works; within the Source form or
-+ documentation, if provided along with the Derivative Works; or,
-+ within a display generated by the Derivative Works, if and
-+ wherever such third-party notices normally appear. The contents
-+ of the NOTICE file are for informational purposes only and
-+ do not modify the License. You may add Your own attribution
-+ notices within Derivative Works that You distribute, alongside
-+ or as an addendum to the NOTICE text from the Work, provided
-+ that such additional attribution notices cannot be construed
-+ as modifying the License.
-+
-+ You may add Your own copyright statement to Your modifications and
-+ may provide additional or different license terms and conditions
-+ for use, reproduction, or distribution of Your modifications, or
-+ for any such Derivative Works as a whole, provided Your use,
-+ reproduction, and distribution of the Work otherwise complies with
-+ the conditions stated in this License.
-+
-+ 5. Submission of Contributions. Unless You explicitly state otherwise,
-+ any Contribution intentionally submitted for inclusion in the Work
-+ by You to the Licensor shall be under the terms and conditions of
-+ this License, without any additional terms or conditions.
-+ Notwithstanding the above, nothing herein shall supersede or modify
-+ the terms of any separate license agreement you may have executed
-+ with Licensor regarding such Contributions.
-+
-+ 6. Trademarks. This License does not grant permission to use the trade
-+ names, trademarks, service marks, or product names of the Licensor,
-+ except as required for reasonable and customary use in describing the
-+ origin of the Work and reproducing the content of the NOTICE file.
-+
-+ 7. Disclaimer of Warranty. Unless required by applicable law or
-+ agreed to in writing, Licensor provides the Work (and each
-+ Contributor provides its Contributions) on an "AS IS" BASIS,
-+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-+ implied, including, without limitation, any warranties or conditions
-+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-+ PARTICULAR PURPOSE. You are solely responsible for determining the
-+ appropriateness of using or redistributing the Work and assume any
-+ risks associated with Your exercise of permissions under this License.
-+
-+ 8. Limitation of Liability. In no event and under no legal theory,
-+ whether in tort (including negligence), contract, or otherwise,
-+ unless required by applicable law (such as deliberate and grossly
-+ negligent acts) or agreed to in writing, shall any Contributor be
-+ liable to You for damages, including any direct, indirect, special,
-+ incidental, or consequential damages of any character arising as a
-+ result of this License or out of the use or inability to use the
-+ Work (including but not limited to damages for loss of goodwill,
-+ work stoppage, computer failure or malfunction, or any and all
-+ other commercial damages or losses), even if such Contributor
-+ has been advised of the possibility of such damages.
-+
-+ 9. Accepting Warranty or Additional Liability. While redistributing
-+ the Work or Derivative Works thereof, You may choose to offer,
-+ and charge a fee for, acceptance of support, warranty, indemnity,
-+ or other liability obligations and/or rights consistent with this
-+ License. However, in accepting such obligations, You may act only
-+ on Your own behalf and on Your sole responsibility, not on behalf
-+ of any other Contributor, and only if You agree to indemnify,
-+ defend, and hold each Contributor harmless for any liability
-+ incurred by, or claims asserted against, such Contributor by reason
-+ of your accepting any such warranty or additional liability.
-+
-+ END OF TERMS AND CONDITIONS
-+
-+ APPENDIX: How to apply the Apache License to your work.
-+
-+ To apply the Apache License to your work, attach the following
-+ boilerplate notice, with the fields enclosed by brackets "{}"
-+ replaced with your own identifying information. (Don't include
-+ the brackets!) The text should be enclosed in the appropriate
-+ comment syntax for the file format. We also recommend that a
-+ file or class name and description of purpose be included on the
-+ same "printed page" as the copyright notice for easier
-+ identification within third-party archives.
-+
-+ Copyright {yyyy} {name of copyright owner}
-+
-+ Licensed under the Apache License, Version 2.0 (the "License");
-+ you may not use this file except in compliance with the License.
-+ You may obtain a copy of the License at
-+
-+ http://www.apache.org/licenses/LICENSE-2.0
-+
-+ Unless required by applicable law or agreed to in writing, software
-+ distributed under the License is distributed on an "AS IS" BASIS,
-+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-+ See the License for the specific language governing permissions and
-+ limitations under the License.
-\ No newline at end of file
-diff --git a/vendor/github.com/gxed/GoEndian/README.md b/vendor/github.com/gxed/GoEndian/README.md
-new file mode 100644
-index 00000000000..210eb6f6baf
---- /dev/null
-+++ b/vendor/github.com/gxed/GoEndian/README.md
-@@ -0,0 +1,63 @@
-+GoEndian
-+========
-+
-+A tool to detect byte order for golang.
-+
-+A sample test code :
-+
-+```
-+package main
-+
-+import (
-+ "encoding/binary"
-+ "fmt"
-+ "github.com/virtao/GoEndian"
-+)
-+
-+func main() {
-+ printEndian()
-+ useEndian()
-+}
-+
-+func printEndian() {
-+ fmt.Println("Machine byte order : ")
-+ if endian.IsBigEndian() {
-+ fmt.Println("Big Endian")
-+ } else {
-+ fmt.Println("Little Endian")
-+ }
-+}
-+
-+func useEndian() {
-+ var iTest int32 = 0x12345678
-+ var bTest []byte = make([]byte, 4)
-+ fmt.Println("Int32 to Bytes : ")
-+
-+ fmt.Println("0x12345678 to current endian : ")
-+ endian.Endian.PutUint32(bTest, uint32(iTest))
-+ fmt.Println(bTest)
-+
-+ fmt.Println("0x12345678 to big endian : ")
-+ binary.BigEndian.PutUint32(bTest, uint32(iTest))
-+ fmt.Println(bTest)
-+
-+ fmt.Println("0x12345678 to little endian : ")
-+ binary.LittleEndian.PutUint32(bTest, uint32(iTest))
-+ fmt.Println(bTest)
-+
-+}
-+```
-+
-+The result output:
-+
-+```
-+ Machine byte order :
-+ Little Endian
-+ Int32 to Bytes :
-+ 0x12345678 to current endian :
-+ [120 86 52 18]
-+ 0x12345678 to big endian :
-+ [18 52 86 120]
-+ 0x12345678 to little endian :
-+ [120 86 52 18]
-+```
-\ No newline at end of file
-diff --git a/vendor/github.com/gxed/GoEndian/endian.go b/vendor/github.com/gxed/GoEndian/endian.go
-new file mode 100644
-index 00000000000..eb5d1bfab8e
---- /dev/null
-+++ b/vendor/github.com/gxed/GoEndian/endian.go
-@@ -0,0 +1,43 @@
-+package endian
-+
-+import (
-+ "encoding/binary"
-+ "unsafe"
-+)
-+
-+//保存机器大小端
-+var Endian binary.ByteOrder
-+var bigEndian bool
-+
-+func IsBigEndian() bool {
-+ return bigEndian
-+}
-+
-+func IsLittleEndian() bool {
-+ return !bigEndian
-+}
-+
-+func init() {
-+ if getEndian() {
-+ Endian = binary.BigEndian
-+ bigEndian = true
-+ } else {
-+ Endian = binary.LittleEndian
-+ bigEndian = false
-+ }
-+}
-+
-+//以下代码判断机器大小端
-+const INT_SIZE int = int(unsafe.Sizeof(0))
-+
-+//true = big endian, false = little endian
-+func getEndian() (ret bool) {
-+ var i int = 0x1
-+ bs := (*[INT_SIZE]byte)(unsafe.Pointer(&i))
-+ if bs[0] == 0 {
-+ return true
-+ } else {
-+ return false
-+ }
-+
-+}
-diff --git a/vendor/github.com/gxed/GoEndian/package.json b/vendor/github.com/gxed/GoEndian/package.json
-new file mode 100644
-index 00000000000..b36d1e5dca2
---- /dev/null
-+++ b/vendor/github.com/gxed/GoEndian/package.json
-@@ -0,0 +1,15 @@
-+{
-+ "author": "gxed",
-+ "bugs": {
-+ "URL": "https://github.com/gxed/GoEndian/issues",
-+ "url": "https://github.com/gxed/GoEndian/issues"
-+ },
-+ "gx": {
-+ "dvcsimport": "github.com/gxed/GoEndian"
-+ },
-+ "gxVersion": "0.9.0",
-+ "language": "go",
-+ "license": "",
-+ "name": "GoEndian",
-+ "version": "1.0.0"
-+}
-diff --git a/vendor/github.com/lib/pq/.gitignore b/vendor/github.com/lib/pq/.gitignore
-new file mode 100644
-index 00000000000..0f1d00e1196
---- /dev/null
-+++ b/vendor/github.com/lib/pq/.gitignore
-@@ -0,0 +1,4 @@
-+.db
-+*.test
-+*~
-+*.swp
-diff --git a/vendor/github.com/lib/pq/.travis.sh b/vendor/github.com/lib/pq/.travis.sh
-new file mode 100755
-index 00000000000..ebf447030be
---- /dev/null
-+++ b/vendor/github.com/lib/pq/.travis.sh
-@@ -0,0 +1,73 @@
-+#!/bin/bash
-+
-+set -eu
-+
-+client_configure() {
-+ sudo chmod 600 $PQSSLCERTTEST_PATH/postgresql.key
-+}
-+
-+pgdg_repository() {
-+ local sourcelist='sources.list.d/postgresql.list'
-+
-+ curl -sS 'https://www.postgresql.org/media/keys/ACCC4CF8.asc' | sudo apt-key add -
-+ echo deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main $PGVERSION | sudo tee "/etc/apt/$sourcelist"
-+ sudo apt-get -o Dir::Etc::sourcelist="$sourcelist" -o Dir::Etc::sourceparts='-' -o APT::Get::List-Cleanup='0' update
-+}
-+
-+postgresql_configure() {
-+ sudo tee /etc/postgresql/$PGVERSION/main/pg_hba.conf > /dev/null <<-config
-+ local all all trust
-+ hostnossl all pqgossltest 127.0.0.1/32 reject
-+ hostnossl all pqgosslcert 127.0.0.1/32 reject
-+ hostssl all pqgossltest 127.0.0.1/32 trust
-+ hostssl all pqgosslcert 127.0.0.1/32 cert
-+ host all all 127.0.0.1/32 trust
-+ hostnossl all pqgossltest ::1/128 reject
-+ hostnossl all pqgosslcert ::1/128 reject
-+ hostssl all pqgossltest ::1/128 trust
-+ hostssl all pqgosslcert ::1/128 cert
-+ host all all ::1/128 trust
-+ config
-+
-+ xargs sudo install -o postgres -g postgres -m 600 -t /var/lib/postgresql/$PGVERSION/main/ <<-certificates
-+ certs/root.crt
-+ certs/server.crt
-+ certs/server.key
-+ certificates
-+
-+ sort -VCu <<-versions ||
-+ $PGVERSION
-+ 9.2
-+ versions
-+ sudo tee -a /etc/postgresql/$PGVERSION/main/postgresql.conf > /dev/null <<-config
-+ ssl_ca_file = 'root.crt'
-+ ssl_cert_file = 'server.crt'
-+ ssl_key_file = 'server.key'
-+ config
-+
-+ echo 127.0.0.1 postgres | sudo tee -a /etc/hosts > /dev/null
-+
-+ sudo service postgresql restart
-+}
-+
-+postgresql_install() {
-+ xargs sudo apt-get -y -o Dpkg::Options::='--force-confdef' -o Dpkg::Options::='--force-confnew' install <<-packages
-+ postgresql-$PGVERSION
-+ postgresql-server-dev-$PGVERSION
-+ postgresql-contrib-$PGVERSION
-+ packages
-+}
-+
-+postgresql_uninstall() {
-+ sudo service postgresql stop
-+ xargs sudo apt-get -y --purge remove <<-packages
-+ libpq-dev
-+ libpq5
-+ postgresql
-+ postgresql-client-common
-+ postgresql-common
-+ packages
-+ sudo rm -rf /var/lib/postgresql
-+}
-+
-+$1
-diff --git a/vendor/github.com/lib/pq/.travis.yml b/vendor/github.com/lib/pq/.travis.yml
-new file mode 100644
-index 00000000000..8396f5d9d47
---- /dev/null
-+++ b/vendor/github.com/lib/pq/.travis.yml
-@@ -0,0 +1,44 @@
-+language: go
-+
-+go:
-+ - 1.11.x
-+ - 1.12.x
-+ - master
-+
-+sudo: true
-+
-+env:
-+ global:
-+ - PGUSER=postgres
-+ - PQGOSSLTESTS=1
-+ - PQSSLCERTTEST_PATH=$PWD/certs
-+ - PGHOST=127.0.0.1
-+ matrix:
-+ - PGVERSION=10
-+ - PGVERSION=9.6
-+ - PGVERSION=9.5
-+ - PGVERSION=9.4
-+
-+before_install:
-+ - ./.travis.sh postgresql_uninstall
-+ - ./.travis.sh pgdg_repository
-+ - ./.travis.sh postgresql_install
-+ - ./.travis.sh postgresql_configure
-+ - ./.travis.sh client_configure
-+ - go get golang.org/x/tools/cmd/goimports
-+ - go get golang.org/x/lint/golint
-+ - GO111MODULE=on go get honnef.co/go/tools/cmd/staticcheck@2019.2.1
-+
-+before_script:
-+ - createdb pqgotest
-+ - createuser -DRS pqgossltest
-+ - createuser -DRS pqgosslcert
-+
-+script:
-+ - >
-+ goimports -d -e $(find -name '*.go') | awk '{ print } END { exit NR == 0 ? 0 : 1 }'
-+ - go vet ./...
-+ - staticcheck -go 1.11 ./...
-+ - golint ./...
-+ - PQTEST_BINARY_PARAMETERS=no go test -race -v ./...
-+ - PQTEST_BINARY_PARAMETERS=yes go test -race -v ./...
-diff --git a/vendor/github.com/lib/pq/CONTRIBUTING.md b/vendor/github.com/lib/pq/CONTRIBUTING.md
-new file mode 100644
-index 00000000000..84c937f1561
---- /dev/null
-+++ b/vendor/github.com/lib/pq/CONTRIBUTING.md
-@@ -0,0 +1,29 @@
-+## Contributing to pq
-+
-+`pq` has a backlog of pull requests, but contributions are still very
-+much welcome. You can help with patch review, submitting bug reports,
-+or adding new functionality. There is no formal style guide, but
-+please conform to the style of existing code and general Go formatting
-+conventions when submitting patches.
-+
-+### Patch review
-+
-+Help review existing open pull requests by commenting on the code or
-+proposed functionality.
-+
-+### Bug reports
-+
-+We appreciate any bug reports, but especially ones with self-contained
-+(doesn't depend on code outside of pq), minimal (can't be simplified
-+further) test cases. It's especially helpful if you can submit a pull
-+request with just the failing test case (you'll probably want to
-+pattern it after the tests in
-+[conn_test.go](https://github.com/lib/pq/blob/master/conn_test.go).
-+
-+### New functionality
-+
-+There are a number of pending patches for new functionality, so
-+additional feature patches will take a while to merge. Still, patches
-+are generally reviewed based on usefulness and complexity in addition
-+to time-in-queue, so if you have a knockout idea, take a shot. Feel
-+free to open an issue discussion your proposed patch beforehand.
-diff --git a/vendor/github.com/lib/pq/LICENSE.md b/vendor/github.com/lib/pq/LICENSE.md
-new file mode 100644
-index 00000000000..5773904a30e
---- /dev/null
-+++ b/vendor/github.com/lib/pq/LICENSE.md
-@@ -0,0 +1,8 @@
-+Copyright (c) 2011-2013, 'pq' Contributors
-+Portions Copyright (C) 2011 Blake Mizerany
-+
-+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-+
-+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-+
-+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-diff --git a/vendor/github.com/lib/pq/README.md b/vendor/github.com/lib/pq/README.md
-new file mode 100644
-index 00000000000..385fe73508e
---- /dev/null
-+++ b/vendor/github.com/lib/pq/README.md
-@@ -0,0 +1,95 @@
-+# pq - A pure Go postgres driver for Go's database/sql package
-+
-+[![GoDoc](https://godoc.org/github.com/lib/pq?status.svg)](https://godoc.org/github.com/lib/pq)
-+[![Build Status](https://travis-ci.org/lib/pq.svg?branch=master)](https://travis-ci.org/lib/pq)
-+
-+## Install
-+
-+ go get github.com/lib/pq
-+
-+## Docs
-+
-+For detailed documentation and basic usage examples, please see the package
-+documentation at .
-+
-+## Tests
-+
-+`go test` is used for testing. See [TESTS.md](TESTS.md) for more details.
-+
-+## Features
-+
-+* SSL
-+* Handles bad connections for `database/sql`
-+* Scan `time.Time` correctly (i.e. `timestamp[tz]`, `time[tz]`, `date`)
-+* Scan binary blobs correctly (i.e. `bytea`)
-+* Package for `hstore` support
-+* COPY FROM support
-+* pq.ParseURL for converting urls to connection strings for sql.Open.
-+* Many libpq compatible environment variables
-+* Unix socket support
-+* Notifications: `LISTEN`/`NOTIFY`
-+* pgpass support
-+
-+## Future / Things you can help with
-+
-+* Better COPY FROM / COPY TO (see discussion in #181)
-+
-+## Thank you (alphabetical)
-+
-+Some of these contributors are from the original library `bmizerany/pq.go` whose
-+code still exists in here.
-+
-+* Andy Balholm (andybalholm)
-+* Ben Berkert (benburkert)
-+* Benjamin Heatwole (bheatwole)
-+* Bill Mill (llimllib)
-+* Bjørn Madsen (aeons)
-+* Blake Gentry (bgentry)
-+* Brad Fitzpatrick (bradfitz)
-+* Charlie Melbye (cmelbye)
-+* Chris Bandy (cbandy)
-+* Chris Gilling (cgilling)
-+* Chris Walsh (cwds)
-+* Dan Sosedoff (sosedoff)
-+* Daniel Farina (fdr)
-+* Eric Chlebek (echlebek)
-+* Eric Garrido (minusnine)
-+* Eric Urban (hydrogen18)
-+* Everyone at The Go Team
-+* Evan Shaw (edsrzf)
-+* Ewan Chou (coocood)
-+* Fazal Majid (fazalmajid)
-+* Federico Romero (federomero)
-+* Fumin (fumin)
-+* Gary Burd (garyburd)
-+* Heroku (heroku)
-+* James Pozdena (jpoz)
-+* Jason McVetta (jmcvetta)
-+* Jeremy Jay (pbnjay)
-+* Joakim Sernbrant (serbaut)
-+* John Gallagher (jgallagher)
-+* Jonathan Rudenberg (titanous)
-+* Joël Stemmer (jstemmer)
-+* Kamil Kisiel (kisielk)
-+* Kelly Dunn (kellydunn)
-+* Keith Rarick (kr)
-+* Kir Shatrov (kirs)
-+* Lann Martin (lann)
-+* Maciek Sakrejda (uhoh-itsmaciek)
-+* Marc Brinkmann (mbr)
-+* Marko Tiikkaja (johto)
-+* Matt Newberry (MattNewberry)
-+* Matt Robenolt (mattrobenolt)
-+* Martin Olsen (martinolsen)
-+* Mike Lewis (mikelikespie)
-+* Nicolas Patry (Narsil)
-+* Oliver Tonnhofer (olt)
-+* Patrick Hayes (phayes)
-+* Paul Hammond (paulhammond)
-+* Ryan Smith (ryandotsmith)
-+* Samuel Stauffer (samuel)
-+* Timothée Peignier (cyberdelia)
-+* Travis Cline (tmc)
-+* TruongSinh Tran-Nguyen (truongsinh)
-+* Yaismel Miranda (ympons)
-+* notedit (notedit)
-diff --git a/vendor/github.com/lib/pq/TESTS.md b/vendor/github.com/lib/pq/TESTS.md
-new file mode 100644
-index 00000000000..f05021115be
---- /dev/null
-+++ b/vendor/github.com/lib/pq/TESTS.md
-@@ -0,0 +1,33 @@
-+# Tests
-+
-+## Running Tests
-+
-+`go test` is used for testing. A running PostgreSQL
-+server is required, with the ability to log in. The
-+database to connect to test with is "pqgotest," on
-+"localhost" but these can be overridden using [environment
-+variables](https://www.postgresql.org/docs/9.3/static/libpq-envars.html).
-+
-+Example:
-+
-+ PGHOST=/run/postgresql go test
-+
-+## Benchmarks
-+
-+A benchmark suite can be run as part of the tests:
-+
-+ go test -bench .
-+
-+## Example setup (Docker)
-+
-+Run a postgres container:
-+
-+```
-+docker run --expose 5432:5432 postgres
-+```
-+
-+Run tests:
-+
-+```
-+PGHOST=localhost PGPORT=5432 PGUSER=postgres PGSSLMODE=disable PGDATABASE=postgres go test
-+```
-diff --git a/vendor/github.com/lib/pq/array.go b/vendor/github.com/lib/pq/array.go
-new file mode 100644
-index 00000000000..e4933e22764
---- /dev/null
-+++ b/vendor/github.com/lib/pq/array.go
-@@ -0,0 +1,756 @@
-+package pq
-+
-+import (
-+ "bytes"
-+ "database/sql"
-+ "database/sql/driver"
-+ "encoding/hex"
-+ "fmt"
-+ "reflect"
-+ "strconv"
-+ "strings"
-+)
-+
-+var typeByteSlice = reflect.TypeOf([]byte{})
-+var typeDriverValuer = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
-+var typeSQLScanner = reflect.TypeOf((*sql.Scanner)(nil)).Elem()
-+
-+// Array returns the optimal driver.Valuer and sql.Scanner for an array or
-+// slice of any dimension.
-+//
-+// For example:
-+// db.Query(`SELECT * FROM t WHERE id = ANY($1)`, pq.Array([]int{235, 401}))
-+//
-+// var x []sql.NullInt64
-+// db.QueryRow('SELECT ARRAY[235, 401]').Scan(pq.Array(&x))
-+//
-+// Scanning multi-dimensional arrays is not supported. Arrays where the lower
-+// bound is not one (such as `[0:0]={1}') are not supported.
-+func Array(a interface{}) interface {
-+ driver.Valuer
-+ sql.Scanner
-+} {
-+ switch a := a.(type) {
-+ case []bool:
-+ return (*BoolArray)(&a)
-+ case []float64:
-+ return (*Float64Array)(&a)
-+ case []int64:
-+ return (*Int64Array)(&a)
-+ case []string:
-+ return (*StringArray)(&a)
-+
-+ case *[]bool:
-+ return (*BoolArray)(a)
-+ case *[]float64:
-+ return (*Float64Array)(a)
-+ case *[]int64:
-+ return (*Int64Array)(a)
-+ case *[]string:
-+ return (*StringArray)(a)
-+ }
-+
-+ return GenericArray{a}
-+}
-+
-+// ArrayDelimiter may be optionally implemented by driver.Valuer or sql.Scanner
-+// to override the array delimiter used by GenericArray.
-+type ArrayDelimiter interface {
-+ // ArrayDelimiter returns the delimiter character(s) for this element's type.
-+ ArrayDelimiter() string
-+}
-+
-+// BoolArray represents a one-dimensional array of the PostgreSQL boolean type.
-+type BoolArray []bool
-+
-+// Scan implements the sql.Scanner interface.
-+func (a *BoolArray) Scan(src interface{}) error {
-+ switch src := src.(type) {
-+ case []byte:
-+ return a.scanBytes(src)
-+ case string:
-+ return a.scanBytes([]byte(src))
-+ case nil:
-+ *a = nil
-+ return nil
-+ }
-+
-+ return fmt.Errorf("pq: cannot convert %T to BoolArray", src)
-+}
-+
-+func (a *BoolArray) scanBytes(src []byte) error {
-+ elems, err := scanLinearArray(src, []byte{','}, "BoolArray")
-+ if err != nil {
-+ return err
-+ }
-+ if *a != nil && len(elems) == 0 {
-+ *a = (*a)[:0]
-+ } else {
-+ b := make(BoolArray, len(elems))
-+ for i, v := range elems {
-+ if len(v) != 1 {
-+ return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v)
-+ }
-+ switch v[0] {
-+ case 't':
-+ b[i] = true
-+ case 'f':
-+ b[i] = false
-+ default:
-+ return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v)
-+ }
-+ }
-+ *a = b
-+ }
-+ return nil
-+}
-+
-+// Value implements the driver.Valuer interface.
-+func (a BoolArray) Value() (driver.Value, error) {
-+ if a == nil {
-+ return nil, nil
-+ }
-+
-+ if n := len(a); n > 0 {
-+ // There will be exactly two curly brackets, N bytes of values,
-+ // and N-1 bytes of delimiters.
-+ b := make([]byte, 1+2*n)
-+
-+ for i := 0; i < n; i++ {
-+ b[2*i] = ','
-+ if a[i] {
-+ b[1+2*i] = 't'
-+ } else {
-+ b[1+2*i] = 'f'
-+ }
-+ }
-+
-+ b[0] = '{'
-+ b[2*n] = '}'
-+
-+ return string(b), nil
-+ }
-+
-+ return "{}", nil
-+}
-+
-+// ByteaArray represents a one-dimensional array of the PostgreSQL bytea type.
-+type ByteaArray [][]byte
-+
-+// Scan implements the sql.Scanner interface.
-+func (a *ByteaArray) Scan(src interface{}) error {
-+ switch src := src.(type) {
-+ case []byte:
-+ return a.scanBytes(src)
-+ case string:
-+ return a.scanBytes([]byte(src))
-+ case nil:
-+ *a = nil
-+ return nil
-+ }
-+
-+ return fmt.Errorf("pq: cannot convert %T to ByteaArray", src)
-+}
-+
-+func (a *ByteaArray) scanBytes(src []byte) error {
-+ elems, err := scanLinearArray(src, []byte{','}, "ByteaArray")
-+ if err != nil {
-+ return err
-+ }
-+ if *a != nil && len(elems) == 0 {
-+ *a = (*a)[:0]
-+ } else {
-+ b := make(ByteaArray, len(elems))
-+ for i, v := range elems {
-+ b[i], err = parseBytea(v)
-+ if err != nil {
-+ return fmt.Errorf("could not parse bytea array index %d: %s", i, err.Error())
-+ }
-+ }
-+ *a = b
-+ }
-+ return nil
-+}
-+
-+// Value implements the driver.Valuer interface. It uses the "hex" format which
-+// is only supported on PostgreSQL 9.0 or newer.
-+func (a ByteaArray) Value() (driver.Value, error) {
-+ if a == nil {
-+ return nil, nil
-+ }
-+
-+ if n := len(a); n > 0 {
-+ // There will be at least two curly brackets, 2*N bytes of quotes,
-+ // 3*N bytes of hex formatting, and N-1 bytes of delimiters.
-+ size := 1 + 6*n
-+ for _, x := range a {
-+ size += hex.EncodedLen(len(x))
-+ }
-+
-+ b := make([]byte, size)
-+
-+ for i, s := 0, b; i < n; i++ {
-+ o := copy(s, `,"\\x`)
-+ o += hex.Encode(s[o:], a[i])
-+ s[o] = '"'
-+ s = s[o+1:]
-+ }
-+
-+ b[0] = '{'
-+ b[size-1] = '}'
-+
-+ return string(b), nil
-+ }
-+
-+ return "{}", nil
-+}
-+
-+// Float64Array represents a one-dimensional array of the PostgreSQL double
-+// precision type.
-+type Float64Array []float64
-+
-+// Scan implements the sql.Scanner interface.
-+func (a *Float64Array) Scan(src interface{}) error {
-+ switch src := src.(type) {
-+ case []byte:
-+ return a.scanBytes(src)
-+ case string:
-+ return a.scanBytes([]byte(src))
-+ case nil:
-+ *a = nil
-+ return nil
-+ }
-+
-+ return fmt.Errorf("pq: cannot convert %T to Float64Array", src)
-+}
-+
-+func (a *Float64Array) scanBytes(src []byte) error {
-+ elems, err := scanLinearArray(src, []byte{','}, "Float64Array")
-+ if err != nil {
-+ return err
-+ }
-+ if *a != nil && len(elems) == 0 {
-+ *a = (*a)[:0]
-+ } else {
-+ b := make(Float64Array, len(elems))
-+ for i, v := range elems {
-+ if b[i], err = strconv.ParseFloat(string(v), 64); err != nil {
-+ return fmt.Errorf("pq: parsing array element index %d: %v", i, err)
-+ }
-+ }
-+ *a = b
-+ }
-+ return nil
-+}
-+
-+// Value implements the driver.Valuer interface.
-+func (a Float64Array) Value() (driver.Value, error) {
-+ if a == nil {
-+ return nil, nil
-+ }
-+
-+ if n := len(a); n > 0 {
-+ // There will be at least two curly brackets, N bytes of values,
-+ // and N-1 bytes of delimiters.
-+ b := make([]byte, 1, 1+2*n)
-+ b[0] = '{'
-+
-+ b = strconv.AppendFloat(b, a[0], 'f', -1, 64)
-+ for i := 1; i < n; i++ {
-+ b = append(b, ',')
-+ b = strconv.AppendFloat(b, a[i], 'f', -1, 64)
-+ }
-+
-+ return string(append(b, '}')), nil
-+ }
-+
-+ return "{}", nil
-+}
-+
-+// GenericArray implements the driver.Valuer and sql.Scanner interfaces for
-+// an array or slice of any dimension.
-+type GenericArray struct{ A interface{} }
-+
-+func (GenericArray) evaluateDestination(rt reflect.Type) (reflect.Type, func([]byte, reflect.Value) error, string) {
-+ var assign func([]byte, reflect.Value) error
-+ var del = ","
-+
-+ // TODO calculate the assign function for other types
-+ // TODO repeat this section on the element type of arrays or slices (multidimensional)
-+ {
-+ if reflect.PtrTo(rt).Implements(typeSQLScanner) {
-+ // dest is always addressable because it is an element of a slice.
-+ assign = func(src []byte, dest reflect.Value) (err error) {
-+ ss := dest.Addr().Interface().(sql.Scanner)
-+ if src == nil {
-+ err = ss.Scan(nil)
-+ } else {
-+ err = ss.Scan(src)
-+ }
-+ return
-+ }
-+ goto FoundType
-+ }
-+
-+ assign = func([]byte, reflect.Value) error {
-+ return fmt.Errorf("pq: scanning to %s is not implemented; only sql.Scanner", rt)
-+ }
-+ }
-+
-+FoundType:
-+
-+ if ad, ok := reflect.Zero(rt).Interface().(ArrayDelimiter); ok {
-+ del = ad.ArrayDelimiter()
-+ }
-+
-+ return rt, assign, del
-+}
-+
-+// Scan implements the sql.Scanner interface.
-+func (a GenericArray) Scan(src interface{}) error {
-+ dpv := reflect.ValueOf(a.A)
-+ switch {
-+ case dpv.Kind() != reflect.Ptr:
-+ return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A)
-+ case dpv.IsNil():
-+ return fmt.Errorf("pq: destination %T is nil", a.A)
-+ }
-+
-+ dv := dpv.Elem()
-+ switch dv.Kind() {
-+ case reflect.Slice:
-+ case reflect.Array:
-+ default:
-+ return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A)
-+ }
-+
-+ switch src := src.(type) {
-+ case []byte:
-+ return a.scanBytes(src, dv)
-+ case string:
-+ return a.scanBytes([]byte(src), dv)
-+ case nil:
-+ if dv.Kind() == reflect.Slice {
-+ dv.Set(reflect.Zero(dv.Type()))
-+ return nil
-+ }
-+ }
-+
-+ return fmt.Errorf("pq: cannot convert %T to %s", src, dv.Type())
-+}
-+
-+func (a GenericArray) scanBytes(src []byte, dv reflect.Value) error {
-+ dtype, assign, del := a.evaluateDestination(dv.Type().Elem())
-+ dims, elems, err := parseArray(src, []byte(del))
-+ if err != nil {
-+ return err
-+ }
-+
-+ // TODO allow multidimensional
-+
-+ if len(dims) > 1 {
-+ return fmt.Errorf("pq: scanning from multidimensional ARRAY%s is not implemented",
-+ strings.Replace(fmt.Sprint(dims), " ", "][", -1))
-+ }
-+
-+ // Treat a zero-dimensional array like an array with a single dimension of zero.
-+ if len(dims) == 0 {
-+ dims = append(dims, 0)
-+ }
-+
-+ for i, rt := 0, dv.Type(); i < len(dims); i, rt = i+1, rt.Elem() {
-+ switch rt.Kind() {
-+ case reflect.Slice:
-+ case reflect.Array:
-+ if rt.Len() != dims[i] {
-+ return fmt.Errorf("pq: cannot convert ARRAY%s to %s",
-+ strings.Replace(fmt.Sprint(dims), " ", "][", -1), dv.Type())
-+ }
-+ default:
-+ // TODO handle multidimensional
-+ }
-+ }
-+
-+ values := reflect.MakeSlice(reflect.SliceOf(dtype), len(elems), len(elems))
-+ for i, e := range elems {
-+ if err := assign(e, values.Index(i)); err != nil {
-+ return fmt.Errorf("pq: parsing array element index %d: %v", i, err)
-+ }
-+ }
-+
-+ // TODO handle multidimensional
-+
-+ switch dv.Kind() {
-+ case reflect.Slice:
-+ dv.Set(values.Slice(0, dims[0]))
-+ case reflect.Array:
-+ for i := 0; i < dims[0]; i++ {
-+ dv.Index(i).Set(values.Index(i))
-+ }
-+ }
-+
-+ return nil
-+}
-+
-+// Value implements the driver.Valuer interface.
-+func (a GenericArray) Value() (driver.Value, error) {
-+ if a.A == nil {
-+ return nil, nil
-+ }
-+
-+ rv := reflect.ValueOf(a.A)
-+
-+ switch rv.Kind() {
-+ case reflect.Slice:
-+ if rv.IsNil() {
-+ return nil, nil
-+ }
-+ case reflect.Array:
-+ default:
-+ return nil, fmt.Errorf("pq: Unable to convert %T to array", a.A)
-+ }
-+
-+ if n := rv.Len(); n > 0 {
-+ // There will be at least two curly brackets, N bytes of values,
-+ // and N-1 bytes of delimiters.
-+ b := make([]byte, 0, 1+2*n)
-+
-+ b, _, err := appendArray(b, rv, n)
-+ return string(b), err
-+ }
-+
-+ return "{}", nil
-+}
-+
-+// Int64Array represents a one-dimensional array of the PostgreSQL integer types.
-+type Int64Array []int64
-+
-+// Scan implements the sql.Scanner interface.
-+func (a *Int64Array) Scan(src interface{}) error {
-+ switch src := src.(type) {
-+ case []byte:
-+ return a.scanBytes(src)
-+ case string:
-+ return a.scanBytes([]byte(src))
-+ case nil:
-+ *a = nil
-+ return nil
-+ }
-+
-+ return fmt.Errorf("pq: cannot convert %T to Int64Array", src)
-+}
-+
-+func (a *Int64Array) scanBytes(src []byte) error {
-+ elems, err := scanLinearArray(src, []byte{','}, "Int64Array")
-+ if err != nil {
-+ return err
-+ }
-+ if *a != nil && len(elems) == 0 {
-+ *a = (*a)[:0]
-+ } else {
-+ b := make(Int64Array, len(elems))
-+ for i, v := range elems {
-+ if b[i], err = strconv.ParseInt(string(v), 10, 64); err != nil {
-+ return fmt.Errorf("pq: parsing array element index %d: %v", i, err)
-+ }
-+ }
-+ *a = b
-+ }
-+ return nil
-+}
-+
-+// Value implements the driver.Valuer interface.
-+func (a Int64Array) Value() (driver.Value, error) {
-+ if a == nil {
-+ return nil, nil
-+ }
-+
-+ if n := len(a); n > 0 {
-+ // There will be at least two curly brackets, N bytes of values,
-+ // and N-1 bytes of delimiters.
-+ b := make([]byte, 1, 1+2*n)
-+ b[0] = '{'
-+
-+ b = strconv.AppendInt(b, a[0], 10)
-+ for i := 1; i < n; i++ {
-+ b = append(b, ',')
-+ b = strconv.AppendInt(b, a[i], 10)
-+ }
-+
-+ return string(append(b, '}')), nil
-+ }
-+
-+ return "{}", nil
-+}
-+
-+// StringArray represents a one-dimensional array of the PostgreSQL character types.
-+type StringArray []string
-+
-+// Scan implements the sql.Scanner interface.
-+func (a *StringArray) Scan(src interface{}) error {
-+ switch src := src.(type) {
-+ case []byte:
-+ return a.scanBytes(src)
-+ case string:
-+ return a.scanBytes([]byte(src))
-+ case nil:
-+ *a = nil
-+ return nil
-+ }
-+
-+ return fmt.Errorf("pq: cannot convert %T to StringArray", src)
-+}
-+
-+func (a *StringArray) scanBytes(src []byte) error {
-+ elems, err := scanLinearArray(src, []byte{','}, "StringArray")
-+ if err != nil {
-+ return err
-+ }
-+ if *a != nil && len(elems) == 0 {
-+ *a = (*a)[:0]
-+ } else {
-+ b := make(StringArray, len(elems))
-+ for i, v := range elems {
-+ if b[i] = string(v); v == nil {
-+ return fmt.Errorf("pq: parsing array element index %d: cannot convert nil to string", i)
-+ }
-+ }
-+ *a = b
-+ }
-+ return nil
-+}
-+
-+// Value implements the driver.Valuer interface.
-+func (a StringArray) Value() (driver.Value, error) {
-+ if a == nil {
-+ return nil, nil
-+ }
-+
-+ if n := len(a); n > 0 {
-+ // There will be at least two curly brackets, 2*N bytes of quotes,
-+ // and N-1 bytes of delimiters.
-+ b := make([]byte, 1, 1+3*n)
-+ b[0] = '{'
-+
-+ b = appendArrayQuotedBytes(b, []byte(a[0]))
-+ for i := 1; i < n; i++ {
-+ b = append(b, ',')
-+ b = appendArrayQuotedBytes(b, []byte(a[i]))
-+ }
-+
-+ return string(append(b, '}')), nil
-+ }
-+
-+ return "{}", nil
-+}
-+
-+// appendArray appends rv to the buffer, returning the extended buffer and
-+// the delimiter used between elements.
-+//
-+// It panics when n <= 0 or rv's Kind is not reflect.Array nor reflect.Slice.
-+func appendArray(b []byte, rv reflect.Value, n int) ([]byte, string, error) {
-+ var del string
-+ var err error
-+
-+ b = append(b, '{')
-+
-+ if b, del, err = appendArrayElement(b, rv.Index(0)); err != nil {
-+ return b, del, err
-+ }
-+
-+ for i := 1; i < n; i++ {
-+ b = append(b, del...)
-+ if b, del, err = appendArrayElement(b, rv.Index(i)); err != nil {
-+ return b, del, err
-+ }
-+ }
-+
-+ return append(b, '}'), del, nil
-+}
-+
-+// appendArrayElement appends rv to the buffer, returning the extended buffer
-+// and the delimiter to use before the next element.
-+//
-+// When rv's Kind is neither reflect.Array nor reflect.Slice, it is converted
-+// using driver.DefaultParameterConverter and the resulting []byte or string
-+// is double-quoted.
-+//
-+// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO
-+func appendArrayElement(b []byte, rv reflect.Value) ([]byte, string, error) {
-+ if k := rv.Kind(); k == reflect.Array || k == reflect.Slice {
-+ if t := rv.Type(); t != typeByteSlice && !t.Implements(typeDriverValuer) {
-+ if n := rv.Len(); n > 0 {
-+ return appendArray(b, rv, n)
-+ }
-+
-+ return b, "", nil
-+ }
-+ }
-+
-+ var del = ","
-+ var err error
-+ var iv interface{} = rv.Interface()
-+
-+ if ad, ok := iv.(ArrayDelimiter); ok {
-+ del = ad.ArrayDelimiter()
-+ }
-+
-+ if iv, err = driver.DefaultParameterConverter.ConvertValue(iv); err != nil {
-+ return b, del, err
-+ }
-+
-+ switch v := iv.(type) {
-+ case nil:
-+ return append(b, "NULL"...), del, nil
-+ case []byte:
-+ return appendArrayQuotedBytes(b, v), del, nil
-+ case string:
-+ return appendArrayQuotedBytes(b, []byte(v)), del, nil
-+ }
-+
-+ b, err = appendValue(b, iv)
-+ return b, del, err
-+}
-+
-+func appendArrayQuotedBytes(b, v []byte) []byte {
-+ b = append(b, '"')
-+ for {
-+ i := bytes.IndexAny(v, `"\`)
-+ if i < 0 {
-+ b = append(b, v...)
-+ break
-+ }
-+ if i > 0 {
-+ b = append(b, v[:i]...)
-+ }
-+ b = append(b, '\\', v[i])
-+ v = v[i+1:]
-+ }
-+ return append(b, '"')
-+}
-+
-+func appendValue(b []byte, v driver.Value) ([]byte, error) {
-+ return append(b, encode(nil, v, 0)...), nil
-+}
-+
-+// parseArray extracts the dimensions and elements of an array represented in
-+// text format. Only representations emitted by the backend are supported.
-+// Notably, whitespace around brackets and delimiters is significant, and NULL
-+// is case-sensitive.
-+//
-+// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO
-+func parseArray(src, del []byte) (dims []int, elems [][]byte, err error) {
-+ var depth, i int
-+
-+ if len(src) < 1 || src[0] != '{' {
-+ return nil, nil, fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '{', 0)
-+ }
-+
-+Open:
-+ for i < len(src) {
-+ switch src[i] {
-+ case '{':
-+ depth++
-+ i++
-+ case '}':
-+ elems = make([][]byte, 0)
-+ goto Close
-+ default:
-+ break Open
-+ }
-+ }
-+ dims = make([]int, i)
-+
-+Element:
-+ for i < len(src) {
-+ switch src[i] {
-+ case '{':
-+ if depth == len(dims) {
-+ break Element
-+ }
-+ depth++
-+ dims[depth-1] = 0
-+ i++
-+ case '"':
-+ var elem = []byte{}
-+ var escape bool
-+ for i++; i < len(src); i++ {
-+ if escape {
-+ elem = append(elem, src[i])
-+ escape = false
-+ } else {
-+ switch src[i] {
-+ default:
-+ elem = append(elem, src[i])
-+ case '\\':
-+ escape = true
-+ case '"':
-+ elems = append(elems, elem)
-+ i++
-+ break Element
-+ }
-+ }
-+ }
-+ default:
-+ for start := i; i < len(src); i++ {
-+ if bytes.HasPrefix(src[i:], del) || src[i] == '}' {
-+ elem := src[start:i]
-+ if len(elem) == 0 {
-+ return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i)
-+ }
-+ if bytes.Equal(elem, []byte("NULL")) {
-+ elem = nil
-+ }
-+ elems = append(elems, elem)
-+ break Element
-+ }
-+ }
-+ }
-+ }
-+
-+ for i < len(src) {
-+ if bytes.HasPrefix(src[i:], del) && depth > 0 {
-+ dims[depth-1]++
-+ i += len(del)
-+ goto Element
-+ } else if src[i] == '}' && depth > 0 {
-+ dims[depth-1]++
-+ depth--
-+ i++
-+ } else {
-+ return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i)
-+ }
-+ }
-+
-+Close:
-+ for i < len(src) {
-+ if src[i] == '}' && depth > 0 {
-+ depth--
-+ i++
-+ } else {
-+ return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i)
-+ }
-+ }
-+ if depth > 0 {
-+ err = fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '}', i)
-+ }
-+ if err == nil {
-+ for _, d := range dims {
-+ if (len(elems) % d) != 0 {
-+ err = fmt.Errorf("pq: multidimensional arrays must have elements with matching dimensions")
-+ }
-+ }
-+ }
-+ return
-+}
-+
-+func scanLinearArray(src, del []byte, typ string) (elems [][]byte, err error) {
-+ dims, elems, err := parseArray(src, del)
-+ if err != nil {
-+ return nil, err
-+ }
-+ if len(dims) > 1 {
-+ return nil, fmt.Errorf("pq: cannot convert ARRAY%s to %s", strings.Replace(fmt.Sprint(dims), " ", "][", -1), typ)
-+ }
-+ return elems, err
-+}
-diff --git a/vendor/github.com/lib/pq/array_test.go b/vendor/github.com/lib/pq/array_test.go
-new file mode 100644
-index 00000000000..f724bcd882f
---- /dev/null
-+++ b/vendor/github.com/lib/pq/array_test.go
-@@ -0,0 +1,1311 @@
-+package pq
-+
-+import (
-+ "bytes"
-+ "database/sql"
-+ "database/sql/driver"
-+ "math/rand"
-+ "reflect"
-+ "strings"
-+ "testing"
-+)
-+
-+func TestParseArray(t *testing.T) {
-+ for _, tt := range []struct {
-+ input string
-+ delim string
-+ dims []int
-+ elems [][]byte
-+ }{
-+ {`{}`, `,`, nil, [][]byte{}},
-+ {`{NULL}`, `,`, []int{1}, [][]byte{nil}},
-+ {`{a}`, `,`, []int{1}, [][]byte{{'a'}}},
-+ {`{a,b}`, `,`, []int{2}, [][]byte{{'a'}, {'b'}}},
-+ {`{{a,b}}`, `,`, []int{1, 2}, [][]byte{{'a'}, {'b'}}},
-+ {`{{a},{b}}`, `,`, []int{2, 1}, [][]byte{{'a'}, {'b'}}},
-+ {`{{{a,b},{c,d},{e,f}}}`, `,`, []int{1, 3, 2}, [][]byte{
-+ {'a'}, {'b'}, {'c'}, {'d'}, {'e'}, {'f'},
-+ }},
-+ {`{""}`, `,`, []int{1}, [][]byte{{}}},
-+ {`{","}`, `,`, []int{1}, [][]byte{{','}}},
-+ {`{",",","}`, `,`, []int{2}, [][]byte{{','}, {','}}},
-+ {`{{",",","}}`, `,`, []int{1, 2}, [][]byte{{','}, {','}}},
-+ {`{{","},{","}}`, `,`, []int{2, 1}, [][]byte{{','}, {','}}},
-+ {`{{{",",","},{",",","},{",",","}}}`, `,`, []int{1, 3, 2}, [][]byte{
-+ {','}, {','}, {','}, {','}, {','}, {','},
-+ }},
-+ {`{"\"}"}`, `,`, []int{1}, [][]byte{{'"', '}'}}},
-+ {`{"\"","\""}`, `,`, []int{2}, [][]byte{{'"'}, {'"'}}},
-+ {`{{"\"","\""}}`, `,`, []int{1, 2}, [][]byte{{'"'}, {'"'}}},
-+ {`{{"\""},{"\""}}`, `,`, []int{2, 1}, [][]byte{{'"'}, {'"'}}},
-+ {`{{{"\"","\""},{"\"","\""},{"\"","\""}}}`, `,`, []int{1, 3, 2}, [][]byte{
-+ {'"'}, {'"'}, {'"'}, {'"'}, {'"'}, {'"'},
-+ }},
-+ {`{axyzb}`, `xyz`, []int{2}, [][]byte{{'a'}, {'b'}}},
-+ } {
-+ dims, elems, err := parseArray([]byte(tt.input), []byte(tt.delim))
-+
-+ if err != nil {
-+ t.Fatalf("Expected no error for %q, got %q", tt.input, err)
-+ }
-+ if !reflect.DeepEqual(dims, tt.dims) {
-+ t.Errorf("Expected %v dimensions for %q, got %v", tt.dims, tt.input, dims)
-+ }
-+ if !reflect.DeepEqual(elems, tt.elems) {
-+ t.Errorf("Expected %v elements for %q, got %v", tt.elems, tt.input, elems)
-+ }
-+ }
-+}
-+
-+func TestParseArrayError(t *testing.T) {
-+ for _, tt := range []struct {
-+ input, err string
-+ }{
-+ {``, "expected '{' at offset 0"},
-+ {`x`, "expected '{' at offset 0"},
-+ {`}`, "expected '{' at offset 0"},
-+ {`{`, "expected '}' at offset 1"},
-+ {`{{}`, "expected '}' at offset 3"},
-+ {`{}}`, "unexpected '}' at offset 2"},
-+ {`{,}`, "unexpected ',' at offset 1"},
-+ {`{,x}`, "unexpected ',' at offset 1"},
-+ {`{x,}`, "unexpected '}' at offset 3"},
-+ {`{x,{`, "unexpected '{' at offset 3"},
-+ {`{x},`, "unexpected ',' at offset 3"},
-+ {`{x}}`, "unexpected '}' at offset 3"},
-+ {`{{x}`, "expected '}' at offset 4"},
-+ {`{""x}`, "unexpected 'x' at offset 3"},
-+ {`{{a},{b,c}}`, "multidimensional arrays must have elements with matching dimensions"},
-+ } {
-+ _, _, err := parseArray([]byte(tt.input), []byte{','})
-+
-+ if err == nil {
-+ t.Fatalf("Expected error for %q, got none", tt.input)
-+ }
-+ if !strings.Contains(err.Error(), tt.err) {
-+ t.Errorf("Expected error to contain %q for %q, got %q", tt.err, tt.input, err)
-+ }
-+ }
-+}
-+
-+func TestArrayScanner(t *testing.T) {
-+ var s sql.Scanner = Array(&[]bool{})
-+ if _, ok := s.(*BoolArray); !ok {
-+ t.Errorf("Expected *BoolArray, got %T", s)
-+ }
-+
-+ s = Array(&[]float64{})
-+ if _, ok := s.(*Float64Array); !ok {
-+ t.Errorf("Expected *Float64Array, got %T", s)
-+ }
-+
-+ s = Array(&[]int64{})
-+ if _, ok := s.(*Int64Array); !ok {
-+ t.Errorf("Expected *Int64Array, got %T", s)
-+ }
-+
-+ s = Array(&[]string{})
-+ if _, ok := s.(*StringArray); !ok {
-+ t.Errorf("Expected *StringArray, got %T", s)
-+ }
-+
-+ for _, tt := range []interface{}{
-+ &[]sql.Scanner{},
-+ &[][]bool{},
-+ &[][]float64{},
-+ &[][]int64{},
-+ &[][]string{},
-+ } {
-+ s = Array(tt)
-+ if _, ok := s.(GenericArray); !ok {
-+ t.Errorf("Expected GenericArray for %T, got %T", tt, s)
-+ }
-+ }
-+}
-+
-+func TestArrayValuer(t *testing.T) {
-+ var v driver.Valuer = Array([]bool{})
-+ if _, ok := v.(*BoolArray); !ok {
-+ t.Errorf("Expected *BoolArray, got %T", v)
-+ }
-+
-+ v = Array([]float64{})
-+ if _, ok := v.(*Float64Array); !ok {
-+ t.Errorf("Expected *Float64Array, got %T", v)
-+ }
-+
-+ v = Array([]int64{})
-+ if _, ok := v.(*Int64Array); !ok {
-+ t.Errorf("Expected *Int64Array, got %T", v)
-+ }
-+
-+ v = Array([]string{})
-+ if _, ok := v.(*StringArray); !ok {
-+ t.Errorf("Expected *StringArray, got %T", v)
-+ }
-+
-+ for _, tt := range []interface{}{
-+ nil,
-+ []driver.Value{},
-+ [][]bool{},
-+ [][]float64{},
-+ [][]int64{},
-+ [][]string{},
-+ } {
-+ v = Array(tt)
-+ if _, ok := v.(GenericArray); !ok {
-+ t.Errorf("Expected GenericArray for %T, got %T", tt, v)
-+ }
-+ }
-+}
-+
-+func TestBoolArrayScanUnsupported(t *testing.T) {
-+ var arr BoolArray
-+ err := arr.Scan(1)
-+
-+ if err == nil {
-+ t.Fatal("Expected error when scanning from int")
-+ }
-+ if !strings.Contains(err.Error(), "int to BoolArray") {
-+ t.Errorf("Expected type to be mentioned when scanning, got %q", err)
-+ }
-+}
-+
-+func TestBoolArrayScanEmpty(t *testing.T) {
-+ var arr BoolArray
-+ err := arr.Scan(`{}`)
-+
-+ if err != nil {
-+ t.Fatalf("Expected no error, got %v", err)
-+ }
-+ if arr == nil || len(arr) != 0 {
-+ t.Errorf("Expected empty, got %#v", arr)
-+ }
-+}
-+
-+func TestBoolArrayScanNil(t *testing.T) {
-+ arr := BoolArray{true, true, true}
-+ err := arr.Scan(nil)
-+
-+ if err != nil {
-+ t.Fatalf("Expected no error, got %v", err)
-+ }
-+ if arr != nil {
-+ t.Errorf("Expected nil, got %+v", arr)
-+ }
-+}
-+
-+var BoolArrayStringTests = []struct {
-+ str string
-+ arr BoolArray
-+}{
-+ {`{}`, BoolArray{}},
-+ {`{t}`, BoolArray{true}},
-+ {`{f,t}`, BoolArray{false, true}},
-+}
-+
-+func TestBoolArrayScanBytes(t *testing.T) {
-+ for _, tt := range BoolArrayStringTests {
-+ bytes := []byte(tt.str)
-+ arr := BoolArray{true, true, true}
-+ err := arr.Scan(bytes)
-+
-+ if err != nil {
-+ t.Fatalf("Expected no error for %q, got %v", bytes, err)
-+ }
-+ if !reflect.DeepEqual(arr, tt.arr) {
-+ t.Errorf("Expected %+v for %q, got %+v", tt.arr, bytes, arr)
-+ }
-+ }
-+}
-+
-+func BenchmarkBoolArrayScanBytes(b *testing.B) {
-+ var a BoolArray
-+ var x interface{} = []byte(`{t,f,t,f,t,f,t,f,t,f}`)
-+
-+ for i := 0; i < b.N; i++ {
-+ a = BoolArray{}
-+ a.Scan(x)
-+ }
-+}
-+
-+func TestBoolArrayScanString(t *testing.T) {
-+ for _, tt := range BoolArrayStringTests {
-+ arr := BoolArray{true, true, true}
-+ err := arr.Scan(tt.str)
-+
-+ if err != nil {
-+ t.Fatalf("Expected no error for %q, got %v", tt.str, err)
-+ }
-+ if !reflect.DeepEqual(arr, tt.arr) {
-+ t.Errorf("Expected %+v for %q, got %+v", tt.arr, tt.str, arr)
-+ }
-+ }
-+}
-+
-+func TestBoolArrayScanError(t *testing.T) {
-+ for _, tt := range []struct {
-+ input, err string
-+ }{
-+ {``, "unable to parse array"},
-+ {`{`, "unable to parse array"},
-+ {`{{t},{f}}`, "cannot convert ARRAY[2][1] to BoolArray"},
-+ {`{NULL}`, `could not parse boolean array index 0: invalid boolean ""`},
-+ {`{a}`, `could not parse boolean array index 0: invalid boolean "a"`},
-+ {`{t,b}`, `could not parse boolean array index 1: invalid boolean "b"`},
-+ {`{t,f,cd}`, `could not parse boolean array index 2: invalid boolean "cd"`},
-+ } {
-+ arr := BoolArray{true, true, true}
-+ err := arr.Scan(tt.input)
-+
-+ if err == nil {
-+ t.Fatalf("Expected error for %q, got none", tt.input)
-+ }
-+ if !strings.Contains(err.Error(), tt.err) {
-+ t.Errorf("Expected error to contain %q for %q, got %q", tt.err, tt.input, err)
-+ }
-+ if !reflect.DeepEqual(arr, BoolArray{true, true, true}) {
-+ t.Errorf("Expected destination not to change for %q, got %+v", tt.input, arr)
-+ }
-+ }
-+}
-+
-+func TestBoolArrayValue(t *testing.T) {
-+ result, err := BoolArray(nil).Value()
-+
-+ if err != nil {
-+ t.Fatalf("Expected no error for nil, got %v", err)
-+ }
-+ if result != nil {
-+ t.Errorf("Expected nil, got %q", result)
-+ }
-+
-+ result, err = BoolArray([]bool{}).Value()
-+
-+ if err != nil {
-+ t.Fatalf("Expected no error for empty, got %v", err)
-+ }
-+ if expected := `{}`; !reflect.DeepEqual(result, expected) {
-+ t.Errorf("Expected empty, got %q", result)
-+ }
-+
-+ result, err = BoolArray([]bool{false, true, false}).Value()
-+
-+ if err != nil {
-+ t.Fatalf("Expected no error, got %v", err)
-+ }
-+ if expected := `{f,t,f}`; !reflect.DeepEqual(result, expected) {
-+ t.Errorf("Expected %q, got %q", expected, result)
-+ }
-+}
-+
-+func BenchmarkBoolArrayValue(b *testing.B) {
-+ rand.Seed(1)
-+ x := make([]bool, 10)
-+ for i := 0; i < len(x); i++ {
-+ x[i] = rand.Intn(2) == 0
-+ }
-+ a := BoolArray(x)
-+
-+ for i := 0; i < b.N; i++ {
-+ a.Value()
-+ }
-+}
-+
-+func TestByteaArrayScanUnsupported(t *testing.T) {
-+ var arr ByteaArray
-+ err := arr.Scan(1)
-+
-+ if err == nil {
-+ t.Fatal("Expected error when scanning from int")
-+ }
-+ if !strings.Contains(err.Error(), "int to ByteaArray") {
-+ t.Errorf("Expected type to be mentioned when scanning, got %q", err)
-+ }
-+}
-+
-+func TestByteaArrayScanEmpty(t *testing.T) {
-+ var arr ByteaArray
-+ err := arr.Scan(`{}`)
-+
-+ if err != nil {
-+ t.Fatalf("Expected no error, got %v", err)
-+ }
-+ if arr == nil || len(arr) != 0 {
-+ t.Errorf("Expected empty, got %#v", arr)
-+ }
-+}
-+
-+func TestByteaArrayScanNil(t *testing.T) {
-+ arr := ByteaArray{{2}, {6}, {0, 0}}
-+ err := arr.Scan(nil)
-+
-+ if err != nil {
-+ t.Fatalf("Expected no error, got %v", err)
-+ }
-+ if arr != nil {
-+ t.Errorf("Expected nil, got %+v", arr)
-+ }
-+}
-+
-+var ByteaArrayStringTests = []struct {
-+ str string
-+ arr ByteaArray
-+}{
-+ {`{}`, ByteaArray{}},
-+ {`{NULL}`, ByteaArray{nil}},
-+ {`{"\\xfeff"}`, ByteaArray{{'\xFE', '\xFF'}}},
-+ {`{"\\xdead","\\xbeef"}`, ByteaArray{{'\xDE', '\xAD'}, {'\xBE', '\xEF'}}},
-+}
-+
-+func TestByteaArrayScanBytes(t *testing.T) {
-+ for _, tt := range ByteaArrayStringTests {
-+ bytes := []byte(tt.str)
-+ arr := ByteaArray{{2}, {6}, {0, 0}}
-+ err := arr.Scan(bytes)
-+
-+ if err != nil {
-+ t.Fatalf("Expected no error for %q, got %v", bytes, err)
-+ }
-+ if !reflect.DeepEqual(arr, tt.arr) {
-+ t.Errorf("Expected %+v for %q, got %+v", tt.arr, bytes, arr)
-+ }
-+ }
-+}
-+
-+func BenchmarkByteaArrayScanBytes(b *testing.B) {
-+ var a ByteaArray
-+ var x interface{} = []byte(`{"\\xfe","\\xff","\\xdead","\\xbeef","\\xfe","\\xff","\\xdead","\\xbeef","\\xfe","\\xff"}`)
-+
-+ for i := 0; i < b.N; i++ {
-+ a = ByteaArray{}
-+ a.Scan(x)
-+ }
-+}
-+
-+func TestByteaArrayScanString(t *testing.T) {
-+ for _, tt := range ByteaArrayStringTests {
-+ arr := ByteaArray{{2}, {6}, {0, 0}}
-+ err := arr.Scan(tt.str)
-+
-+ if err != nil {
-+ t.Fatalf("Expected no error for %q, got %v", tt.str, err)
-+ }
-+ if !reflect.DeepEqual(arr, tt.arr) {
-+ t.Errorf("Expected %+v for %q, got %+v", tt.arr, tt.str, arr)
-+ }
-+ }
-+}
-+
-+func TestByteaArrayScanError(t *testing.T) {
-+ for _, tt := range []struct {
-+ input, err string
-+ }{
-+ {``, "unable to parse array"},
-+ {`{`, "unable to parse array"},
-+ {`{{"\\xfeff"},{"\\xbeef"}}`, "cannot convert ARRAY[2][1] to ByteaArray"},
-+ {`{"\\abc"}`, "could not parse bytea array index 0: could not parse bytea value"},
-+ } {
-+ arr := ByteaArray{{2}, {6}, {0, 0}}
-+ err := arr.Scan(tt.input)
-+
-+ if err == nil {
-+ t.Fatalf("Expected error for %q, got none", tt.input)
-+ }
-+ if !strings.Contains(err.Error(), tt.err) {
-+ t.Errorf("Expected error to contain %q for %q, got %q", tt.err, tt.input, err)
-+ }
-+ if !reflect.DeepEqual(arr, ByteaArray{{2}, {6}, {0, 0}}) {
-+ t.Errorf("Expected destination not to change for %q, got %+v", tt.input, arr)
-+ }
-+ }
-+}
-+
-+func TestByteaArrayValue(t *testing.T) {
-+ result, err := ByteaArray(nil).Value()
-+
-+ if err != nil {
-+ t.Fatalf("Expected no error for nil, got %v", err)
-+ }
-+ if result != nil {
-+ t.Errorf("Expected nil, got %q", result)
-+ }
-+
-+ result, err = ByteaArray([][]byte{}).Value()
-+
-+ if err != nil {
-+ t.Fatalf("Expected no error for empty, got %v", err)
-+ }
-+ if expected := `{}`; !reflect.DeepEqual(result, expected) {
-+ t.Errorf("Expected empty, got %q", result)
-+ }
-+
-+ result, err = ByteaArray([][]byte{{'\xDE', '\xAD', '\xBE', '\xEF'}, {'\xFE', '\xFF'}, {}}).Value()
-+
-+ if err != nil {
-+ t.Fatalf("Expected no error, got %v", err)
-+ }
-+ if expected := `{"\\xdeadbeef","\\xfeff","\\x"}`; !reflect.DeepEqual(result, expected) {
-+ t.Errorf("Expected %q, got %q", expected, result)
-+ }
-+}
-+
-+func BenchmarkByteaArrayValue(b *testing.B) {
-+ rand.Seed(1)
-+ x := make([][]byte, 10)
-+ for i := 0; i < len(x); i++ {
-+ x[i] = make([]byte, len(x))
-+ for j := 0; j < len(x); j++ {
-+ x[i][j] = byte(rand.Int())
-+ }
-+ }
-+ a := ByteaArray(x)
-+
-+ for i := 0; i < b.N; i++ {
-+ a.Value()
-+ }
-+}
-+
-+func TestFloat64ArrayScanUnsupported(t *testing.T) {
-+ var arr Float64Array
-+ err := arr.Scan(true)
-+
-+ if err == nil {
-+ t.Fatal("Expected error when scanning from bool")
-+ }
-+ if !strings.Contains(err.Error(), "bool to Float64Array") {
-+ t.Errorf("Expected type to be mentioned when scanning, got %q", err)
-+ }
-+}
-+
-+func TestFloat64ArrayScanEmpty(t *testing.T) {
-+ var arr Float64Array
-+ err := arr.Scan(`{}`)
-+
-+ if err != nil {
-+ t.Fatalf("Expected no error, got %v", err)
-+ }
-+ if arr == nil || len(arr) != 0 {
-+ t.Errorf("Expected empty, got %#v", arr)
-+ }
-+}
-+
-+func TestFloat64ArrayScanNil(t *testing.T) {
-+ arr := Float64Array{5, 5, 5}
-+ err := arr.Scan(nil)
-+
-+ if err != nil {
-+ t.Fatalf("Expected no error, got %v", err)
-+ }
-+ if arr != nil {
-+ t.Errorf("Expected nil, got %+v", arr)
-+ }
-+}
-+
-+var Float64ArrayStringTests = []struct {
-+ str string
-+ arr Float64Array
-+}{
-+ {`{}`, Float64Array{}},
-+ {`{1.2}`, Float64Array{1.2}},
-+ {`{3.456,7.89}`, Float64Array{3.456, 7.89}},
-+ {`{3,1,2}`, Float64Array{3, 1, 2}},
-+}
-+
-+func TestFloat64ArrayScanBytes(t *testing.T) {
-+ for _, tt := range Float64ArrayStringTests {
-+ bytes := []byte(tt.str)
-+ arr := Float64Array{5, 5, 5}
-+ err := arr.Scan(bytes)
-+
-+ if err != nil {
-+ t.Fatalf("Expected no error for %q, got %v", bytes, err)
-+ }
-+ if !reflect.DeepEqual(arr, tt.arr) {
-+ t.Errorf("Expected %+v for %q, got %+v", tt.arr, bytes, arr)
-+ }
-+ }
-+}
-+
-+func BenchmarkFloat64ArrayScanBytes(b *testing.B) {
-+ var a Float64Array
-+ var x interface{} = []byte(`{1.2,3.4,5.6,7.8,9.01,2.34,5.67,8.90,1.234,5.678}`)
-+
-+ for i := 0; i < b.N; i++ {
-+ a = Float64Array{}
-+ a.Scan(x)
-+ }
-+}
-+
-+func TestFloat64ArrayScanString(t *testing.T) {
-+ for _, tt := range Float64ArrayStringTests {
-+ arr := Float64Array{5, 5, 5}
-+ err := arr.Scan(tt.str)
-+
-+ if err != nil {
-+ t.Fatalf("Expected no error for %q, got %v", tt.str, err)
-+ }
-+ if !reflect.DeepEqual(arr, tt.arr) {
-+ t.Errorf("Expected %+v for %q, got %+v", tt.arr, tt.str, arr)
-+ }
-+ }
-+}
-+
-+func TestFloat64ArrayScanError(t *testing.T) {
-+ for _, tt := range []struct {
-+ input, err string
-+ }{
-+ {``, "unable to parse array"},
-+ {`{`, "unable to parse array"},
-+ {`{{5.6},{7.8}}`, "cannot convert ARRAY[2][1] to Float64Array"},
-+ {`{NULL}`, "parsing array element index 0:"},
-+ {`{a}`, "parsing array element index 0:"},
-+ {`{5.6,a}`, "parsing array element index 1:"},
-+ {`{5.6,7.8,a}`, "parsing array element index 2:"},
-+ } {
-+ arr := Float64Array{5, 5, 5}
-+ err := arr.Scan(tt.input)
-+
-+ if err == nil {
-+ t.Fatalf("Expected error for %q, got none", tt.input)
-+ }
-+ if !strings.Contains(err.Error(), tt.err) {
-+ t.Errorf("Expected error to contain %q for %q, got %q", tt.err, tt.input, err)
-+ }
-+ if !reflect.DeepEqual(arr, Float64Array{5, 5, 5}) {
-+ t.Errorf("Expected destination not to change for %q, got %+v", tt.input, arr)
-+ }
-+ }
-+}
-+
-+func TestFloat64ArrayValue(t *testing.T) {
-+ result, err := Float64Array(nil).Value()
-+
-+ if err != nil {
-+ t.Fatalf("Expected no error for nil, got %v", err)
-+ }
-+ if result != nil {
-+ t.Errorf("Expected nil, got %q", result)
-+ }
-+
-+ result, err = Float64Array([]float64{}).Value()
-+
-+ if err != nil {
-+ t.Fatalf("Expected no error for empty, got %v", err)
-+ }
-+ if expected := `{}`; !reflect.DeepEqual(result, expected) {
-+ t.Errorf("Expected empty, got %q", result)
-+ }
-+
-+ result, err = Float64Array([]float64{1.2, 3.4, 5.6}).Value()
-+
-+ if err != nil {
-+ t.Fatalf("Expected no error, got %v", err)
-+ }
-+ if expected := `{1.2,3.4,5.6}`; !reflect.DeepEqual(result, expected) {
-+ t.Errorf("Expected %q, got %q", expected, result)
-+ }
-+}
-+
-+func BenchmarkFloat64ArrayValue(b *testing.B) {
-+ rand.Seed(1)
-+ x := make([]float64, 10)
-+ for i := 0; i < len(x); i++ {
-+ x[i] = rand.NormFloat64()
-+ }
-+ a := Float64Array(x)
-+
-+ for i := 0; i < b.N; i++ {
-+ a.Value()
-+ }
-+}
-+
-+func TestInt64ArrayScanUnsupported(t *testing.T) {
-+ var arr Int64Array
-+ err := arr.Scan(true)
-+
-+ if err == nil {
-+ t.Fatal("Expected error when scanning from bool")
-+ }
-+ if !strings.Contains(err.Error(), "bool to Int64Array") {
-+ t.Errorf("Expected type to be mentioned when scanning, got %q", err)
-+ }
-+}
-+
-+func TestInt64ArrayScanEmpty(t *testing.T) {
-+ var arr Int64Array
-+ err := arr.Scan(`{}`)
-+
-+ if err != nil {
-+ t.Fatalf("Expected no error, got %v", err)
-+ }
-+ if arr == nil || len(arr) != 0 {
-+ t.Errorf("Expected empty, got %#v", arr)
-+ }
-+}
-+
-+func TestInt64ArrayScanNil(t *testing.T) {
-+ arr := Int64Array{5, 5, 5}
-+ err := arr.Scan(nil)
-+
-+ if err != nil {
-+ t.Fatalf("Expected no error, got %v", err)
-+ }
-+ if arr != nil {
-+ t.Errorf("Expected nil, got %+v", arr)
-+ }
-+}
-+
-+var Int64ArrayStringTests = []struct {
-+ str string
-+ arr Int64Array
-+}{
-+ {`{}`, Int64Array{}},
-+ {`{12}`, Int64Array{12}},
-+ {`{345,678}`, Int64Array{345, 678}},
-+}
-+
-+func TestInt64ArrayScanBytes(t *testing.T) {
-+ for _, tt := range Int64ArrayStringTests {
-+ bytes := []byte(tt.str)
-+ arr := Int64Array{5, 5, 5}
-+ err := arr.Scan(bytes)
-+
-+ if err != nil {
-+ t.Fatalf("Expected no error for %q, got %v", bytes, err)
-+ }
-+ if !reflect.DeepEqual(arr, tt.arr) {
-+ t.Errorf("Expected %+v for %q, got %+v", tt.arr, bytes, arr)
-+ }
-+ }
-+}
-+
-+func BenchmarkInt64ArrayScanBytes(b *testing.B) {
-+ var a Int64Array
-+ var x interface{} = []byte(`{1,2,3,4,5,6,7,8,9,0}`)
-+
-+ for i := 0; i < b.N; i++ {
-+ a = Int64Array{}
-+ a.Scan(x)
-+ }
-+}
-+
-+func TestInt64ArrayScanString(t *testing.T) {
-+ for _, tt := range Int64ArrayStringTests {
-+ arr := Int64Array{5, 5, 5}
-+ err := arr.Scan(tt.str)
-+
-+ if err != nil {
-+ t.Fatalf("Expected no error for %q, got %v", tt.str, err)
-+ }
-+ if !reflect.DeepEqual(arr, tt.arr) {
-+ t.Errorf("Expected %+v for %q, got %+v", tt.arr, tt.str, arr)
-+ }
-+ }
-+}
-+
-+func TestInt64ArrayScanError(t *testing.T) {
-+ for _, tt := range []struct {
-+ input, err string
-+ }{
-+ {``, "unable to parse array"},
-+ {`{`, "unable to parse array"},
-+ {`{{5},{6}}`, "cannot convert ARRAY[2][1] to Int64Array"},
-+ {`{NULL}`, "parsing array element index 0:"},
-+ {`{a}`, "parsing array element index 0:"},
-+ {`{5,a}`, "parsing array element index 1:"},
-+ {`{5,6,a}`, "parsing array element index 2:"},
-+ } {
-+ arr := Int64Array{5, 5, 5}
-+ err := arr.Scan(tt.input)
-+
-+ if err == nil {
-+ t.Fatalf("Expected error for %q, got none", tt.input)
-+ }
-+ if !strings.Contains(err.Error(), tt.err) {
-+ t.Errorf("Expected error to contain %q for %q, got %q", tt.err, tt.input, err)
-+ }
-+ if !reflect.DeepEqual(arr, Int64Array{5, 5, 5}) {
-+ t.Errorf("Expected destination not to change for %q, got %+v", tt.input, arr)
-+ }
-+ }
-+}
-+
-+func TestInt64ArrayValue(t *testing.T) {
-+ result, err := Int64Array(nil).Value()
-+
-+ if err != nil {
-+ t.Fatalf("Expected no error for nil, got %v", err)
-+ }
-+ if result != nil {
-+ t.Errorf("Expected nil, got %q", result)
-+ }
-+
-+ result, err = Int64Array([]int64{}).Value()
-+
-+ if err != nil {
-+ t.Fatalf("Expected no error for empty, got %v", err)
-+ }
-+ if expected := `{}`; !reflect.DeepEqual(result, expected) {
-+ t.Errorf("Expected empty, got %q", result)
-+ }
-+
-+ result, err = Int64Array([]int64{1, 2, 3}).Value()
-+
-+ if err != nil {
-+ t.Fatalf("Expected no error, got %v", err)
-+ }
-+ if expected := `{1,2,3}`; !reflect.DeepEqual(result, expected) {
-+ t.Errorf("Expected %q, got %q", expected, result)
-+ }
-+}
-+
-+func BenchmarkInt64ArrayValue(b *testing.B) {
-+ rand.Seed(1)
-+ x := make([]int64, 10)
-+ for i := 0; i < len(x); i++ {
-+ x[i] = rand.Int63()
-+ }
-+ a := Int64Array(x)
-+
-+ for i := 0; i < b.N; i++ {
-+ a.Value()
-+ }
-+}
-+
-+func TestStringArrayScanUnsupported(t *testing.T) {
-+ var arr StringArray
-+ err := arr.Scan(true)
-+
-+ if err == nil {
-+ t.Fatal("Expected error when scanning from bool")
-+ }
-+ if !strings.Contains(err.Error(), "bool to StringArray") {
-+ t.Errorf("Expected type to be mentioned when scanning, got %q", err)
-+ }
-+}
-+
-+func TestStringArrayScanEmpty(t *testing.T) {
-+ var arr StringArray
-+ err := arr.Scan(`{}`)
-+
-+ if err != nil {
-+ t.Fatalf("Expected no error, got %v", err)
-+ }
-+ if arr == nil || len(arr) != 0 {
-+ t.Errorf("Expected empty, got %#v", arr)
-+ }
-+}
-+
-+func TestStringArrayScanNil(t *testing.T) {
-+ arr := StringArray{"x", "x", "x"}
-+ err := arr.Scan(nil)
-+
-+ if err != nil {
-+ t.Fatalf("Expected no error, got %v", err)
-+ }
-+ if arr != nil {
-+ t.Errorf("Expected nil, got %+v", arr)
-+ }
-+}
-+
-+var StringArrayStringTests = []struct {
-+ str string
-+ arr StringArray
-+}{
-+ {`{}`, StringArray{}},
-+ {`{t}`, StringArray{"t"}},
-+ {`{f,1}`, StringArray{"f", "1"}},
-+ {`{"a\\b","c d",","}`, StringArray{"a\\b", "c d", ","}},
-+}
-+
-+func TestStringArrayScanBytes(t *testing.T) {
-+ for _, tt := range StringArrayStringTests {
-+ bytes := []byte(tt.str)
-+ arr := StringArray{"x", "x", "x"}
-+ err := arr.Scan(bytes)
-+
-+ if err != nil {
-+ t.Fatalf("Expected no error for %q, got %v", bytes, err)
-+ }
-+ if !reflect.DeepEqual(arr, tt.arr) {
-+ t.Errorf("Expected %+v for %q, got %+v", tt.arr, bytes, arr)
-+ }
-+ }
-+}
-+
-+func BenchmarkStringArrayScanBytes(b *testing.B) {
-+ var a StringArray
-+ var x interface{} = []byte(`{a,b,c,d,e,f,g,h,i,j}`)
-+ var y interface{} = []byte(`{"\a","\b","\c","\d","\e","\f","\g","\h","\i","\j"}`)
-+
-+ for i := 0; i < b.N; i++ {
-+ a = StringArray{}
-+ a.Scan(x)
-+ a = StringArray{}
-+ a.Scan(y)
-+ }
-+}
-+
-+func TestStringArrayScanString(t *testing.T) {
-+ for _, tt := range StringArrayStringTests {
-+ arr := StringArray{"x", "x", "x"}
-+ err := arr.Scan(tt.str)
-+
-+ if err != nil {
-+ t.Fatalf("Expected no error for %q, got %v", tt.str, err)
-+ }
-+ if !reflect.DeepEqual(arr, tt.arr) {
-+ t.Errorf("Expected %+v for %q, got %+v", tt.arr, tt.str, arr)
-+ }
-+ }
-+}
-+
-+func TestStringArrayScanError(t *testing.T) {
-+ for _, tt := range []struct {
-+ input, err string
-+ }{
-+ {``, "unable to parse array"},
-+ {`{`, "unable to parse array"},
-+ {`{{a},{b}}`, "cannot convert ARRAY[2][1] to StringArray"},
-+ {`{NULL}`, "parsing array element index 0: cannot convert nil to string"},
-+ {`{a,NULL}`, "parsing array element index 1: cannot convert nil to string"},
-+ {`{a,b,NULL}`, "parsing array element index 2: cannot convert nil to string"},
-+ } {
-+ arr := StringArray{"x", "x", "x"}
-+ err := arr.Scan(tt.input)
-+
-+ if err == nil {
-+ t.Fatalf("Expected error for %q, got none", tt.input)
-+ }
-+ if !strings.Contains(err.Error(), tt.err) {
-+ t.Errorf("Expected error to contain %q for %q, got %q", tt.err, tt.input, err)
-+ }
-+ if !reflect.DeepEqual(arr, StringArray{"x", "x", "x"}) {
-+ t.Errorf("Expected destination not to change for %q, got %+v", tt.input, arr)
-+ }
-+ }
-+}
-+
-+func TestStringArrayValue(t *testing.T) {
-+ result, err := StringArray(nil).Value()
-+
-+ if err != nil {
-+ t.Fatalf("Expected no error for nil, got %v", err)
-+ }
-+ if result != nil {
-+ t.Errorf("Expected nil, got %q", result)
-+ }
-+
-+ result, err = StringArray([]string{}).Value()
-+
-+ if err != nil {
-+ t.Fatalf("Expected no error for empty, got %v", err)
-+ }
-+ if expected := `{}`; !reflect.DeepEqual(result, expected) {
-+ t.Errorf("Expected empty, got %q", result)
-+ }
-+
-+ result, err = StringArray([]string{`a`, `\b`, `c"`, `d,e`}).Value()
-+
-+ if err != nil {
-+ t.Fatalf("Expected no error, got %v", err)
-+ }
-+ if expected := `{"a","\\b","c\"","d,e"}`; !reflect.DeepEqual(result, expected) {
-+ t.Errorf("Expected %q, got %q", expected, result)
-+ }
-+}
-+
-+func BenchmarkStringArrayValue(b *testing.B) {
-+ x := make([]string, 10)
-+ for i := 0; i < len(x); i++ {
-+ x[i] = strings.Repeat(`abc"def\ghi`, 5)
-+ }
-+ a := StringArray(x)
-+
-+ for i := 0; i < b.N; i++ {
-+ a.Value()
-+ }
-+}
-+
-+func TestGenericArrayScanUnsupported(t *testing.T) {
-+ var s string
-+ var ss []string
-+ var nsa [1]sql.NullString
-+
-+ for _, tt := range []struct {
-+ src, dest interface{}
-+ err string
-+ }{
-+ {nil, nil, "destination is not a pointer to array or slice"},
-+ {nil, true, "destination bool is not a pointer to array or slice"},
-+ {nil, &s, "destination *string is not a pointer to array or slice"},
-+ {nil, ss, "destination []string is not a pointer to array or slice"},
-+ {nil, &nsa, " to [1]sql.NullString"},
-+ {true, &ss, "bool to []string"},
-+ {`{{x}}`, &ss, "multidimensional ARRAY[1][1] is not implemented"},
-+ {`{{x},{x}}`, &ss, "multidimensional ARRAY[2][1] is not implemented"},
-+ {`{x}`, &ss, "scanning to string is not implemented"},
-+ } {
-+ err := GenericArray{tt.dest}.Scan(tt.src)
-+
-+ if err == nil {
-+ t.Fatalf("Expected error for [%#v %#v]", tt.src, tt.dest)
-+ }
-+ if !strings.Contains(err.Error(), tt.err) {
-+ t.Errorf("Expected error to contain %q for [%#v %#v], got %q", tt.err, tt.src, tt.dest, err)
-+ }
-+ }
-+}
-+
-+func TestGenericArrayScanScannerArrayBytes(t *testing.T) {
-+ src, expected, nsa := []byte(`{NULL,abc,"\""}`),
-+ [3]sql.NullString{{}, {String: `abc`, Valid: true}, {String: `"`, Valid: true}},
-+ [3]sql.NullString{{String: ``, Valid: true}, {}, {}}
-+
-+ if err := (GenericArray{&nsa}).Scan(src); err != nil {
-+ t.Fatalf("Expected no error, got %v", err)
-+ }
-+ if !reflect.DeepEqual(nsa, expected) {
-+ t.Errorf("Expected %v, got %v", expected, nsa)
-+ }
-+}
-+
-+func TestGenericArrayScanScannerArrayString(t *testing.T) {
-+ src, expected, nsa := `{NULL,"\"",xyz}`,
-+ [3]sql.NullString{{}, {String: `"`, Valid: true}, {String: `xyz`, Valid: true}},
-+ [3]sql.NullString{{String: ``, Valid: true}, {}, {}}
-+
-+ if err := (GenericArray{&nsa}).Scan(src); err != nil {
-+ t.Fatalf("Expected no error, got %v", err)
-+ }
-+ if !reflect.DeepEqual(nsa, expected) {
-+ t.Errorf("Expected %v, got %v", expected, nsa)
-+ }
-+}
-+
-+func TestGenericArrayScanScannerSliceEmpty(t *testing.T) {
-+ var nss []sql.NullString
-+
-+ if err := (GenericArray{&nss}).Scan(`{}`); err != nil {
-+ t.Fatalf("Expected no error, got %v", err)
-+ }
-+ if nss == nil || len(nss) != 0 {
-+ t.Errorf("Expected empty, got %#v", nss)
-+ }
-+}
-+
-+func TestGenericArrayScanScannerSliceNil(t *testing.T) {
-+ nss := []sql.NullString{{String: ``, Valid: true}, {}}
-+
-+ if err := (GenericArray{&nss}).Scan(nil); err != nil {
-+ t.Fatalf("Expected no error, got %v", err)
-+ }
-+ if nss != nil {
-+ t.Errorf("Expected nil, got %+v", nss)
-+ }
-+}
-+
-+func TestGenericArrayScanScannerSliceBytes(t *testing.T) {
-+ src, expected, nss := []byte(`{NULL,abc,"\""}`),
-+ []sql.NullString{{}, {String: `abc`, Valid: true}, {String: `"`, Valid: true}},
-+ []sql.NullString{{String: ``, Valid: true}, {}, {}, {}, {}}
-+
-+ if err := (GenericArray{&nss}).Scan(src); err != nil {
-+ t.Fatalf("Expected no error, got %v", err)
-+ }
-+ if !reflect.DeepEqual(nss, expected) {
-+ t.Errorf("Expected %v, got %v", expected, nss)
-+ }
-+}
-+
-+func BenchmarkGenericArrayScanScannerSliceBytes(b *testing.B) {
-+ var a GenericArray
-+ var x interface{} = []byte(`{a,b,c,d,e,f,g,h,i,j}`)
-+ var y interface{} = []byte(`{"\a","\b","\c","\d","\e","\f","\g","\h","\i","\j"}`)
-+
-+ for i := 0; i < b.N; i++ {
-+ a = GenericArray{new([]sql.NullString)}
-+ a.Scan(x)
-+ a = GenericArray{new([]sql.NullString)}
-+ a.Scan(y)
-+ }
-+}
-+
-+func TestGenericArrayScanScannerSliceString(t *testing.T) {
-+ src, expected, nss := `{NULL,"\"",xyz}`,
-+ []sql.NullString{{}, {String: `"`, Valid: true}, {String: `xyz`, Valid: true}},
-+ []sql.NullString{{String: ``, Valid: true}, {}, {}}
-+
-+ if err := (GenericArray{&nss}).Scan(src); err != nil {
-+ t.Fatalf("Expected no error, got %v", err)
-+ }
-+ if !reflect.DeepEqual(nss, expected) {
-+ t.Errorf("Expected %v, got %v", expected, nss)
-+ }
-+}
-+
-+type TildeNullInt64 struct{ sql.NullInt64 }
-+
-+func (TildeNullInt64) ArrayDelimiter() string { return "~" }
-+
-+func TestGenericArrayScanDelimiter(t *testing.T) {
-+ src, expected, tnis := `{12~NULL~76}`,
-+ []TildeNullInt64{{sql.NullInt64{Int64: 12, Valid: true}}, {}, {sql.NullInt64{Int64: 76, Valid: true}}},
-+ []TildeNullInt64{{sql.NullInt64{Int64: 0, Valid: true}}, {}}
-+
-+ if err := (GenericArray{&tnis}).Scan(src); err != nil {
-+ t.Fatalf("Expected no error for %#v, got %v", src, err)
-+ }
-+ if !reflect.DeepEqual(tnis, expected) {
-+ t.Errorf("Expected %v for %#v, got %v", expected, src, tnis)
-+ }
-+}
-+
-+func TestGenericArrayScanErrors(t *testing.T) {
-+ var sa [1]string
-+ var nis []sql.NullInt64
-+ var pss *[]string
-+
-+ for _, tt := range []struct {
-+ src, dest interface{}
-+ err string
-+ }{
-+ {nil, pss, "destination *[]string is nil"},
-+ {`{`, &sa, "unable to parse"},
-+ {`{}`, &sa, "cannot convert ARRAY[0] to [1]string"},
-+ {`{x,x}`, &sa, "cannot convert ARRAY[2] to [1]string"},
-+ {`{x}`, &nis, `parsing array element index 0: converting`},
-+ } {
-+ err := GenericArray{tt.dest}.Scan(tt.src)
-+
-+ if err == nil {
-+ t.Fatalf("Expected error for [%#v %#v]", tt.src, tt.dest)
-+ }
-+ if !strings.Contains(err.Error(), tt.err) {
-+ t.Errorf("Expected error to contain %q for [%#v %#v], got %q", tt.err, tt.src, tt.dest, err)
-+ }
-+ }
-+}
-+
-+func TestGenericArrayValueUnsupported(t *testing.T) {
-+ _, err := GenericArray{true}.Value()
-+
-+ if err == nil {
-+ t.Fatal("Expected error for bool")
-+ }
-+ if !strings.Contains(err.Error(), "bool to array") {
-+ t.Errorf("Expected type to be mentioned, got %q", err)
-+ }
-+}
-+
-+type ByteArrayValuer [1]byte
-+type ByteSliceValuer []byte
-+type FuncArrayValuer struct {
-+ delimiter func() string
-+ value func() (driver.Value, error)
-+}
-+
-+func (a ByteArrayValuer) Value() (driver.Value, error) { return a[:], nil }
-+func (b ByteSliceValuer) Value() (driver.Value, error) { return []byte(b), nil }
-+func (f FuncArrayValuer) ArrayDelimiter() string { return f.delimiter() }
-+func (f FuncArrayValuer) Value() (driver.Value, error) { return f.value() }
-+
-+func TestGenericArrayValue(t *testing.T) {
-+ result, err := GenericArray{nil}.Value()
-+
-+ if err != nil {
-+ t.Fatalf("Expected no error for nil, got %v", err)
-+ }
-+ if result != nil {
-+ t.Errorf("Expected nil, got %q", result)
-+ }
-+
-+ for _, tt := range []interface{}{
-+ []bool(nil),
-+ [][]int(nil),
-+ []*int(nil),
-+ []sql.NullString(nil),
-+ } {
-+ result, err := GenericArray{tt}.Value()
-+
-+ if err != nil {
-+ t.Fatalf("Expected no error for %#v, got %v", tt, err)
-+ }
-+ if result != nil {
-+ t.Errorf("Expected nil for %#v, got %q", tt, result)
-+ }
-+ }
-+
-+ Tilde := func(v driver.Value) FuncArrayValuer {
-+ return FuncArrayValuer{
-+ func() string { return "~" },
-+ func() (driver.Value, error) { return v, nil }}
-+ }
-+
-+ for _, tt := range []struct {
-+ result string
-+ input interface{}
-+ }{
-+ {`{}`, []bool{}},
-+ {`{true}`, []bool{true}},
-+ {`{true,false}`, []bool{true, false}},
-+ {`{true,false}`, [2]bool{true, false}},
-+
-+ {`{}`, [][]int{{}}},
-+ {`{}`, [][]int{{}, {}}},
-+ {`{{1}}`, [][]int{{1}}},
-+ {`{{1},{2}}`, [][]int{{1}, {2}}},
-+ {`{{1,2},{3,4}}`, [][]int{{1, 2}, {3, 4}}},
-+ {`{{1,2},{3,4}}`, [2][2]int{{1, 2}, {3, 4}}},
-+
-+ {`{"a","\\b","c\"","d,e"}`, []string{`a`, `\b`, `c"`, `d,e`}},
-+ {`{"a","\\b","c\"","d,e"}`, [][]byte{{'a'}, {'\\', 'b'}, {'c', '"'}, {'d', ',', 'e'}}},
-+
-+ {`{NULL}`, []*int{nil}},
-+ {`{0,NULL}`, []*int{new(int), nil}},
-+
-+ {`{NULL}`, []sql.NullString{{}}},
-+ {`{"\"",NULL}`, []sql.NullString{{String: `"`, Valid: true}, {}}},
-+
-+ {`{"a","b"}`, []ByteArrayValuer{{'a'}, {'b'}}},
-+ {`{{"a","b"},{"c","d"}}`, [][]ByteArrayValuer{{{'a'}, {'b'}}, {{'c'}, {'d'}}}},
-+
-+ {`{"e","f"}`, []ByteSliceValuer{{'e'}, {'f'}}},
-+ {`{{"e","f"},{"g","h"}}`, [][]ByteSliceValuer{{{'e'}, {'f'}}, {{'g'}, {'h'}}}},
-+
-+ {`{1~2}`, []FuncArrayValuer{Tilde(int64(1)), Tilde(int64(2))}},
-+ {`{{1~2}~{3~4}}`, [][]FuncArrayValuer{{Tilde(int64(1)), Tilde(int64(2))}, {Tilde(int64(3)), Tilde(int64(4))}}},
-+ } {
-+ result, err := GenericArray{tt.input}.Value()
-+
-+ if err != nil {
-+ t.Fatalf("Expected no error for %q, got %v", tt.input, err)
-+ }
-+ if !reflect.DeepEqual(result, tt.result) {
-+ t.Errorf("Expected %q for %q, got %q", tt.result, tt.input, result)
-+ }
-+ }
-+}
-+
-+func TestGenericArrayValueErrors(t *testing.T) {
-+ v := []interface{}{func() {}}
-+ if _, err := (GenericArray{v}).Value(); err == nil {
-+ t.Errorf("Expected error for %q, got nil", v)
-+ }
-+
-+ v = []interface{}{nil, func() {}}
-+ if _, err := (GenericArray{v}).Value(); err == nil {
-+ t.Errorf("Expected error for %q, got nil", v)
-+ }
-+}
-+
-+func BenchmarkGenericArrayValueBools(b *testing.B) {
-+ rand.Seed(1)
-+ x := make([]bool, 10)
-+ for i := 0; i < len(x); i++ {
-+ x[i] = rand.Intn(2) == 0
-+ }
-+ a := GenericArray{x}
-+
-+ for i := 0; i < b.N; i++ {
-+ a.Value()
-+ }
-+}
-+
-+func BenchmarkGenericArrayValueFloat64s(b *testing.B) {
-+ rand.Seed(1)
-+ x := make([]float64, 10)
-+ for i := 0; i < len(x); i++ {
-+ x[i] = rand.NormFloat64()
-+ }
-+ a := GenericArray{x}
-+
-+ for i := 0; i < b.N; i++ {
-+ a.Value()
-+ }
-+}
-+
-+func BenchmarkGenericArrayValueInt64s(b *testing.B) {
-+ rand.Seed(1)
-+ x := make([]int64, 10)
-+ for i := 0; i < len(x); i++ {
-+ x[i] = rand.Int63()
-+ }
-+ a := GenericArray{x}
-+
-+ for i := 0; i < b.N; i++ {
-+ a.Value()
-+ }
-+}
-+
-+func BenchmarkGenericArrayValueByteSlices(b *testing.B) {
-+ x := make([][]byte, 10)
-+ for i := 0; i < len(x); i++ {
-+ x[i] = bytes.Repeat([]byte(`abc"def\ghi`), 5)
-+ }
-+ a := GenericArray{x}
-+
-+ for i := 0; i < b.N; i++ {
-+ a.Value()
-+ }
-+}
-+
-+func BenchmarkGenericArrayValueStrings(b *testing.B) {
-+ x := make([]string, 10)
-+ for i := 0; i < len(x); i++ {
-+ x[i] = strings.Repeat(`abc"def\ghi`, 5)
-+ }
-+ a := GenericArray{x}
-+
-+ for i := 0; i < b.N; i++ {
-+ a.Value()
-+ }
-+}
-+
-+func TestArrayScanBackend(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ for _, tt := range []struct {
-+ s string
-+ d sql.Scanner
-+ e interface{}
-+ }{
-+ {`ARRAY[true, false]`, new(BoolArray), &BoolArray{true, false}},
-+ {`ARRAY[E'\\xdead', E'\\xbeef']`, new(ByteaArray), &ByteaArray{{'\xDE', '\xAD'}, {'\xBE', '\xEF'}}},
-+ {`ARRAY[1.2, 3.4]`, new(Float64Array), &Float64Array{1.2, 3.4}},
-+ {`ARRAY[1, 2, 3]`, new(Int64Array), &Int64Array{1, 2, 3}},
-+ {`ARRAY['a', E'\\b', 'c"', 'd,e']`, new(StringArray), &StringArray{`a`, `\b`, `c"`, `d,e`}},
-+ } {
-+ err := db.QueryRow(`SELECT ` + tt.s).Scan(tt.d)
-+ if err != nil {
-+ t.Errorf("Expected no error when scanning %s into %T, got %v", tt.s, tt.d, err)
-+ }
-+ if !reflect.DeepEqual(tt.d, tt.e) {
-+ t.Errorf("Expected %v when scanning %s into %T, got %v", tt.e, tt.s, tt.d, tt.d)
-+ }
-+ }
-+}
-+
-+func TestArrayValueBackend(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ for _, tt := range []struct {
-+ s string
-+ v driver.Valuer
-+ }{
-+ {`ARRAY[true, false]`, BoolArray{true, false}},
-+ {`ARRAY[E'\\xdead', E'\\xbeef']`, ByteaArray{{'\xDE', '\xAD'}, {'\xBE', '\xEF'}}},
-+ {`ARRAY[1.2, 3.4]`, Float64Array{1.2, 3.4}},
-+ {`ARRAY[1, 2, 3]`, Int64Array{1, 2, 3}},
-+ {`ARRAY['a', E'\\b', 'c"', 'd,e']`, StringArray{`a`, `\b`, `c"`, `d,e`}},
-+ } {
-+ var x int
-+ err := db.QueryRow(`SELECT 1 WHERE `+tt.s+` <> $1`, tt.v).Scan(&x)
-+ if err != sql.ErrNoRows {
-+ t.Errorf("Expected %v to equal %s, got %v", tt.v, tt.s, err)
-+ }
-+ }
-+}
-diff --git a/vendor/github.com/lib/pq/bench_test.go b/vendor/github.com/lib/pq/bench_test.go
-new file mode 100644
-index 00000000000..b3754980ac1
---- /dev/null
-+++ b/vendor/github.com/lib/pq/bench_test.go
-@@ -0,0 +1,434 @@
-+package pq
-+
-+import (
-+ "bufio"
-+ "bytes"
-+ "context"
-+ "database/sql"
-+ "database/sql/driver"
-+ "io"
-+ "math/rand"
-+ "net"
-+ "runtime"
-+ "strconv"
-+ "strings"
-+ "sync"
-+ "testing"
-+ "time"
-+
-+ "github.com/lib/pq/oid"
-+)
-+
-+var (
-+ selectStringQuery = "SELECT '" + strings.Repeat("0123456789", 10) + "'"
-+ selectSeriesQuery = "SELECT generate_series(1, 100)"
-+)
-+
-+func BenchmarkSelectString(b *testing.B) {
-+ var result string
-+ benchQuery(b, selectStringQuery, &result)
-+}
-+
-+func BenchmarkSelectSeries(b *testing.B) {
-+ var result int
-+ benchQuery(b, selectSeriesQuery, &result)
-+}
-+
-+func benchQuery(b *testing.B, query string, result interface{}) {
-+ b.StopTimer()
-+ db := openTestConn(b)
-+ defer db.Close()
-+ b.StartTimer()
-+
-+ for i := 0; i < b.N; i++ {
-+ benchQueryLoop(b, db, query, result)
-+ }
-+}
-+
-+func benchQueryLoop(b *testing.B, db *sql.DB, query string, result interface{}) {
-+ rows, err := db.Query(query)
-+ if err != nil {
-+ b.Fatal(err)
-+ }
-+ defer rows.Close()
-+ for rows.Next() {
-+ err = rows.Scan(result)
-+ if err != nil {
-+ b.Fatal("failed to scan", err)
-+ }
-+ }
-+}
-+
-+// reading from circularConn yields content[:prefixLen] once, followed by
-+// content[prefixLen:] over and over again. It never returns EOF.
-+type circularConn struct {
-+ content string
-+ prefixLen int
-+ pos int
-+ net.Conn // for all other net.Conn methods that will never be called
-+}
-+
-+func (r *circularConn) Read(b []byte) (n int, err error) {
-+ n = copy(b, r.content[r.pos:])
-+ r.pos += n
-+ if r.pos >= len(r.content) {
-+ r.pos = r.prefixLen
-+ }
-+ return
-+}
-+
-+func (r *circularConn) Write(b []byte) (n int, err error) { return len(b), nil }
-+
-+func (r *circularConn) Close() error { return nil }
-+
-+func fakeConn(content string, prefixLen int) *conn {
-+ c := &circularConn{content: content, prefixLen: prefixLen}
-+ return &conn{buf: bufio.NewReader(c), c: c}
-+}
-+
-+// This benchmark is meant to be the same as BenchmarkSelectString, but takes
-+// out some of the factors this package can't control. The numbers are less noisy,
-+// but also the costs of network communication aren't accurately represented.
-+func BenchmarkMockSelectString(b *testing.B) {
-+ b.StopTimer()
-+ // taken from a recorded run of BenchmarkSelectString
-+ // See: http://www.postgresql.org/docs/current/static/protocol-message-formats.html
-+ const response = "1\x00\x00\x00\x04" +
-+ "t\x00\x00\x00\x06\x00\x00" +
-+ "T\x00\x00\x00!\x00\x01?column?\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xc1\xff\xfe\xff\xff\xff\xff\x00\x00" +
-+ "Z\x00\x00\x00\x05I" +
-+ "2\x00\x00\x00\x04" +
-+ "D\x00\x00\x00n\x00\x01\x00\x00\x00d0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" +
-+ "C\x00\x00\x00\rSELECT 1\x00" +
-+ "Z\x00\x00\x00\x05I" +
-+ "3\x00\x00\x00\x04" +
-+ "Z\x00\x00\x00\x05I"
-+ c := fakeConn(response, 0)
-+ b.StartTimer()
-+
-+ for i := 0; i < b.N; i++ {
-+ benchMockQuery(b, c, selectStringQuery)
-+ }
-+}
-+
-+var seriesRowData = func() string {
-+ var buf bytes.Buffer
-+ for i := 1; i <= 100; i++ {
-+ digits := byte(2)
-+ if i >= 100 {
-+ digits = 3
-+ } else if i < 10 {
-+ digits = 1
-+ }
-+ buf.WriteString("D\x00\x00\x00")
-+ buf.WriteByte(10 + digits)
-+ buf.WriteString("\x00\x01\x00\x00\x00")
-+ buf.WriteByte(digits)
-+ buf.WriteString(strconv.Itoa(i))
-+ }
-+ return buf.String()
-+}()
-+
-+func BenchmarkMockSelectSeries(b *testing.B) {
-+ b.StopTimer()
-+ var response = "1\x00\x00\x00\x04" +
-+ "t\x00\x00\x00\x06\x00\x00" +
-+ "T\x00\x00\x00!\x00\x01?column?\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xc1\xff\xfe\xff\xff\xff\xff\x00\x00" +
-+ "Z\x00\x00\x00\x05I" +
-+ "2\x00\x00\x00\x04" +
-+ seriesRowData +
-+ "C\x00\x00\x00\x0fSELECT 100\x00" +
-+ "Z\x00\x00\x00\x05I" +
-+ "3\x00\x00\x00\x04" +
-+ "Z\x00\x00\x00\x05I"
-+ c := fakeConn(response, 0)
-+ b.StartTimer()
-+
-+ for i := 0; i < b.N; i++ {
-+ benchMockQuery(b, c, selectSeriesQuery)
-+ }
-+}
-+
-+func benchMockQuery(b *testing.B, c *conn, query string) {
-+ stmt, err := c.Prepare(query)
-+ if err != nil {
-+ b.Fatal(err)
-+ }
-+ defer stmt.Close()
-+ rows, err := stmt.(driver.StmtQueryContext).QueryContext(context.Background(), nil)
-+ if err != nil {
-+ b.Fatal(err)
-+ }
-+ defer rows.Close()
-+ var dest [1]driver.Value
-+ for {
-+ if err := rows.Next(dest[:]); err != nil {
-+ if err == io.EOF {
-+ break
-+ }
-+ b.Fatal(err)
-+ }
-+ }
-+}
-+
-+func BenchmarkPreparedSelectString(b *testing.B) {
-+ var result string
-+ benchPreparedQuery(b, selectStringQuery, &result)
-+}
-+
-+func BenchmarkPreparedSelectSeries(b *testing.B) {
-+ var result int
-+ benchPreparedQuery(b, selectSeriesQuery, &result)
-+}
-+
-+func benchPreparedQuery(b *testing.B, query string, result interface{}) {
-+ b.StopTimer()
-+ db := openTestConn(b)
-+ defer db.Close()
-+ stmt, err := db.Prepare(query)
-+ if err != nil {
-+ b.Fatal(err)
-+ }
-+ defer stmt.Close()
-+ b.StartTimer()
-+
-+ for i := 0; i < b.N; i++ {
-+ benchPreparedQueryLoop(b, db, stmt, result)
-+ }
-+}
-+
-+func benchPreparedQueryLoop(b *testing.B, db *sql.DB, stmt *sql.Stmt, result interface{}) {
-+ rows, err := stmt.Query()
-+ if err != nil {
-+ b.Fatal(err)
-+ }
-+ if !rows.Next() {
-+ rows.Close()
-+ b.Fatal("no rows")
-+ }
-+ defer rows.Close()
-+ for rows.Next() {
-+ err = rows.Scan(&result)
-+ if err != nil {
-+ b.Fatal("failed to scan")
-+ }
-+ }
-+}
-+
-+// See the comment for BenchmarkMockSelectString.
-+func BenchmarkMockPreparedSelectString(b *testing.B) {
-+ b.StopTimer()
-+ const parseResponse = "1\x00\x00\x00\x04" +
-+ "t\x00\x00\x00\x06\x00\x00" +
-+ "T\x00\x00\x00!\x00\x01?column?\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xc1\xff\xfe\xff\xff\xff\xff\x00\x00" +
-+ "Z\x00\x00\x00\x05I"
-+ const responses = parseResponse +
-+ "2\x00\x00\x00\x04" +
-+ "D\x00\x00\x00n\x00\x01\x00\x00\x00d0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" +
-+ "C\x00\x00\x00\rSELECT 1\x00" +
-+ "Z\x00\x00\x00\x05I"
-+ c := fakeConn(responses, len(parseResponse))
-+
-+ stmt, err := c.Prepare(selectStringQuery)
-+ if err != nil {
-+ b.Fatal(err)
-+ }
-+ b.StartTimer()
-+
-+ for i := 0; i < b.N; i++ {
-+ benchPreparedMockQuery(b, c, stmt)
-+ }
-+}
-+
-+func BenchmarkMockPreparedSelectSeries(b *testing.B) {
-+ b.StopTimer()
-+ const parseResponse = "1\x00\x00\x00\x04" +
-+ "t\x00\x00\x00\x06\x00\x00" +
-+ "T\x00\x00\x00!\x00\x01?column?\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xc1\xff\xfe\xff\xff\xff\xff\x00\x00" +
-+ "Z\x00\x00\x00\x05I"
-+ var responses = parseResponse +
-+ "2\x00\x00\x00\x04" +
-+ seriesRowData +
-+ "C\x00\x00\x00\x0fSELECT 100\x00" +
-+ "Z\x00\x00\x00\x05I"
-+ c := fakeConn(responses, len(parseResponse))
-+
-+ stmt, err := c.Prepare(selectSeriesQuery)
-+ if err != nil {
-+ b.Fatal(err)
-+ }
-+ b.StartTimer()
-+
-+ for i := 0; i < b.N; i++ {
-+ benchPreparedMockQuery(b, c, stmt)
-+ }
-+}
-+
-+func benchPreparedMockQuery(b *testing.B, c *conn, stmt driver.Stmt) {
-+ rows, err := stmt.(driver.StmtQueryContext).QueryContext(context.Background(), nil)
-+ if err != nil {
-+ b.Fatal(err)
-+ }
-+ defer rows.Close()
-+ var dest [1]driver.Value
-+ for {
-+ if err := rows.Next(dest[:]); err != nil {
-+ if err == io.EOF {
-+ break
-+ }
-+ b.Fatal(err)
-+ }
-+ }
-+}
-+
-+func BenchmarkEncodeInt64(b *testing.B) {
-+ for i := 0; i < b.N; i++ {
-+ encode(¶meterStatus{}, int64(1234), oid.T_int8)
-+ }
-+}
-+
-+func BenchmarkEncodeFloat64(b *testing.B) {
-+ for i := 0; i < b.N; i++ {
-+ encode(¶meterStatus{}, 3.14159, oid.T_float8)
-+ }
-+}
-+
-+var testByteString = []byte("abcdefghijklmnopqrstuvwxyz")
-+
-+func BenchmarkEncodeByteaHex(b *testing.B) {
-+ for i := 0; i < b.N; i++ {
-+ encode(¶meterStatus{serverVersion: 90000}, testByteString, oid.T_bytea)
-+ }
-+}
-+func BenchmarkEncodeByteaEscape(b *testing.B) {
-+ for i := 0; i < b.N; i++ {
-+ encode(¶meterStatus{serverVersion: 84000}, testByteString, oid.T_bytea)
-+ }
-+}
-+
-+func BenchmarkEncodeBool(b *testing.B) {
-+ for i := 0; i < b.N; i++ {
-+ encode(¶meterStatus{}, true, oid.T_bool)
-+ }
-+}
-+
-+var testTimestamptz = time.Date(2001, time.January, 1, 0, 0, 0, 0, time.Local)
-+
-+func BenchmarkEncodeTimestamptz(b *testing.B) {
-+ for i := 0; i < b.N; i++ {
-+ encode(¶meterStatus{}, testTimestamptz, oid.T_timestamptz)
-+ }
-+}
-+
-+var testIntBytes = []byte("1234")
-+
-+func BenchmarkDecodeInt64(b *testing.B) {
-+ for i := 0; i < b.N; i++ {
-+ decode(¶meterStatus{}, testIntBytes, oid.T_int8, formatText)
-+ }
-+}
-+
-+var testFloatBytes = []byte("3.14159")
-+
-+func BenchmarkDecodeFloat64(b *testing.B) {
-+ for i := 0; i < b.N; i++ {
-+ decode(¶meterStatus{}, testFloatBytes, oid.T_float8, formatText)
-+ }
-+}
-+
-+var testBoolBytes = []byte{'t'}
-+
-+func BenchmarkDecodeBool(b *testing.B) {
-+ for i := 0; i < b.N; i++ {
-+ decode(¶meterStatus{}, testBoolBytes, oid.T_bool, formatText)
-+ }
-+}
-+
-+func TestDecodeBool(t *testing.T) {
-+ db := openTestConn(t)
-+ rows, err := db.Query("select true")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ rows.Close()
-+}
-+
-+var testTimestamptzBytes = []byte("2013-09-17 22:15:32.360754-07")
-+
-+func BenchmarkDecodeTimestamptz(b *testing.B) {
-+ for i := 0; i < b.N; i++ {
-+ decode(¶meterStatus{}, testTimestamptzBytes, oid.T_timestamptz, formatText)
-+ }
-+}
-+
-+func BenchmarkDecodeTimestamptzMultiThread(b *testing.B) {
-+ oldProcs := runtime.GOMAXPROCS(0)
-+ defer runtime.GOMAXPROCS(oldProcs)
-+ runtime.GOMAXPROCS(runtime.NumCPU())
-+ globalLocationCache = newLocationCache()
-+
-+ f := func(wg *sync.WaitGroup, loops int) {
-+ defer wg.Done()
-+ for i := 0; i < loops; i++ {
-+ decode(¶meterStatus{}, testTimestamptzBytes, oid.T_timestamptz, formatText)
-+ }
-+ }
-+
-+ wg := &sync.WaitGroup{}
-+ b.ResetTimer()
-+ for j := 0; j < 10; j++ {
-+ wg.Add(1)
-+ go f(wg, b.N/10)
-+ }
-+ wg.Wait()
-+}
-+
-+func BenchmarkLocationCache(b *testing.B) {
-+ globalLocationCache = newLocationCache()
-+ for i := 0; i < b.N; i++ {
-+ globalLocationCache.getLocation(rand.Intn(10000))
-+ }
-+}
-+
-+func BenchmarkLocationCacheMultiThread(b *testing.B) {
-+ oldProcs := runtime.GOMAXPROCS(0)
-+ defer runtime.GOMAXPROCS(oldProcs)
-+ runtime.GOMAXPROCS(runtime.NumCPU())
-+ globalLocationCache = newLocationCache()
-+
-+ f := func(wg *sync.WaitGroup, loops int) {
-+ defer wg.Done()
-+ for i := 0; i < loops; i++ {
-+ globalLocationCache.getLocation(rand.Intn(10000))
-+ }
-+ }
-+
-+ wg := &sync.WaitGroup{}
-+ b.ResetTimer()
-+ for j := 0; j < 10; j++ {
-+ wg.Add(1)
-+ go f(wg, b.N/10)
-+ }
-+ wg.Wait()
-+}
-+
-+// Stress test the performance of parsing results from the wire.
-+func BenchmarkResultParsing(b *testing.B) {
-+ b.StopTimer()
-+
-+ db := openTestConn(b)
-+ defer db.Close()
-+ _, err := db.Exec("BEGIN")
-+ if err != nil {
-+ b.Fatal(err)
-+ }
-+
-+ b.StartTimer()
-+ for i := 0; i < b.N; i++ {
-+ res, err := db.Query("SELECT generate_series(1, 50000)")
-+ if err != nil {
-+ b.Fatal(err)
-+ }
-+ res.Close()
-+ }
-+}
-diff --git a/vendor/github.com/lib/pq/buf.go b/vendor/github.com/lib/pq/buf.go
-new file mode 100644
-index 00000000000..4b0a0a8f7e9
---- /dev/null
-+++ b/vendor/github.com/lib/pq/buf.go
-@@ -0,0 +1,91 @@
-+package pq
-+
-+import (
-+ "bytes"
-+ "encoding/binary"
-+
-+ "github.com/lib/pq/oid"
-+)
-+
-+type readBuf []byte
-+
-+func (b *readBuf) int32() (n int) {
-+ n = int(int32(binary.BigEndian.Uint32(*b)))
-+ *b = (*b)[4:]
-+ return
-+}
-+
-+func (b *readBuf) oid() (n oid.Oid) {
-+ n = oid.Oid(binary.BigEndian.Uint32(*b))
-+ *b = (*b)[4:]
-+ return
-+}
-+
-+// N.B: this is actually an unsigned 16-bit integer, unlike int32
-+func (b *readBuf) int16() (n int) {
-+ n = int(binary.BigEndian.Uint16(*b))
-+ *b = (*b)[2:]
-+ return
-+}
-+
-+func (b *readBuf) string() string {
-+ i := bytes.IndexByte(*b, 0)
-+ if i < 0 {
-+ errorf("invalid message format; expected string terminator")
-+ }
-+ s := (*b)[:i]
-+ *b = (*b)[i+1:]
-+ return string(s)
-+}
-+
-+func (b *readBuf) next(n int) (v []byte) {
-+ v = (*b)[:n]
-+ *b = (*b)[n:]
-+ return
-+}
-+
-+func (b *readBuf) byte() byte {
-+ return b.next(1)[0]
-+}
-+
-+type writeBuf struct {
-+ buf []byte
-+ pos int
-+}
-+
-+func (b *writeBuf) int32(n int) {
-+ x := make([]byte, 4)
-+ binary.BigEndian.PutUint32(x, uint32(n))
-+ b.buf = append(b.buf, x...)
-+}
-+
-+func (b *writeBuf) int16(n int) {
-+ x := make([]byte, 2)
-+ binary.BigEndian.PutUint16(x, uint16(n))
-+ b.buf = append(b.buf, x...)
-+}
-+
-+func (b *writeBuf) string(s string) {
-+ b.buf = append(append(b.buf, s...), '\000')
-+}
-+
-+func (b *writeBuf) byte(c byte) {
-+ b.buf = append(b.buf, c)
-+}
-+
-+func (b *writeBuf) bytes(v []byte) {
-+ b.buf = append(b.buf, v...)
-+}
-+
-+func (b *writeBuf) wrap() []byte {
-+ p := b.buf[b.pos:]
-+ binary.BigEndian.PutUint32(p, uint32(len(p)))
-+ return b.buf
-+}
-+
-+func (b *writeBuf) next(c byte) {
-+ p := b.buf[b.pos:]
-+ binary.BigEndian.PutUint32(p, uint32(len(p)))
-+ b.pos = len(b.buf) + 1
-+ b.buf = append(b.buf, c, 0, 0, 0, 0)
-+}
-diff --git a/vendor/github.com/lib/pq/buf_test.go b/vendor/github.com/lib/pq/buf_test.go
-new file mode 100644
-index 00000000000..df88d38b427
---- /dev/null
-+++ b/vendor/github.com/lib/pq/buf_test.go
-@@ -0,0 +1,16 @@
-+package pq
-+
-+import "testing"
-+
-+func Benchmark_writeBuf_string(b *testing.B) {
-+ var buf writeBuf
-+ const s = "foo"
-+
-+ b.ReportAllocs()
-+ b.ResetTimer()
-+
-+ for i := 0; i < b.N; i++ {
-+ buf.string(s)
-+ buf.buf = buf.buf[:0]
-+ }
-+}
-diff --git a/vendor/github.com/lib/pq/certs/README b/vendor/github.com/lib/pq/certs/README
-new file mode 100644
-index 00000000000..24ab7b25698
---- /dev/null
-+++ b/vendor/github.com/lib/pq/certs/README
-@@ -0,0 +1,3 @@
-+This directory contains certificates and private keys for testing some
-+SSL-related functionality in Travis. Do NOT use these certificates for
-+anything other than testing.
-diff --git a/vendor/github.com/lib/pq/certs/bogus_root.crt b/vendor/github.com/lib/pq/certs/bogus_root.crt
-new file mode 100644
-index 00000000000..1239db3a482
---- /dev/null
-+++ b/vendor/github.com/lib/pq/certs/bogus_root.crt
-@@ -0,0 +1,19 @@
-+-----BEGIN CERTIFICATE-----
-+MIIDBjCCAe6gAwIBAgIQSnDYp/Naet9HOZljF5PuwDANBgkqhkiG9w0BAQsFADAr
-+MRIwEAYDVQQKEwlDb2Nrcm9hY2gxFTATBgNVBAMTDENvY2tyb2FjaCBDQTAeFw0x
-+NjAyMDcxNjQ0MzdaFw0xNzAyMDYxNjQ0MzdaMCsxEjAQBgNVBAoTCUNvY2tyb2Fj
-+aDEVMBMGA1UEAxMMQ29ja3JvYWNoIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
-+MIIBCgKCAQEAxdln3/UdgP7ayA/G1kT7upjLe4ERwQjYQ25q0e1+vgsB5jhiirxJ
-+e0+WkhhYu/mwoSAXzvlsbZ2PWFyfdanZeD/Lh6SvIeWXVVaPcWVWL1TEcoN2jr5+
-+E85MMHmbbmaT2he8s6br2tM/UZxyTQ2XRprIzApbDssyw1c0Yufcpu3C6267FLEl
-+IfcWrzDhnluFhthhtGXv3ToD8IuMScMC5qlKBXtKmD1B5x14ngO/ecNJ+OlEi0HU
-+mavK4KWgI2rDXRZ2EnCpyTZdkc3kkRnzKcg653oOjMDRZdrhfIrha+Jq38ACsUmZ
-+Su7Sp5jkIHOCO8Zg+l6GKVSq37dKMapD8wIDAQABoyYwJDAOBgNVHQ8BAf8EBAMC
-+AuQwEgYDVR0TAQH/BAgwBgEB/wIBATANBgkqhkiG9w0BAQsFAAOCAQEAwZ2Tu0Yu
-+rrSVdMdoPEjT1IZd+5OhM/SLzL0ddtvTithRweLHsw2lDQYlXFqr24i3UGZJQ1sp
-+cqSrNwswgLUQT3vWyTjmM51HEb2vMYWKmjZ+sBQYAUP1CadrN/+OTfNGnlF1+B4w
-+IXOzh7EvQmJJnNybLe4a/aRvj1NE2n8Z898B76SVU9WbfKKz8VwLzuIPDqkKcZda
-+lMy5yzthyztV9YjcWs2zVOUGZvGdAhDrvZuUq6mSmxrBEvR2LBOggmVf3tGRT+Ls
-+lW7c9Lrva5zLHuqmoPP07A+vuI9a0D1X44jwGDuPWJ5RnTOQ63Uez12mKNjqleHw
-+DnkwNanuO8dhAA==
-+-----END CERTIFICATE-----
-diff --git a/vendor/github.com/lib/pq/certs/postgresql.crt b/vendor/github.com/lib/pq/certs/postgresql.crt
-new file mode 100644
-index 00000000000..54ec487e990
---- /dev/null
-+++ b/vendor/github.com/lib/pq/certs/postgresql.crt
-@@ -0,0 +1,69 @@
-+Certificate:
-+ Data:
-+ Version: 3 (0x2)
-+ Serial Number: 2 (0x2)
-+ Signature Algorithm: sha256WithRSAEncryption
-+ Issuer: C=US, ST=Nevada, L=Las Vegas, O=github.com/lib/pq, CN=pq CA
-+ Validity
-+ Not Before: Oct 11 15:10:11 2014 GMT
-+ Not After : Oct 8 15:10:11 2024 GMT
-+ Subject: C=US, ST=Nevada, L=Las Vegas, O=github.com/lib/pq, CN=pqgosslcert
-+ Subject Public Key Info:
-+ Public Key Algorithm: rsaEncryption
-+ RSA Public Key: (1024 bit)
-+ Modulus (1024 bit):
-+ 00:e3:8c:06:9a:70:54:51:d1:34:34:83:39:cd:a2:
-+ 59:0f:05:ed:8d:d8:0e:34:d0:92:f4:09:4d:ee:8c:
-+ 78:55:49:24:f8:3c:e0:34:58:02:b2:e7:94:58:c1:
-+ e8:e5:bb:d1:af:f6:54:c1:40:b1:90:70:79:0d:35:
-+ 54:9c:8f:16:e9:c2:f0:92:e6:64:49:38:c1:76:f8:
-+ 47:66:c4:5b:4a:b6:a9:43:ce:c8:be:6c:4d:2b:94:
-+ 97:3c:55:bc:d1:d0:6e:b7:53:ae:89:5c:4b:6b:86:
-+ 40:be:c1:ae:1e:64:ce:9c:ae:87:0a:69:e5:c8:21:
-+ 12:be:ae:1d:f6:45:df:16:a7
-+ Exponent: 65537 (0x10001)
-+ X509v3 extensions:
-+ X509v3 Subject Key Identifier:
-+ 9B:25:31:63:A2:D8:06:FF:CB:E3:E9:96:FF:0D:BA:DC:12:7D:04:CF
-+ X509v3 Authority Key Identifier:
-+ keyid:52:93:ED:1E:76:0A:9F:65:4F:DE:19:66:C1:D5:22:40:35:CB:A0:72
-+
-+ X509v3 Basic Constraints:
-+ CA:FALSE
-+ X509v3 Key Usage:
-+ Digital Signature, Non Repudiation, Key Encipherment
-+ Signature Algorithm: sha256WithRSAEncryption
-+ 3e:f5:f8:0b:4e:11:bd:00:86:1f:ce:dc:97:02:98:91:11:f5:
-+ 65:f6:f2:8a:b2:3e:47:92:05:69:28:c9:e9:b4:f7:cf:93:d1:
-+ 2d:81:5d:00:3c:23:be:da:70:ea:59:e1:2c:d3:25:49:ae:a6:
-+ 95:54:c1:10:df:23:e3:fe:d6:e4:76:c7:6b:73:ad:1b:34:7c:
-+ e2:56:cc:c0:37:ae:c5:7a:11:20:6c:3d:05:0e:99:cd:22:6c:
-+ cf:59:a1:da:28:d4:65:ba:7d:2f:2b:3d:69:6d:a6:c1:ae:57:
-+ bf:56:64:13:79:f8:48:46:65:eb:81:67:28:0b:7b:de:47:10:
-+ b3:80:3c:31:d1:58:94:01:51:4a:c7:c8:1a:01:a8:af:c4:cd:
-+ bb:84:a5:d9:8b:b4:b9:a1:64:3e:95:d9:90:1d:d5:3f:67:cc:
-+ 3b:ba:f5:b4:d1:33:77:ee:c2:d2:3e:7e:c5:66:6e:b7:35:4c:
-+ 60:57:b0:b8:be:36:c8:f3:d3:95:8c:28:4a:c9:f7:27:a4:0d:
-+ e5:96:99:eb:f5:c8:bd:f3:84:6d:ef:02:f9:8a:36:7d:6b:5f:
-+ 36:68:37:41:d9:74:ae:c6:78:2e:44:86:a1:ad:43:ca:fb:b5:
-+ 3e:ba:10:23:09:02:ac:62:d1:d0:83:c8:95:b9:e3:5e:30:ff:
-+ 5b:2b:38:fa
-+-----BEGIN CERTIFICATE-----
-+MIIDEzCCAfugAwIBAgIBAjANBgkqhkiG9w0BAQsFADBeMQswCQYDVQQGEwJVUzEP
-+MA0GA1UECBMGTmV2YWRhMRIwEAYDVQQHEwlMYXMgVmVnYXMxGjAYBgNVBAoTEWdp
-+dGh1Yi5jb20vbGliL3BxMQ4wDAYDVQQDEwVwcSBDQTAeFw0xNDEwMTExNTEwMTFa
-+Fw0yNDEwMDgxNTEwMTFaMGQxCzAJBgNVBAYTAlVTMQ8wDQYDVQQIEwZOZXZhZGEx
-+EjAQBgNVBAcTCUxhcyBWZWdhczEaMBgGA1UEChMRZ2l0aHViLmNvbS9saWIvcHEx
-+FDASBgNVBAMTC3BxZ29zc2xjZXJ0MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKB
-+gQDjjAaacFRR0TQ0gznNolkPBe2N2A400JL0CU3ujHhVSST4POA0WAKy55RYwejl
-+u9Gv9lTBQLGQcHkNNVScjxbpwvCS5mRJOMF2+EdmxFtKtqlDzsi+bE0rlJc8VbzR
-+0G63U66JXEtrhkC+wa4eZM6crocKaeXIIRK+rh32Rd8WpwIDAQABo1owWDAdBgNV
-+HQ4EFgQUmyUxY6LYBv/L4+mW/w263BJ9BM8wHwYDVR0jBBgwFoAUUpPtHnYKn2VP
-+3hlmwdUiQDXLoHIwCQYDVR0TBAIwADALBgNVHQ8EBAMCBeAwDQYJKoZIhvcNAQEL
-+BQADggEBAD71+AtOEb0Ahh/O3JcCmJER9WX28oqyPkeSBWkoyem098+T0S2BXQA8
-+I77acOpZ4SzTJUmuppVUwRDfI+P+1uR2x2tzrRs0fOJWzMA3rsV6ESBsPQUOmc0i
-+bM9Zodoo1GW6fS8rPWltpsGuV79WZBN5+EhGZeuBZygLe95HELOAPDHRWJQBUUrH
-+yBoBqK/EzbuEpdmLtLmhZD6V2ZAd1T9nzDu69bTRM3fuwtI+fsVmbrc1TGBXsLi+
-+Nsjz05WMKErJ9yekDeWWmev1yL3zhG3vAvmKNn1rXzZoN0HZdK7GeC5EhqGtQ8r7
-+tT66ECMJAqxi0dCDyJW5414w/1srOPo=
-+-----END CERTIFICATE-----
-diff --git a/vendor/github.com/lib/pq/certs/postgresql.key b/vendor/github.com/lib/pq/certs/postgresql.key
-new file mode 100644
-index 00000000000..eb8b20be96d
---- /dev/null
-+++ b/vendor/github.com/lib/pq/certs/postgresql.key
-@@ -0,0 +1,15 @@
-+-----BEGIN RSA PRIVATE KEY-----
-+MIICWwIBAAKBgQDjjAaacFRR0TQ0gznNolkPBe2N2A400JL0CU3ujHhVSST4POA0
-+WAKy55RYwejlu9Gv9lTBQLGQcHkNNVScjxbpwvCS5mRJOMF2+EdmxFtKtqlDzsi+
-+bE0rlJc8VbzR0G63U66JXEtrhkC+wa4eZM6crocKaeXIIRK+rh32Rd8WpwIDAQAB
-+AoGAM5dM6/kp9P700i8qjOgRPym96Zoh5nGfz/rIE5z/r36NBkdvIg8OVZfR96nH
-+b0b9TOMR5lsPp0sI9yivTWvX6qyvLJRWy2vvx17hXK9NxXUNTAm0PYZUTvCtcPeX
-+RnJpzQKNZQPkFzF0uXBc4CtPK2Vz0+FGvAelrhYAxnw1dIkCQQD+9qaW5QhXjsjb
-+Nl85CmXgxPmGROcgLQCO+omfrjf9UXrituU9Dz6auym5lDGEdMFnkzfr+wpasEy9
-+mf5ZZOhDAkEA5HjXfVGaCtpydOt6hDon/uZsyssCK2lQ7NSuE3vP+sUsYMzIpEoy
-+t3VWXqKbo+g9KNDTP4WEliqp1aiSIylzzQJANPeqzihQnlgEdD4MdD4rwhFJwVIp
-+Le8Lcais1KaN7StzOwxB/XhgSibd2TbnPpw+3bSg5n5lvUdo+e62/31OHwJAU1jS
-+I+F09KikQIr28u3UUWT2IzTT4cpVv1AHAQyV3sG3YsjSGT0IK20eyP9BEBZU2WL0
-+7aNjrvR5aHxKc5FXsQJABsFtyGpgI5X4xufkJZVZ+Mklz2n7iXa+XPatMAHFxAtb
-+EEMt60rngwMjXAzBSC6OYuYogRRAY3UCacNC5VhLYQ==
-+-----END RSA PRIVATE KEY-----
-diff --git a/vendor/github.com/lib/pq/certs/root.crt b/vendor/github.com/lib/pq/certs/root.crt
-new file mode 100644
-index 00000000000..aecf8f6213b
---- /dev/null
-+++ b/vendor/github.com/lib/pq/certs/root.crt
-@@ -0,0 +1,24 @@
-+-----BEGIN CERTIFICATE-----
-+MIIEAzCCAuugAwIBAgIJANmheROCdW1NMA0GCSqGSIb3DQEBBQUAMF4xCzAJBgNV
-+BAYTAlVTMQ8wDQYDVQQIEwZOZXZhZGExEjAQBgNVBAcTCUxhcyBWZWdhczEaMBgG
-+A1UEChMRZ2l0aHViLmNvbS9saWIvcHExDjAMBgNVBAMTBXBxIENBMB4XDTE0MTAx
-+MTE1MDQyOVoXDTI0MTAwODE1MDQyOVowXjELMAkGA1UEBhMCVVMxDzANBgNVBAgT
-+Bk5ldmFkYTESMBAGA1UEBxMJTGFzIFZlZ2FzMRowGAYDVQQKExFnaXRodWIuY29t
-+L2xpYi9wcTEOMAwGA1UEAxMFcHEgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw
-+ggEKAoIBAQCV4PxP7ShzWBzUCThcKk3qZtOLtHmszQVtbqhvgTpm1kTRtKBdVMu0
-+pLAHQ3JgJCnAYgH0iZxVGoMP16T3irdgsdC48+nNTFM2T0cCdkfDURGIhSFN47cb
-+Pgy306BcDUD2q7ucW33+dlFSRuGVewocoh4BWM/vMtMvvWzdi4Ag/L/jhb+5wZxZ
-+sWymsadOVSDePEMKOvlCa3EdVwVFV40TVyDb+iWBUivDAYsS2a3KajuJrO6MbZiE
-+Sp2RCIkZS2zFmzWxVRi9ZhzIZhh7EVF9JAaNC3T52jhGUdlRq3YpBTMnd89iOh74
-+6jWXG7wSuPj3haFzyNhmJ0ZUh+2Ynoh1AgMBAAGjgcMwgcAwHQYDVR0OBBYEFFKT
-+7R52Cp9lT94ZZsHVIkA1y6ByMIGQBgNVHSMEgYgwgYWAFFKT7R52Cp9lT94ZZsHV
-+IkA1y6ByoWKkYDBeMQswCQYDVQQGEwJVUzEPMA0GA1UECBMGTmV2YWRhMRIwEAYD
-+VQQHEwlMYXMgVmVnYXMxGjAYBgNVBAoTEWdpdGh1Yi5jb20vbGliL3BxMQ4wDAYD
-+VQQDEwVwcSBDQYIJANmheROCdW1NMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEF
-+BQADggEBAAEhCLWkqJNMI8b4gkbmj5fqQ/4+oO83bZ3w2Oqf6eZ8I8BC4f2NOyE6
-+tRUlq5+aU7eqC1cOAvGjO+YHN/bF/DFpwLlzvUSXt+JP/pYcUjL7v+pIvwqec9hD
-+ndvM4iIbkD/H/OYQ3L+N3W+G1x7AcFIX+bGCb3PzYVQAjxreV6//wgKBosMGFbZo
-+HPxT9RPMun61SViF04H5TNs0derVn1+5eiiYENeAhJzQNyZoOOUuX1X/Inx9bEPh
-+C5vFBtSMgIytPgieRJVWAiMLYsfpIAStrHztRAbBs2DU01LmMgRvHdxgFEKinC/d
-+UHZZQDP+6pT+zADrGhQGXe4eThaO6f0=
-+-----END CERTIFICATE-----
-diff --git a/vendor/github.com/lib/pq/certs/server.crt b/vendor/github.com/lib/pq/certs/server.crt
-new file mode 100644
-index 00000000000..f90934c1cd4
---- /dev/null
-+++ b/vendor/github.com/lib/pq/certs/server.crt
-@@ -0,0 +1,81 @@
-+Certificate:
-+ Data:
-+ Version: 3 (0x2)
-+ Serial Number: 1 (0x1)
-+ Signature Algorithm: sha256WithRSAEncryption
-+ Issuer: C=US, ST=Nevada, L=Las Vegas, O=github.com/lib/pq, CN=pq CA
-+ Validity
-+ Not Before: Oct 11 15:05:15 2014 GMT
-+ Not After : Oct 8 15:05:15 2024 GMT
-+ Subject: C=US, ST=Nevada, L=Las Vegas, O=github.com/lib/pq, CN=postgres
-+ Subject Public Key Info:
-+ Public Key Algorithm: rsaEncryption
-+ RSA Public Key: (2048 bit)
-+ Modulus (2048 bit):
-+ 00:d7:8a:4c:85:fb:17:a5:3c:8f:e0:72:11:29:ce:
-+ 3f:b0:1f:3f:7d:c6:ee:7f:a7:fc:02:2b:35:47:08:
-+ a6:3d:90:df:5c:56:14:94:00:c7:6d:d1:d2:e2:61:
-+ 95:77:b8:e3:a6:66:31:f9:1f:21:7d:62:e1:27:da:
-+ 94:37:61:4a:ea:63:53:a0:61:b8:9c:bb:a5:e2:e7:
-+ b7:a6:d8:0f:05:04:c7:29:e2:ea:49:2b:7f:de:15:
-+ 00:a6:18:70:50:c7:0c:de:9a:f9:5a:96:b0:e1:94:
-+ 06:c6:6d:4a:21:3b:b4:0f:a5:6d:92:86:34:b2:4e:
-+ d7:0e:a7:19:c0:77:0b:7b:87:c8:92:de:42:ff:86:
-+ d2:b7:9a:a4:d4:15:23:ca:ad:a5:69:21:b8:ce:7e:
-+ 66:cb:85:5d:b9:ed:8b:2d:09:8d:94:e4:04:1e:72:
-+ ec:ef:d0:76:90:15:5a:a4:f7:91:4b:e9:ce:4e:9d:
-+ 5d:9a:70:17:9c:d8:e9:73:83:ea:3d:61:99:a6:cd:
-+ ac:91:40:5a:88:77:e5:4e:2a:8e:3d:13:f3:f9:38:
-+ 6f:81:6b:8a:95:ca:0e:07:ab:6f:da:b4:8c:d9:ff:
-+ aa:78:03:aa:c7:c2:cf:6f:64:92:d3:d8:83:d5:af:
-+ f1:23:18:a7:2e:7b:17:0b:e7:7d:f1:fa:a8:41:a3:
-+ 04:57
-+ Exponent: 65537 (0x10001)
-+ X509v3 extensions:
-+ X509v3 Subject Key Identifier:
-+ EE:F0:B3:46:DC:C7:09:EB:0E:B6:2F:E5:FE:62:60:45:44:9F:59:CC
-+ X509v3 Authority Key Identifier:
-+ keyid:52:93:ED:1E:76:0A:9F:65:4F:DE:19:66:C1:D5:22:40:35:CB:A0:72
-+
-+ X509v3 Basic Constraints:
-+ CA:FALSE
-+ X509v3 Key Usage:
-+ Digital Signature, Non Repudiation, Key Encipherment
-+ Signature Algorithm: sha256WithRSAEncryption
-+ 7e:5a:6e:be:bf:d2:6c:c1:d6:fa:b6:fb:3f:06:53:36:08:87:
-+ 9d:95:b1:39:af:9e:f6:47:38:17:39:da:25:7c:f2:ad:0c:e3:
-+ ab:74:19:ca:fb:8c:a0:50:c0:1d:19:8a:9c:21:ed:0f:3a:d1:
-+ 96:54:2e:10:09:4f:b8:70:f7:2b:99:43:d2:c6:15:bc:3f:24:
-+ 7d:28:39:32:3f:8d:a4:4f:40:75:7f:3e:0d:1c:d1:69:f2:4e:
-+ 98:83:47:97:d2:25:ac:c9:36:86:2f:04:a6:c4:86:c7:c4:00:
-+ 5f:7f:b9:ad:fc:bf:e9:f5:78:d7:82:1a:51:0d:fc:ab:9e:92:
-+ 1d:5f:0c:18:d1:82:e0:14:c9:ce:91:89:71:ff:49:49:ff:35:
-+ bf:7b:44:78:42:c1:d0:66:65:bb:28:2e:60:ca:9b:20:12:a9:
-+ 90:61:b1:96:ec:15:46:c9:37:f7:07:90:8a:89:45:2a:3f:37:
-+ ec:dc:e3:e5:8f:c3:3a:57:80:a5:54:60:0c:e1:b2:26:99:2b:
-+ 40:7e:36:d1:9a:70:02:ec:63:f4:3b:72:ae:81:fb:30:20:6d:
-+ cb:48:46:c6:b5:8f:39:b1:84:05:25:55:8d:f5:62:f6:1b:46:
-+ 2e:da:a3:4c:26:12:44:d7:56:b6:b8:a9:ca:d3:ab:71:45:7c:
-+ 9f:48:6d:1e
-+-----BEGIN CERTIFICATE-----
-+MIIDlDCCAnygAwIBAgIBATANBgkqhkiG9w0BAQsFADBeMQswCQYDVQQGEwJVUzEP
-+MA0GA1UECBMGTmV2YWRhMRIwEAYDVQQHEwlMYXMgVmVnYXMxGjAYBgNVBAoTEWdp
-+dGh1Yi5jb20vbGliL3BxMQ4wDAYDVQQDEwVwcSBDQTAeFw0xNDEwMTExNTA1MTVa
-+Fw0yNDEwMDgxNTA1MTVaMGExCzAJBgNVBAYTAlVTMQ8wDQYDVQQIEwZOZXZhZGEx
-+EjAQBgNVBAcTCUxhcyBWZWdhczEaMBgGA1UEChMRZ2l0aHViLmNvbS9saWIvcHEx
-+ETAPBgNVBAMTCHBvc3RncmVzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
-+AQEA14pMhfsXpTyP4HIRKc4/sB8/fcbuf6f8Ais1RwimPZDfXFYUlADHbdHS4mGV
-+d7jjpmYx+R8hfWLhJ9qUN2FK6mNToGG4nLul4ue3ptgPBQTHKeLqSSt/3hUAphhw
-+UMcM3pr5Wpaw4ZQGxm1KITu0D6VtkoY0sk7XDqcZwHcLe4fIkt5C/4bSt5qk1BUj
-+yq2laSG4zn5my4Vdue2LLQmNlOQEHnLs79B2kBVapPeRS+nOTp1dmnAXnNjpc4Pq
-+PWGZps2skUBaiHflTiqOPRPz+ThvgWuKlcoOB6tv2rSM2f+qeAOqx8LPb2SS09iD
-+1a/xIxinLnsXC+d98fqoQaMEVwIDAQABo1owWDAdBgNVHQ4EFgQU7vCzRtzHCesO
-+ti/l/mJgRUSfWcwwHwYDVR0jBBgwFoAUUpPtHnYKn2VP3hlmwdUiQDXLoHIwCQYD
-+VR0TBAIwADALBgNVHQ8EBAMCBeAwDQYJKoZIhvcNAQELBQADggEBAH5abr6/0mzB
-+1vq2+z8GUzYIh52VsTmvnvZHOBc52iV88q0M46t0Gcr7jKBQwB0Zipwh7Q860ZZU
-+LhAJT7hw9yuZQ9LGFbw/JH0oOTI/jaRPQHV/Pg0c0WnyTpiDR5fSJazJNoYvBKbE
-+hsfEAF9/ua38v+n1eNeCGlEN/Kuekh1fDBjRguAUyc6RiXH/SUn/Nb97RHhCwdBm
-+ZbsoLmDKmyASqZBhsZbsFUbJN/cHkIqJRSo/N+zc4+WPwzpXgKVUYAzhsiaZK0B+
-+NtGacALsY/Q7cq6B+zAgbctIRsa1jzmxhAUlVY31YvYbRi7ao0wmEkTXVra4qcrT
-+q3FFfJ9IbR4=
-+-----END CERTIFICATE-----
-diff --git a/vendor/github.com/lib/pq/certs/server.key b/vendor/github.com/lib/pq/certs/server.key
-new file mode 100644
-index 00000000000..bd7b019b655
---- /dev/null
-+++ b/vendor/github.com/lib/pq/certs/server.key
-@@ -0,0 +1,27 @@
-+-----BEGIN RSA PRIVATE KEY-----
-+MIIEogIBAAKCAQEA14pMhfsXpTyP4HIRKc4/sB8/fcbuf6f8Ais1RwimPZDfXFYU
-+lADHbdHS4mGVd7jjpmYx+R8hfWLhJ9qUN2FK6mNToGG4nLul4ue3ptgPBQTHKeLq
-+SSt/3hUAphhwUMcM3pr5Wpaw4ZQGxm1KITu0D6VtkoY0sk7XDqcZwHcLe4fIkt5C
-+/4bSt5qk1BUjyq2laSG4zn5my4Vdue2LLQmNlOQEHnLs79B2kBVapPeRS+nOTp1d
-+mnAXnNjpc4PqPWGZps2skUBaiHflTiqOPRPz+ThvgWuKlcoOB6tv2rSM2f+qeAOq
-+x8LPb2SS09iD1a/xIxinLnsXC+d98fqoQaMEVwIDAQABAoIBAF3ZoihUhJ82F4+r
-+Gz4QyDpv4L1reT2sb1aiabhcU8ZK5nbWJG+tRyjSS/i2dNaEcttpdCj9HR/zhgZM
-+bm0OuAgG58rVwgS80CZUruq++Qs+YVojq8/gWPTiQD4SNhV2Fmx3HkwLgUk3oxuT
-+SsvdqzGE3okGVrutCIcgy126eA147VPMoej1Bb3fO6npqK0pFPhZfAc0YoqJuM+k
-+obRm5pAnGUipyLCFXjA9HYPKwYZw2RtfdA3CiImHeanSdqS+ctrC9y8BV40Th7gZ
-+haXdKUNdjmIxV695QQ1mkGqpKLZFqhzKioGQ2/Ly2d1iaKN9fZltTusu8unepWJ2
-+tlT9qMECgYEA9uHaF1t2CqE+AJvWTihHhPIIuLxoOQXYea1qvxfcH/UMtaLKzCNm
-+lQ5pqCGsPvp+10f36yttO1ZehIvlVNXuJsjt0zJmPtIolNuJY76yeussfQ9jHheB
-+5uPEzCFlHzxYbBUyqgWaF6W74okRGzEGJXjYSP0yHPPdU4ep2q3bGiUCgYEA34Af
-+wBSuQSK7uLxArWHvQhyuvi43ZGXls6oRGl+Ysj54s8BP6XGkq9hEJ6G4yxgyV+BR
-+DUOs5X8/TLT8POuIMYvKTQthQyCk0eLv2FLdESDuuKx0kBVY3s8lK3/z5HhrdOiN
-+VMNZU+xDKgKc3hN9ypkk8vcZe6EtH7Y14e0rVcsCgYBTgxi8F/M5K0wG9rAqphNz
-+VFBA9XKn/2M33cKjO5X5tXIEKzpAjaUQvNxexG04rJGljzG8+mar0M6ONahw5yD1
-+O7i/XWgazgpuOEkkVYiYbd8RutfDgR4vFVMn3hAP3eDnRtBplRWH9Ec3HTiNIys6
-+F8PKBOQjyRZQQC7jyzW3hQKBgACe5HeuFwXLSOYsb6mLmhR+6+VPT4wR1F95W27N
-+USk9jyxAnngxfpmTkiziABdgS9N+pfr5cyN4BP77ia/Jn6kzkC5Cl9SN5KdIkA3z
-+vPVtN/x/ThuQU5zaymmig1ThGLtMYggYOslG4LDfLPxY5YKIhle+Y+259twdr2yf
-+Mf2dAoGAaGv3tWMgnIdGRk6EQL/yb9PKHo7ShN+tKNlGaK7WwzBdKs+Fe8jkgcr7
-+pz4Ne887CmxejdISzOCcdT+Zm9Bx6I/uZwWOtDvWpIgIxVX9a9URj/+D1MxTE/y4
-+d6H+c89yDY62I2+drMpdjCd3EtCaTlxpTbRS+s1eAHMH7aEkcCE=
-+-----END RSA PRIVATE KEY-----
-diff --git a/vendor/github.com/lib/pq/conn.go b/vendor/github.com/lib/pq/conn.go
-new file mode 100644
-index 00000000000..55152b1242c
---- /dev/null
-+++ b/vendor/github.com/lib/pq/conn.go
-@@ -0,0 +1,1923 @@
-+package pq
-+
-+import (
-+ "bufio"
-+ "context"
-+ "crypto/md5"
-+ "crypto/sha256"
-+ "database/sql"
-+ "database/sql/driver"
-+ "encoding/binary"
-+ "errors"
-+ "fmt"
-+ "io"
-+ "net"
-+ "os"
-+ "os/user"
-+ "path"
-+ "path/filepath"
-+ "strconv"
-+ "strings"
-+ "time"
-+ "unicode"
-+
-+ "github.com/lib/pq/oid"
-+ "github.com/lib/pq/scram"
-+)
-+
-+// Common error types
-+var (
-+ ErrNotSupported = errors.New("pq: Unsupported command")
-+ ErrInFailedTransaction = errors.New("pq: Could not complete operation in a failed transaction")
-+ ErrSSLNotSupported = errors.New("pq: SSL is not enabled on the server")
-+ ErrSSLKeyHasWorldPermissions = errors.New("pq: Private key file has group or world access. Permissions should be u=rw (0600) or less")
-+ ErrCouldNotDetectUsername = errors.New("pq: Could not detect default username. Please provide one explicitly")
-+
-+ errUnexpectedReady = errors.New("unexpected ReadyForQuery")
-+ errNoRowsAffected = errors.New("no RowsAffected available after the empty statement")
-+ errNoLastInsertID = errors.New("no LastInsertId available after the empty statement")
-+)
-+
-+// Driver is the Postgres database driver.
-+type Driver struct{}
-+
-+// Open opens a new connection to the database. name is a connection string.
-+// Most users should only use it through database/sql package from the standard
-+// library.
-+func (d *Driver) Open(name string) (driver.Conn, error) {
-+ return Open(name)
-+}
-+
-+func init() {
-+ sql.Register("postgres", &Driver{})
-+}
-+
-+type parameterStatus struct {
-+ // server version in the same format as server_version_num, or 0 if
-+ // unavailable
-+ serverVersion int
-+
-+ // the current location based on the TimeZone value of the session, if
-+ // available
-+ currentLocation *time.Location
-+}
-+
-+type transactionStatus byte
-+
-+const (
-+ txnStatusIdle transactionStatus = 'I'
-+ txnStatusIdleInTransaction transactionStatus = 'T'
-+ txnStatusInFailedTransaction transactionStatus = 'E'
-+)
-+
-+func (s transactionStatus) String() string {
-+ switch s {
-+ case txnStatusIdle:
-+ return "idle"
-+ case txnStatusIdleInTransaction:
-+ return "idle in transaction"
-+ case txnStatusInFailedTransaction:
-+ return "in a failed transaction"
-+ default:
-+ errorf("unknown transactionStatus %d", s)
-+ }
-+
-+ panic("not reached")
-+}
-+
-+// Dialer is the dialer interface. It can be used to obtain more control over
-+// how pq creates network connections.
-+type Dialer interface {
-+ Dial(network, address string) (net.Conn, error)
-+ DialTimeout(network, address string, timeout time.Duration) (net.Conn, error)
-+}
-+
-+// DialerContext is the context-aware dialer interface.
-+type DialerContext interface {
-+ DialContext(ctx context.Context, network, address string) (net.Conn, error)
-+}
-+
-+type defaultDialer struct {
-+ d net.Dialer
-+}
-+
-+func (d defaultDialer) Dial(network, address string) (net.Conn, error) {
-+ return d.d.Dial(network, address)
-+}
-+func (d defaultDialer) DialTimeout(network, address string, timeout time.Duration) (net.Conn, error) {
-+ ctx, cancel := context.WithTimeout(context.Background(), timeout)
-+ defer cancel()
-+ return d.DialContext(ctx, network, address)
-+}
-+func (d defaultDialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
-+ return d.d.DialContext(ctx, network, address)
-+}
-+
-+type conn struct {
-+ c net.Conn
-+ buf *bufio.Reader
-+ namei int
-+ scratch [512]byte
-+ txnStatus transactionStatus
-+ txnFinish func()
-+
-+ // Save connection arguments to use during CancelRequest.
-+ dialer Dialer
-+ opts values
-+
-+ // Cancellation key data for use with CancelRequest messages.
-+ processID int
-+ secretKey int
-+
-+ parameterStatus parameterStatus
-+
-+ saveMessageType byte
-+ saveMessageBuffer []byte
-+
-+ // If true, this connection is bad and all public-facing functions should
-+ // return ErrBadConn.
-+ bad bool
-+
-+ // If set, this connection should never use the binary format when
-+ // receiving query results from prepared statements. Only provided for
-+ // debugging.
-+ disablePreparedBinaryResult bool
-+
-+ // Whether to always send []byte parameters over as binary. Enables single
-+ // round-trip mode for non-prepared Query calls.
-+ binaryParameters bool
-+
-+ // If true this connection is in the middle of a COPY
-+ inCopy bool
-+}
-+
-+// Handle driver-side settings in parsed connection string.
-+func (cn *conn) handleDriverSettings(o values) (err error) {
-+ boolSetting := func(key string, val *bool) error {
-+ if value, ok := o[key]; ok {
-+ if value == "yes" {
-+ *val = true
-+ } else if value == "no" {
-+ *val = false
-+ } else {
-+ return fmt.Errorf("unrecognized value %q for %s", value, key)
-+ }
-+ }
-+ return nil
-+ }
-+
-+ err = boolSetting("disable_prepared_binary_result", &cn.disablePreparedBinaryResult)
-+ if err != nil {
-+ return err
-+ }
-+ return boolSetting("binary_parameters", &cn.binaryParameters)
-+}
-+
-+func (cn *conn) handlePgpass(o values) {
-+ // if a password was supplied, do not process .pgpass
-+ if _, ok := o["password"]; ok {
-+ return
-+ }
-+ filename := os.Getenv("PGPASSFILE")
-+ if filename == "" {
-+ // XXX this code doesn't work on Windows where the default filename is
-+ // XXX %APPDATA%\postgresql\pgpass.conf
-+ // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470
-+ userHome := os.Getenv("HOME")
-+ if userHome == "" {
-+ user, err := user.Current()
-+ if err != nil {
-+ return
-+ }
-+ userHome = user.HomeDir
-+ }
-+ filename = filepath.Join(userHome, ".pgpass")
-+ }
-+ fileinfo, err := os.Stat(filename)
-+ if err != nil {
-+ return
-+ }
-+ mode := fileinfo.Mode()
-+ if mode&(0x77) != 0 {
-+ // XXX should warn about incorrect .pgpass permissions as psql does
-+ return
-+ }
-+ file, err := os.Open(filename)
-+ if err != nil {
-+ return
-+ }
-+ defer file.Close()
-+ scanner := bufio.NewScanner(io.Reader(file))
-+ hostname := o["host"]
-+ ntw, _ := network(o)
-+ port := o["port"]
-+ db := o["dbname"]
-+ username := o["user"]
-+ // From: https://github.com/tg/pgpass/blob/master/reader.go
-+ getFields := func(s string) []string {
-+ fs := make([]string, 0, 5)
-+ f := make([]rune, 0, len(s))
-+
-+ var esc bool
-+ for _, c := range s {
-+ switch {
-+ case esc:
-+ f = append(f, c)
-+ esc = false
-+ case c == '\\':
-+ esc = true
-+ case c == ':':
-+ fs = append(fs, string(f))
-+ f = f[:0]
-+ default:
-+ f = append(f, c)
-+ }
-+ }
-+ return append(fs, string(f))
-+ }
-+ for scanner.Scan() {
-+ line := scanner.Text()
-+ if len(line) == 0 || line[0] == '#' {
-+ continue
-+ }
-+ split := getFields(line)
-+ if len(split) != 5 {
-+ continue
-+ }
-+ if (split[0] == "*" || split[0] == hostname || (split[0] == "localhost" && (hostname == "" || ntw == "unix"))) && (split[1] == "*" || split[1] == port) && (split[2] == "*" || split[2] == db) && (split[3] == "*" || split[3] == username) {
-+ o["password"] = split[4]
-+ return
-+ }
-+ }
-+}
-+
-+func (cn *conn) writeBuf(b byte) *writeBuf {
-+ cn.scratch[0] = b
-+ return &writeBuf{
-+ buf: cn.scratch[:5],
-+ pos: 1,
-+ }
-+}
-+
-+// Open opens a new connection to the database. dsn is a connection string.
-+// Most users should only use it through database/sql package from the standard
-+// library.
-+func Open(dsn string) (_ driver.Conn, err error) {
-+ return DialOpen(defaultDialer{}, dsn)
-+}
-+
-+// DialOpen opens a new connection to the database using a dialer.
-+func DialOpen(d Dialer, dsn string) (_ driver.Conn, err error) {
-+ c, err := NewConnector(dsn)
-+ if err != nil {
-+ return nil, err
-+ }
-+ c.dialer = d
-+ return c.open(context.Background())
-+}
-+
-+func (c *Connector) open(ctx context.Context) (cn *conn, err error) {
-+ // Handle any panics during connection initialization. Note that we
-+ // specifically do *not* want to use errRecover(), as that would turn any
-+ // connection errors into ErrBadConns, hiding the real error message from
-+ // the user.
-+ defer errRecoverNoErrBadConn(&err)
-+
-+ o := c.opts
-+
-+ cn = &conn{
-+ opts: o,
-+ dialer: c.dialer,
-+ }
-+ err = cn.handleDriverSettings(o)
-+ if err != nil {
-+ return nil, err
-+ }
-+ cn.handlePgpass(o)
-+
-+ cn.c, err = dial(ctx, c.dialer, o)
-+ if err != nil {
-+ return nil, err
-+ }
-+
-+ err = cn.ssl(o)
-+ if err != nil {
-+ if cn.c != nil {
-+ cn.c.Close()
-+ }
-+ return nil, err
-+ }
-+
-+ // cn.startup panics on error. Make sure we don't leak cn.c.
-+ panicking := true
-+ defer func() {
-+ if panicking {
-+ cn.c.Close()
-+ }
-+ }()
-+
-+ cn.buf = bufio.NewReader(cn.c)
-+ cn.startup(o)
-+
-+ // reset the deadline, in case one was set (see dial)
-+ if timeout, ok := o["connect_timeout"]; ok && timeout != "0" {
-+ err = cn.c.SetDeadline(time.Time{})
-+ }
-+ panicking = false
-+ return cn, err
-+}
-+
-+func dial(ctx context.Context, d Dialer, o values) (net.Conn, error) {
-+ network, address := network(o)
-+ // SSL is not necessary or supported over UNIX domain sockets
-+ if network == "unix" {
-+ o["sslmode"] = "disable"
-+ }
-+
-+ // Zero or not specified means wait indefinitely.
-+ if timeout, ok := o["connect_timeout"]; ok && timeout != "0" {
-+ seconds, err := strconv.ParseInt(timeout, 10, 0)
-+ if err != nil {
-+ return nil, fmt.Errorf("invalid value for parameter connect_timeout: %s", err)
-+ }
-+ duration := time.Duration(seconds) * time.Second
-+
-+ // connect_timeout should apply to the entire connection establishment
-+ // procedure, so we both use a timeout for the TCP connection
-+ // establishment and set a deadline for doing the initial handshake.
-+ // The deadline is then reset after startup() is done.
-+ deadline := time.Now().Add(duration)
-+ var conn net.Conn
-+ if dctx, ok := d.(DialerContext); ok {
-+ ctx, cancel := context.WithTimeout(ctx, duration)
-+ defer cancel()
-+ conn, err = dctx.DialContext(ctx, network, address)
-+ } else {
-+ conn, err = d.DialTimeout(network, address, duration)
-+ }
-+ if err != nil {
-+ return nil, err
-+ }
-+ err = conn.SetDeadline(deadline)
-+ return conn, err
-+ }
-+ if dctx, ok := d.(DialerContext); ok {
-+ return dctx.DialContext(ctx, network, address)
-+ }
-+ return d.Dial(network, address)
-+}
-+
-+func network(o values) (string, string) {
-+ host := o["host"]
-+
-+ if strings.HasPrefix(host, "/") {
-+ sockPath := path.Join(host, ".s.PGSQL."+o["port"])
-+ return "unix", sockPath
-+ }
-+
-+ return "tcp", net.JoinHostPort(host, o["port"])
-+}
-+
-+type values map[string]string
-+
-+// scanner implements a tokenizer for libpq-style option strings.
-+type scanner struct {
-+ s []rune
-+ i int
-+}
-+
-+// newScanner returns a new scanner initialized with the option string s.
-+func newScanner(s string) *scanner {
-+ return &scanner{[]rune(s), 0}
-+}
-+
-+// Next returns the next rune.
-+// It returns 0, false if the end of the text has been reached.
-+func (s *scanner) Next() (rune, bool) {
-+ if s.i >= len(s.s) {
-+ return 0, false
-+ }
-+ r := s.s[s.i]
-+ s.i++
-+ return r, true
-+}
-+
-+// SkipSpaces returns the next non-whitespace rune.
-+// It returns 0, false if the end of the text has been reached.
-+func (s *scanner) SkipSpaces() (rune, bool) {
-+ r, ok := s.Next()
-+ for unicode.IsSpace(r) && ok {
-+ r, ok = s.Next()
-+ }
-+ return r, ok
-+}
-+
-+// parseOpts parses the options from name and adds them to the values.
-+//
-+// The parsing code is based on conninfo_parse from libpq's fe-connect.c
-+func parseOpts(name string, o values) error {
-+ s := newScanner(name)
-+
-+ for {
-+ var (
-+ keyRunes, valRunes []rune
-+ r rune
-+ ok bool
-+ )
-+
-+ if r, ok = s.SkipSpaces(); !ok {
-+ break
-+ }
-+
-+ // Scan the key
-+ for !unicode.IsSpace(r) && r != '=' {
-+ keyRunes = append(keyRunes, r)
-+ if r, ok = s.Next(); !ok {
-+ break
-+ }
-+ }
-+
-+ // Skip any whitespace if we're not at the = yet
-+ if r != '=' {
-+ r, ok = s.SkipSpaces()
-+ }
-+
-+ // The current character should be =
-+ if r != '=' || !ok {
-+ return fmt.Errorf(`missing "=" after %q in connection info string"`, string(keyRunes))
-+ }
-+
-+ // Skip any whitespace after the =
-+ if r, ok = s.SkipSpaces(); !ok {
-+ // If we reach the end here, the last value is just an empty string as per libpq.
-+ o[string(keyRunes)] = ""
-+ break
-+ }
-+
-+ if r != '\'' {
-+ for !unicode.IsSpace(r) {
-+ if r == '\\' {
-+ if r, ok = s.Next(); !ok {
-+ return fmt.Errorf(`missing character after backslash`)
-+ }
-+ }
-+ valRunes = append(valRunes, r)
-+
-+ if r, ok = s.Next(); !ok {
-+ break
-+ }
-+ }
-+ } else {
-+ quote:
-+ for {
-+ if r, ok = s.Next(); !ok {
-+ return fmt.Errorf(`unterminated quoted string literal in connection string`)
-+ }
-+ switch r {
-+ case '\'':
-+ break quote
-+ case '\\':
-+ r, _ = s.Next()
-+ fallthrough
-+ default:
-+ valRunes = append(valRunes, r)
-+ }
-+ }
-+ }
-+
-+ o[string(keyRunes)] = string(valRunes)
-+ }
-+
-+ return nil
-+}
-+
-+func (cn *conn) isInTransaction() bool {
-+ return cn.txnStatus == txnStatusIdleInTransaction ||
-+ cn.txnStatus == txnStatusInFailedTransaction
-+}
-+
-+func (cn *conn) checkIsInTransaction(intxn bool) {
-+ if cn.isInTransaction() != intxn {
-+ cn.bad = true
-+ errorf("unexpected transaction status %v", cn.txnStatus)
-+ }
-+}
-+
-+func (cn *conn) Begin() (_ driver.Tx, err error) {
-+ return cn.begin("")
-+}
-+
-+func (cn *conn) begin(mode string) (_ driver.Tx, err error) {
-+ if cn.bad {
-+ return nil, driver.ErrBadConn
-+ }
-+ defer cn.errRecover(&err)
-+
-+ cn.checkIsInTransaction(false)
-+ _, commandTag, err := cn.simpleExec("BEGIN" + mode)
-+ if err != nil {
-+ return nil, err
-+ }
-+ if commandTag != "BEGIN" {
-+ cn.bad = true
-+ return nil, fmt.Errorf("unexpected command tag %s", commandTag)
-+ }
-+ if cn.txnStatus != txnStatusIdleInTransaction {
-+ cn.bad = true
-+ return nil, fmt.Errorf("unexpected transaction status %v", cn.txnStatus)
-+ }
-+ return cn, nil
-+}
-+
-+func (cn *conn) closeTxn() {
-+ if finish := cn.txnFinish; finish != nil {
-+ finish()
-+ }
-+}
-+
-+func (cn *conn) Commit() (err error) {
-+ defer cn.closeTxn()
-+ if cn.bad {
-+ return driver.ErrBadConn
-+ }
-+ defer cn.errRecover(&err)
-+
-+ cn.checkIsInTransaction(true)
-+ // We don't want the client to think that everything is okay if it tries
-+ // to commit a failed transaction. However, no matter what we return,
-+ // database/sql will release this connection back into the free connection
-+ // pool so we have to abort the current transaction here. Note that you
-+ // would get the same behaviour if you issued a COMMIT in a failed
-+ // transaction, so it's also the least surprising thing to do here.
-+ if cn.txnStatus == txnStatusInFailedTransaction {
-+ if err := cn.rollback(); err != nil {
-+ return err
-+ }
-+ return ErrInFailedTransaction
-+ }
-+
-+ _, commandTag, err := cn.simpleExec("COMMIT")
-+ if err != nil {
-+ if cn.isInTransaction() {
-+ cn.bad = true
-+ }
-+ return err
-+ }
-+ if commandTag != "COMMIT" {
-+ cn.bad = true
-+ return fmt.Errorf("unexpected command tag %s", commandTag)
-+ }
-+ cn.checkIsInTransaction(false)
-+ return nil
-+}
-+
-+func (cn *conn) Rollback() (err error) {
-+ defer cn.closeTxn()
-+ if cn.bad {
-+ return driver.ErrBadConn
-+ }
-+ defer cn.errRecover(&err)
-+ return cn.rollback()
-+}
-+
-+func (cn *conn) rollback() (err error) {
-+ cn.checkIsInTransaction(true)
-+ _, commandTag, err := cn.simpleExec("ROLLBACK")
-+ if err != nil {
-+ if cn.isInTransaction() {
-+ cn.bad = true
-+ }
-+ return err
-+ }
-+ if commandTag != "ROLLBACK" {
-+ return fmt.Errorf("unexpected command tag %s", commandTag)
-+ }
-+ cn.checkIsInTransaction(false)
-+ return nil
-+}
-+
-+func (cn *conn) gname() string {
-+ cn.namei++
-+ return strconv.FormatInt(int64(cn.namei), 10)
-+}
-+
-+func (cn *conn) simpleExec(q string) (res driver.Result, commandTag string, err error) {
-+ b := cn.writeBuf('Q')
-+ b.string(q)
-+ cn.send(b)
-+
-+ for {
-+ t, r := cn.recv1()
-+ switch t {
-+ case 'C':
-+ res, commandTag = cn.parseComplete(r.string())
-+ case 'Z':
-+ cn.processReadyForQuery(r)
-+ if res == nil && err == nil {
-+ err = errUnexpectedReady
-+ }
-+ // done
-+ return
-+ case 'E':
-+ err = parseError(r)
-+ case 'I':
-+ res = emptyRows
-+ case 'T', 'D':
-+ // ignore any results
-+ default:
-+ cn.bad = true
-+ errorf("unknown response for simple query: %q", t)
-+ }
-+ }
-+}
-+
-+func (cn *conn) simpleQuery(q string) (res *rows, err error) {
-+ defer cn.errRecover(&err)
-+
-+ b := cn.writeBuf('Q')
-+ b.string(q)
-+ cn.send(b)
-+
-+ for {
-+ t, r := cn.recv1()
-+ switch t {
-+ case 'C', 'I':
-+ // We allow queries which don't return any results through Query as
-+ // well as Exec. We still have to give database/sql a rows object
-+ // the user can close, though, to avoid connections from being
-+ // leaked. A "rows" with done=true works fine for that purpose.
-+ if err != nil {
-+ cn.bad = true
-+ errorf("unexpected message %q in simple query execution", t)
-+ }
-+ if res == nil {
-+ res = &rows{
-+ cn: cn,
-+ }
-+ }
-+ // Set the result and tag to the last command complete if there wasn't a
-+ // query already run. Although queries usually return from here and cede
-+ // control to Next, a query with zero results does not.
-+ if t == 'C' && res.colNames == nil {
-+ res.result, res.tag = cn.parseComplete(r.string())
-+ }
-+ res.done = true
-+ case 'Z':
-+ cn.processReadyForQuery(r)
-+ // done
-+ return
-+ case 'E':
-+ res = nil
-+ err = parseError(r)
-+ case 'D':
-+ if res == nil {
-+ cn.bad = true
-+ errorf("unexpected DataRow in simple query execution")
-+ }
-+ // the query didn't fail; kick off to Next
-+ cn.saveMessage(t, r)
-+ return
-+ case 'T':
-+ // res might be non-nil here if we received a previous
-+ // CommandComplete, but that's fine; just overwrite it
-+ res = &rows{cn: cn}
-+ res.rowsHeader = parsePortalRowDescribe(r)
-+
-+ // To work around a bug in QueryRow in Go 1.2 and earlier, wait
-+ // until the first DataRow has been received.
-+ default:
-+ cn.bad = true
-+ errorf("unknown response for simple query: %q", t)
-+ }
-+ }
-+}
-+
-+type noRows struct{}
-+
-+var emptyRows noRows
-+
-+var _ driver.Result = noRows{}
-+
-+func (noRows) LastInsertId() (int64, error) {
-+ return 0, errNoLastInsertID
-+}
-+
-+func (noRows) RowsAffected() (int64, error) {
-+ return 0, errNoRowsAffected
-+}
-+
-+// Decides which column formats to use for a prepared statement. The input is
-+// an array of type oids, one element per result column.
-+func decideColumnFormats(colTyps []fieldDesc, forceText bool) (colFmts []format, colFmtData []byte) {
-+ if len(colTyps) == 0 {
-+ return nil, colFmtDataAllText
-+ }
-+
-+ colFmts = make([]format, len(colTyps))
-+ if forceText {
-+ return colFmts, colFmtDataAllText
-+ }
-+
-+ allBinary := true
-+ allText := true
-+ for i, t := range colTyps {
-+ switch t.OID {
-+ // This is the list of types to use binary mode for when receiving them
-+ // through a prepared statement. If a type appears in this list, it
-+ // must also be implemented in binaryDecode in encode.go.
-+ case oid.T_bytea:
-+ fallthrough
-+ case oid.T_int8:
-+ fallthrough
-+ case oid.T_int4:
-+ fallthrough
-+ case oid.T_int2:
-+ fallthrough
-+ case oid.T_uuid:
-+ colFmts[i] = formatBinary
-+ allText = false
-+
-+ default:
-+ allBinary = false
-+ }
-+ }
-+
-+ if allBinary {
-+ return colFmts, colFmtDataAllBinary
-+ } else if allText {
-+ return colFmts, colFmtDataAllText
-+ } else {
-+ colFmtData = make([]byte, 2+len(colFmts)*2)
-+ binary.BigEndian.PutUint16(colFmtData, uint16(len(colFmts)))
-+ for i, v := range colFmts {
-+ binary.BigEndian.PutUint16(colFmtData[2+i*2:], uint16(v))
-+ }
-+ return colFmts, colFmtData
-+ }
-+}
-+
-+func (cn *conn) prepareTo(q, stmtName string) *stmt {
-+ st := &stmt{cn: cn, name: stmtName}
-+
-+ b := cn.writeBuf('P')
-+ b.string(st.name)
-+ b.string(q)
-+ b.int16(0)
-+
-+ b.next('D')
-+ b.byte('S')
-+ b.string(st.name)
-+
-+ b.next('S')
-+ cn.send(b)
-+
-+ cn.readParseResponse()
-+ st.paramTyps, st.colNames, st.colTyps = cn.readStatementDescribeResponse()
-+ st.colFmts, st.colFmtData = decideColumnFormats(st.colTyps, cn.disablePreparedBinaryResult)
-+ cn.readReadyForQuery()
-+ return st
-+}
-+
-+func (cn *conn) Prepare(q string) (_ driver.Stmt, err error) {
-+ if cn.bad {
-+ return nil, driver.ErrBadConn
-+ }
-+ defer cn.errRecover(&err)
-+
-+ if len(q) >= 4 && strings.EqualFold(q[:4], "COPY") {
-+ s, err := cn.prepareCopyIn(q)
-+ if err == nil {
-+ cn.inCopy = true
-+ }
-+ return s, err
-+ }
-+ return cn.prepareTo(q, cn.gname()), nil
-+}
-+
-+func (cn *conn) Close() (err error) {
-+ // Skip cn.bad return here because we always want to close a connection.
-+ defer cn.errRecover(&err)
-+
-+ // Ensure that cn.c.Close is always run. Since error handling is done with
-+ // panics and cn.errRecover, the Close must be in a defer.
-+ defer func() {
-+ cerr := cn.c.Close()
-+ if err == nil {
-+ err = cerr
-+ }
-+ }()
-+
-+ // Don't go through send(); ListenerConn relies on us not scribbling on the
-+ // scratch buffer of this connection.
-+ return cn.sendSimpleMessage('X')
-+}
-+
-+// Implement the "Queryer" interface
-+func (cn *conn) Query(query string, args []driver.Value) (driver.Rows, error) {
-+ return cn.query(query, args)
-+}
-+
-+func (cn *conn) query(query string, args []driver.Value) (_ *rows, err error) {
-+ if cn.bad {
-+ return nil, driver.ErrBadConn
-+ }
-+ if cn.inCopy {
-+ return nil, errCopyInProgress
-+ }
-+ defer cn.errRecover(&err)
-+
-+ // Check to see if we can use the "simpleQuery" interface, which is
-+ // *much* faster than going through prepare/exec
-+ if len(args) == 0 {
-+ return cn.simpleQuery(query)
-+ }
-+
-+ if cn.binaryParameters {
-+ cn.sendBinaryModeQuery(query, args)
-+
-+ cn.readParseResponse()
-+ cn.readBindResponse()
-+ rows := &rows{cn: cn}
-+ rows.rowsHeader = cn.readPortalDescribeResponse()
-+ cn.postExecuteWorkaround()
-+ return rows, nil
-+ }
-+ st := cn.prepareTo(query, "")
-+ st.exec(args)
-+ return &rows{
-+ cn: cn,
-+ rowsHeader: st.rowsHeader,
-+ }, nil
-+}
-+
-+// Implement the optional "Execer" interface for one-shot queries
-+func (cn *conn) Exec(query string, args []driver.Value) (res driver.Result, err error) {
-+ if cn.bad {
-+ return nil, driver.ErrBadConn
-+ }
-+ defer cn.errRecover(&err)
-+
-+ // Check to see if we can use the "simpleExec" interface, which is
-+ // *much* faster than going through prepare/exec
-+ if len(args) == 0 {
-+ // ignore commandTag, our caller doesn't care
-+ r, _, err := cn.simpleExec(query)
-+ return r, err
-+ }
-+
-+ if cn.binaryParameters {
-+ cn.sendBinaryModeQuery(query, args)
-+
-+ cn.readParseResponse()
-+ cn.readBindResponse()
-+ cn.readPortalDescribeResponse()
-+ cn.postExecuteWorkaround()
-+ res, _, err = cn.readExecuteResponse("Execute")
-+ return res, err
-+ }
-+ // Use the unnamed statement to defer planning until bind
-+ // time, or else value-based selectivity estimates cannot be
-+ // used.
-+ st := cn.prepareTo(query, "")
-+ r, err := st.Exec(args)
-+ if err != nil {
-+ panic(err)
-+ }
-+ return r, err
-+}
-+
-+func (cn *conn) send(m *writeBuf) {
-+ _, err := cn.c.Write(m.wrap())
-+ if err != nil {
-+ panic(err)
-+ }
-+}
-+
-+func (cn *conn) sendStartupPacket(m *writeBuf) error {
-+ _, err := cn.c.Write((m.wrap())[1:])
-+ return err
-+}
-+
-+// Send a message of type typ to the server on the other end of cn. The
-+// message should have no payload. This method does not use the scratch
-+// buffer.
-+func (cn *conn) sendSimpleMessage(typ byte) (err error) {
-+ _, err = cn.c.Write([]byte{typ, '\x00', '\x00', '\x00', '\x04'})
-+ return err
-+}
-+
-+// saveMessage memorizes a message and its buffer in the conn struct.
-+// recvMessage will then return these values on the next call to it. This
-+// method is useful in cases where you have to see what the next message is
-+// going to be (e.g. to see whether it's an error or not) but you can't handle
-+// the message yourself.
-+func (cn *conn) saveMessage(typ byte, buf *readBuf) {
-+ if cn.saveMessageType != 0 {
-+ cn.bad = true
-+ errorf("unexpected saveMessageType %d", cn.saveMessageType)
-+ }
-+ cn.saveMessageType = typ
-+ cn.saveMessageBuffer = *buf
-+}
-+
-+// recvMessage receives any message from the backend, or returns an error if
-+// a problem occurred while reading the message.
-+func (cn *conn) recvMessage(r *readBuf) (byte, error) {
-+ // workaround for a QueryRow bug, see exec
-+ if cn.saveMessageType != 0 {
-+ t := cn.saveMessageType
-+ *r = cn.saveMessageBuffer
-+ cn.saveMessageType = 0
-+ cn.saveMessageBuffer = nil
-+ return t, nil
-+ }
-+
-+ x := cn.scratch[:5]
-+ _, err := io.ReadFull(cn.buf, x)
-+ if err != nil {
-+ return 0, err
-+ }
-+
-+ // read the type and length of the message that follows
-+ t := x[0]
-+ n := int(binary.BigEndian.Uint32(x[1:])) - 4
-+ var y []byte
-+ if n <= len(cn.scratch) {
-+ y = cn.scratch[:n]
-+ } else {
-+ y = make([]byte, n)
-+ }
-+ _, err = io.ReadFull(cn.buf, y)
-+ if err != nil {
-+ return 0, err
-+ }
-+ *r = y
-+ return t, nil
-+}
-+
-+// recv receives a message from the backend, but if an error happened while
-+// reading the message or the received message was an ErrorResponse, it panics.
-+// NoticeResponses are ignored. This function should generally be used only
-+// during the startup sequence.
-+func (cn *conn) recv() (t byte, r *readBuf) {
-+ for {
-+ var err error
-+ r = &readBuf{}
-+ t, err = cn.recvMessage(r)
-+ if err != nil {
-+ panic(err)
-+ }
-+ switch t {
-+ case 'E':
-+ panic(parseError(r))
-+ case 'N':
-+ // ignore
-+ default:
-+ return
-+ }
-+ }
-+}
-+
-+// recv1Buf is exactly equivalent to recv1, except it uses a buffer supplied by
-+// the caller to avoid an allocation.
-+func (cn *conn) recv1Buf(r *readBuf) byte {
-+ for {
-+ t, err := cn.recvMessage(r)
-+ if err != nil {
-+ panic(err)
-+ }
-+
-+ switch t {
-+ case 'A', 'N':
-+ // ignore
-+ case 'S':
-+ cn.processParameterStatus(r)
-+ default:
-+ return t
-+ }
-+ }
-+}
-+
-+// recv1 receives a message from the backend, panicking if an error occurs
-+// while attempting to read it. All asynchronous messages are ignored, with
-+// the exception of ErrorResponse.
-+func (cn *conn) recv1() (t byte, r *readBuf) {
-+ r = &readBuf{}
-+ t = cn.recv1Buf(r)
-+ return t, r
-+}
-+
-+func (cn *conn) ssl(o values) error {
-+ upgrade, err := ssl(o)
-+ if err != nil {
-+ return err
-+ }
-+
-+ if upgrade == nil {
-+ // Nothing to do
-+ return nil
-+ }
-+
-+ w := cn.writeBuf(0)
-+ w.int32(80877103)
-+ if err = cn.sendStartupPacket(w); err != nil {
-+ return err
-+ }
-+
-+ b := cn.scratch[:1]
-+ _, err = io.ReadFull(cn.c, b)
-+ if err != nil {
-+ return err
-+ }
-+
-+ if b[0] != 'S' {
-+ return ErrSSLNotSupported
-+ }
-+
-+ cn.c, err = upgrade(cn.c)
-+ return err
-+}
-+
-+// isDriverSetting returns true iff a setting is purely for configuring the
-+// driver's options and should not be sent to the server in the connection
-+// startup packet.
-+func isDriverSetting(key string) bool {
-+ switch key {
-+ case "host", "port":
-+ return true
-+ case "password":
-+ return true
-+ case "sslmode", "sslcert", "sslkey", "sslrootcert":
-+ return true
-+ case "fallback_application_name":
-+ return true
-+ case "connect_timeout":
-+ return true
-+ case "disable_prepared_binary_result":
-+ return true
-+ case "binary_parameters":
-+ return true
-+
-+ default:
-+ return false
-+ }
-+}
-+
-+func (cn *conn) startup(o values) {
-+ w := cn.writeBuf(0)
-+ w.int32(196608)
-+ // Send the backend the name of the database we want to connect to, and the
-+ // user we want to connect as. Additionally, we send over any run-time
-+ // parameters potentially included in the connection string. If the server
-+ // doesn't recognize any of them, it will reply with an error.
-+ for k, v := range o {
-+ if isDriverSetting(k) {
-+ // skip options which can't be run-time parameters
-+ continue
-+ }
-+ // The protocol requires us to supply the database name as "database"
-+ // instead of "dbname".
-+ if k == "dbname" {
-+ k = "database"
-+ }
-+ w.string(k)
-+ w.string(v)
-+ }
-+ w.string("")
-+ if err := cn.sendStartupPacket(w); err != nil {
-+ panic(err)
-+ }
-+
-+ for {
-+ t, r := cn.recv()
-+ switch t {
-+ case 'K':
-+ cn.processBackendKeyData(r)
-+ case 'S':
-+ cn.processParameterStatus(r)
-+ case 'R':
-+ cn.auth(r, o)
-+ case 'Z':
-+ cn.processReadyForQuery(r)
-+ return
-+ default:
-+ errorf("unknown response for startup: %q", t)
-+ }
-+ }
-+}
-+
-+func (cn *conn) auth(r *readBuf, o values) {
-+ switch code := r.int32(); code {
-+ case 0:
-+ // OK
-+ case 3:
-+ w := cn.writeBuf('p')
-+ w.string(o["password"])
-+ cn.send(w)
-+
-+ t, r := cn.recv()
-+ if t != 'R' {
-+ errorf("unexpected password response: %q", t)
-+ }
-+
-+ if r.int32() != 0 {
-+ errorf("unexpected authentication response: %q", t)
-+ }
-+ case 5:
-+ s := string(r.next(4))
-+ w := cn.writeBuf('p')
-+ w.string("md5" + md5s(md5s(o["password"]+o["user"])+s))
-+ cn.send(w)
-+
-+ t, r := cn.recv()
-+ if t != 'R' {
-+ errorf("unexpected password response: %q", t)
-+ }
-+
-+ if r.int32() != 0 {
-+ errorf("unexpected authentication response: %q", t)
-+ }
-+ case 10:
-+ sc := scram.NewClient(sha256.New, o["user"], o["password"])
-+ sc.Step(nil)
-+ if sc.Err() != nil {
-+ errorf("SCRAM-SHA-256 error: %s", sc.Err().Error())
-+ }
-+ scOut := sc.Out()
-+
-+ w := cn.writeBuf('p')
-+ w.string("SCRAM-SHA-256")
-+ w.int32(len(scOut))
-+ w.bytes(scOut)
-+ cn.send(w)
-+
-+ t, r := cn.recv()
-+ if t != 'R' {
-+ errorf("unexpected password response: %q", t)
-+ }
-+
-+ if r.int32() != 11 {
-+ errorf("unexpected authentication response: %q", t)
-+ }
-+
-+ nextStep := r.next(len(*r))
-+ sc.Step(nextStep)
-+ if sc.Err() != nil {
-+ errorf("SCRAM-SHA-256 error: %s", sc.Err().Error())
-+ }
-+
-+ scOut = sc.Out()
-+ w = cn.writeBuf('p')
-+ w.bytes(scOut)
-+ cn.send(w)
-+
-+ t, r = cn.recv()
-+ if t != 'R' {
-+ errorf("unexpected password response: %q", t)
-+ }
-+
-+ if r.int32() != 12 {
-+ errorf("unexpected authentication response: %q", t)
-+ }
-+
-+ nextStep = r.next(len(*r))
-+ sc.Step(nextStep)
-+ if sc.Err() != nil {
-+ errorf("SCRAM-SHA-256 error: %s", sc.Err().Error())
-+ }
-+
-+ default:
-+ errorf("unknown authentication response: %d", code)
-+ }
-+}
-+
-+type format int
-+
-+const formatText format = 0
-+const formatBinary format = 1
-+
-+// One result-column format code with the value 1 (i.e. all binary).
-+var colFmtDataAllBinary = []byte{0, 1, 0, 1}
-+
-+// No result-column format codes (i.e. all text).
-+var colFmtDataAllText = []byte{0, 0}
-+
-+type stmt struct {
-+ cn *conn
-+ name string
-+ rowsHeader
-+ colFmtData []byte
-+ paramTyps []oid.Oid
-+ closed bool
-+}
-+
-+func (st *stmt) Close() (err error) {
-+ if st.closed {
-+ return nil
-+ }
-+ if st.cn.bad {
-+ return driver.ErrBadConn
-+ }
-+ defer st.cn.errRecover(&err)
-+
-+ w := st.cn.writeBuf('C')
-+ w.byte('S')
-+ w.string(st.name)
-+ st.cn.send(w)
-+
-+ st.cn.send(st.cn.writeBuf('S'))
-+
-+ t, _ := st.cn.recv1()
-+ if t != '3' {
-+ st.cn.bad = true
-+ errorf("unexpected close response: %q", t)
-+ }
-+ st.closed = true
-+
-+ t, r := st.cn.recv1()
-+ if t != 'Z' {
-+ st.cn.bad = true
-+ errorf("expected ready for query, but got: %q", t)
-+ }
-+ st.cn.processReadyForQuery(r)
-+
-+ return nil
-+}
-+
-+func (st *stmt) Query(v []driver.Value) (r driver.Rows, err error) {
-+ if st.cn.bad {
-+ return nil, driver.ErrBadConn
-+ }
-+ defer st.cn.errRecover(&err)
-+
-+ st.exec(v)
-+ return &rows{
-+ cn: st.cn,
-+ rowsHeader: st.rowsHeader,
-+ }, nil
-+}
-+
-+func (st *stmt) Exec(v []driver.Value) (res driver.Result, err error) {
-+ if st.cn.bad {
-+ return nil, driver.ErrBadConn
-+ }
-+ defer st.cn.errRecover(&err)
-+
-+ st.exec(v)
-+ res, _, err = st.cn.readExecuteResponse("simple query")
-+ return res, err
-+}
-+
-+func (st *stmt) exec(v []driver.Value) {
-+ if len(v) >= 65536 {
-+ errorf("got %d parameters but PostgreSQL only supports 65535 parameters", len(v))
-+ }
-+ if len(v) != len(st.paramTyps) {
-+ errorf("got %d parameters but the statement requires %d", len(v), len(st.paramTyps))
-+ }
-+
-+ cn := st.cn
-+ w := cn.writeBuf('B')
-+ w.byte(0) // unnamed portal
-+ w.string(st.name)
-+
-+ if cn.binaryParameters {
-+ cn.sendBinaryParameters(w, v)
-+ } else {
-+ w.int16(0)
-+ w.int16(len(v))
-+ for i, x := range v {
-+ if x == nil {
-+ w.int32(-1)
-+ } else {
-+ b := encode(&cn.parameterStatus, x, st.paramTyps[i])
-+ w.int32(len(b))
-+ w.bytes(b)
-+ }
-+ }
-+ }
-+ w.bytes(st.colFmtData)
-+
-+ w.next('E')
-+ w.byte(0)
-+ w.int32(0)
-+
-+ w.next('S')
-+ cn.send(w)
-+
-+ cn.readBindResponse()
-+ cn.postExecuteWorkaround()
-+
-+}
-+
-+func (st *stmt) NumInput() int {
-+ return len(st.paramTyps)
-+}
-+
-+// parseComplete parses the "command tag" from a CommandComplete message, and
-+// returns the number of rows affected (if applicable) and a string
-+// identifying only the command that was executed, e.g. "ALTER TABLE". If the
-+// command tag could not be parsed, parseComplete panics.
-+func (cn *conn) parseComplete(commandTag string) (driver.Result, string) {
-+ commandsWithAffectedRows := []string{
-+ "SELECT ",
-+ // INSERT is handled below
-+ "UPDATE ",
-+ "DELETE ",
-+ "FETCH ",
-+ "MOVE ",
-+ "COPY ",
-+ }
-+
-+ var affectedRows *string
-+ for _, tag := range commandsWithAffectedRows {
-+ if strings.HasPrefix(commandTag, tag) {
-+ t := commandTag[len(tag):]
-+ affectedRows = &t
-+ commandTag = tag[:len(tag)-1]
-+ break
-+ }
-+ }
-+ // INSERT also includes the oid of the inserted row in its command tag.
-+ // Oids in user tables are deprecated, and the oid is only returned when
-+ // exactly one row is inserted, so it's unlikely to be of value to any
-+ // real-world application and we can ignore it.
-+ if affectedRows == nil && strings.HasPrefix(commandTag, "INSERT ") {
-+ parts := strings.Split(commandTag, " ")
-+ if len(parts) != 3 {
-+ cn.bad = true
-+ errorf("unexpected INSERT command tag %s", commandTag)
-+ }
-+ affectedRows = &parts[len(parts)-1]
-+ commandTag = "INSERT"
-+ }
-+ // There should be no affected rows attached to the tag, just return it
-+ if affectedRows == nil {
-+ return driver.RowsAffected(0), commandTag
-+ }
-+ n, err := strconv.ParseInt(*affectedRows, 10, 64)
-+ if err != nil {
-+ cn.bad = true
-+ errorf("could not parse commandTag: %s", err)
-+ }
-+ return driver.RowsAffected(n), commandTag
-+}
-+
-+type rowsHeader struct {
-+ colNames []string
-+ colTyps []fieldDesc
-+ colFmts []format
-+}
-+
-+type rows struct {
-+ cn *conn
-+ finish func()
-+ rowsHeader
-+ done bool
-+ rb readBuf
-+ result driver.Result
-+ tag string
-+
-+ next *rowsHeader
-+}
-+
-+func (rs *rows) Close() error {
-+ if finish := rs.finish; finish != nil {
-+ defer finish()
-+ }
-+ // no need to look at cn.bad as Next() will
-+ for {
-+ err := rs.Next(nil)
-+ switch err {
-+ case nil:
-+ case io.EOF:
-+ // rs.Next can return io.EOF on both 'Z' (ready for query) and 'T' (row
-+ // description, used with HasNextResultSet). We need to fetch messages until
-+ // we hit a 'Z', which is done by waiting for done to be set.
-+ if rs.done {
-+ return nil
-+ }
-+ default:
-+ return err
-+ }
-+ }
-+}
-+
-+func (rs *rows) Columns() []string {
-+ return rs.colNames
-+}
-+
-+func (rs *rows) Result() driver.Result {
-+ if rs.result == nil {
-+ return emptyRows
-+ }
-+ return rs.result
-+}
-+
-+func (rs *rows) Tag() string {
-+ return rs.tag
-+}
-+
-+func (rs *rows) Next(dest []driver.Value) (err error) {
-+ if rs.done {
-+ return io.EOF
-+ }
-+
-+ conn := rs.cn
-+ if conn.bad {
-+ return driver.ErrBadConn
-+ }
-+ defer conn.errRecover(&err)
-+
-+ for {
-+ t := conn.recv1Buf(&rs.rb)
-+ switch t {
-+ case 'E':
-+ err = parseError(&rs.rb)
-+ case 'C', 'I':
-+ if t == 'C' {
-+ rs.result, rs.tag = conn.parseComplete(rs.rb.string())
-+ }
-+ continue
-+ case 'Z':
-+ conn.processReadyForQuery(&rs.rb)
-+ rs.done = true
-+ if err != nil {
-+ return err
-+ }
-+ return io.EOF
-+ case 'D':
-+ n := rs.rb.int16()
-+ if err != nil {
-+ conn.bad = true
-+ errorf("unexpected DataRow after error %s", err)
-+ }
-+ if n < len(dest) {
-+ dest = dest[:n]
-+ }
-+ for i := range dest {
-+ l := rs.rb.int32()
-+ if l == -1 {
-+ dest[i] = nil
-+ continue
-+ }
-+ dest[i] = decode(&conn.parameterStatus, rs.rb.next(l), rs.colTyps[i].OID, rs.colFmts[i])
-+ }
-+ return
-+ case 'T':
-+ next := parsePortalRowDescribe(&rs.rb)
-+ rs.next = &next
-+ return io.EOF
-+ default:
-+ errorf("unexpected message after execute: %q", t)
-+ }
-+ }
-+}
-+
-+func (rs *rows) HasNextResultSet() bool {
-+ hasNext := rs.next != nil && !rs.done
-+ return hasNext
-+}
-+
-+func (rs *rows) NextResultSet() error {
-+ if rs.next == nil {
-+ return io.EOF
-+ }
-+ rs.rowsHeader = *rs.next
-+ rs.next = nil
-+ return nil
-+}
-+
-+// QuoteIdentifier quotes an "identifier" (e.g. a table or a column name) to be
-+// used as part of an SQL statement. For example:
-+//
-+// tblname := "my_table"
-+// data := "my_data"
-+// quoted := pq.QuoteIdentifier(tblname)
-+// err := db.Exec(fmt.Sprintf("INSERT INTO %s VALUES ($1)", quoted), data)
-+//
-+// Any double quotes in name will be escaped. The quoted identifier will be
-+// case sensitive when used in a query. If the input string contains a zero
-+// byte, the result will be truncated immediately before it.
-+func QuoteIdentifier(name string) string {
-+ end := strings.IndexRune(name, 0)
-+ if end > -1 {
-+ name = name[:end]
-+ }
-+ return `"` + strings.Replace(name, `"`, `""`, -1) + `"`
-+}
-+
-+// QuoteLiteral quotes a 'literal' (e.g. a parameter, often used to pass literal
-+// to DDL and other statements that do not accept parameters) to be used as part
-+// of an SQL statement. For example:
-+//
-+// exp_date := pq.QuoteLiteral("2023-01-05 15:00:00Z")
-+// err := db.Exec(fmt.Sprintf("CREATE ROLE my_user VALID UNTIL %s", exp_date))
-+//
-+// Any single quotes in name will be escaped. Any backslashes (i.e. "\") will be
-+// replaced by two backslashes (i.e. "\\") and the C-style escape identifier
-+// that PostgreSQL provides ('E') will be prepended to the string.
-+func QuoteLiteral(literal string) string {
-+ // This follows the PostgreSQL internal algorithm for handling quoted literals
-+ // from libpq, which can be found in the "PQEscapeStringInternal" function,
-+ // which is found in the libpq/fe-exec.c source file:
-+ // https://git.postgresql.org/gitweb/?p=postgresql.git;a=blob;f=src/interfaces/libpq/fe-exec.c
-+ //
-+ // substitute any single-quotes (') with two single-quotes ('')
-+ literal = strings.Replace(literal, `'`, `''`, -1)
-+ // determine if the string has any backslashes (\) in it.
-+ // if it does, replace any backslashes (\) with two backslashes (\\)
-+ // then, we need to wrap the entire string with a PostgreSQL
-+ // C-style escape. Per how "PQEscapeStringInternal" handles this case, we
-+ // also add a space before the "E"
-+ if strings.Contains(literal, `\`) {
-+ literal = strings.Replace(literal, `\`, `\\`, -1)
-+ literal = ` E'` + literal + `'`
-+ } else {
-+ // otherwise, we can just wrap the literal with a pair of single quotes
-+ literal = `'` + literal + `'`
-+ }
-+ return literal
-+}
-+
-+func md5s(s string) string {
-+ h := md5.New()
-+ h.Write([]byte(s))
-+ return fmt.Sprintf("%x", h.Sum(nil))
-+}
-+
-+func (cn *conn) sendBinaryParameters(b *writeBuf, args []driver.Value) {
-+ // Do one pass over the parameters to see if we're going to send any of
-+ // them over in binary. If we are, create a paramFormats array at the
-+ // same time.
-+ var paramFormats []int
-+ for i, x := range args {
-+ _, ok := x.([]byte)
-+ if ok {
-+ if paramFormats == nil {
-+ paramFormats = make([]int, len(args))
-+ }
-+ paramFormats[i] = 1
-+ }
-+ }
-+ if paramFormats == nil {
-+ b.int16(0)
-+ } else {
-+ b.int16(len(paramFormats))
-+ for _, x := range paramFormats {
-+ b.int16(x)
-+ }
-+ }
-+
-+ b.int16(len(args))
-+ for _, x := range args {
-+ if x == nil {
-+ b.int32(-1)
-+ } else {
-+ datum := binaryEncode(&cn.parameterStatus, x)
-+ b.int32(len(datum))
-+ b.bytes(datum)
-+ }
-+ }
-+}
-+
-+func (cn *conn) sendBinaryModeQuery(query string, args []driver.Value) {
-+ if len(args) >= 65536 {
-+ errorf("got %d parameters but PostgreSQL only supports 65535 parameters", len(args))
-+ }
-+
-+ b := cn.writeBuf('P')
-+ b.byte(0) // unnamed statement
-+ b.string(query)
-+ b.int16(0)
-+
-+ b.next('B')
-+ b.int16(0) // unnamed portal and statement
-+ cn.sendBinaryParameters(b, args)
-+ b.bytes(colFmtDataAllText)
-+
-+ b.next('D')
-+ b.byte('P')
-+ b.byte(0) // unnamed portal
-+
-+ b.next('E')
-+ b.byte(0)
-+ b.int32(0)
-+
-+ b.next('S')
-+ cn.send(b)
-+}
-+
-+func (cn *conn) processParameterStatus(r *readBuf) {
-+ var err error
-+
-+ param := r.string()
-+ switch param {
-+ case "server_version":
-+ var major1 int
-+ var major2 int
-+ var minor int
-+ _, err = fmt.Sscanf(r.string(), "%d.%d.%d", &major1, &major2, &minor)
-+ if err == nil {
-+ cn.parameterStatus.serverVersion = major1*10000 + major2*100 + minor
-+ }
-+
-+ case "TimeZone":
-+ cn.parameterStatus.currentLocation, err = time.LoadLocation(r.string())
-+ if err != nil {
-+ cn.parameterStatus.currentLocation = nil
-+ }
-+
-+ default:
-+ // ignore
-+ }
-+}
-+
-+func (cn *conn) processReadyForQuery(r *readBuf) {
-+ cn.txnStatus = transactionStatus(r.byte())
-+}
-+
-+func (cn *conn) readReadyForQuery() {
-+ t, r := cn.recv1()
-+ switch t {
-+ case 'Z':
-+ cn.processReadyForQuery(r)
-+ return
-+ default:
-+ cn.bad = true
-+ errorf("unexpected message %q; expected ReadyForQuery", t)
-+ }
-+}
-+
-+func (cn *conn) processBackendKeyData(r *readBuf) {
-+ cn.processID = r.int32()
-+ cn.secretKey = r.int32()
-+}
-+
-+func (cn *conn) readParseResponse() {
-+ t, r := cn.recv1()
-+ switch t {
-+ case '1':
-+ return
-+ case 'E':
-+ err := parseError(r)
-+ cn.readReadyForQuery()
-+ panic(err)
-+ default:
-+ cn.bad = true
-+ errorf("unexpected Parse response %q", t)
-+ }
-+}
-+
-+func (cn *conn) readStatementDescribeResponse() (paramTyps []oid.Oid, colNames []string, colTyps []fieldDesc) {
-+ for {
-+ t, r := cn.recv1()
-+ switch t {
-+ case 't':
-+ nparams := r.int16()
-+ paramTyps = make([]oid.Oid, nparams)
-+ for i := range paramTyps {
-+ paramTyps[i] = r.oid()
-+ }
-+ case 'n':
-+ return paramTyps, nil, nil
-+ case 'T':
-+ colNames, colTyps = parseStatementRowDescribe(r)
-+ return paramTyps, colNames, colTyps
-+ case 'E':
-+ err := parseError(r)
-+ cn.readReadyForQuery()
-+ panic(err)
-+ default:
-+ cn.bad = true
-+ errorf("unexpected Describe statement response %q", t)
-+ }
-+ }
-+}
-+
-+func (cn *conn) readPortalDescribeResponse() rowsHeader {
-+ t, r := cn.recv1()
-+ switch t {
-+ case 'T':
-+ return parsePortalRowDescribe(r)
-+ case 'n':
-+ return rowsHeader{}
-+ case 'E':
-+ err := parseError(r)
-+ cn.readReadyForQuery()
-+ panic(err)
-+ default:
-+ cn.bad = true
-+ errorf("unexpected Describe response %q", t)
-+ }
-+ panic("not reached")
-+}
-+
-+func (cn *conn) readBindResponse() {
-+ t, r := cn.recv1()
-+ switch t {
-+ case '2':
-+ return
-+ case 'E':
-+ err := parseError(r)
-+ cn.readReadyForQuery()
-+ panic(err)
-+ default:
-+ cn.bad = true
-+ errorf("unexpected Bind response %q", t)
-+ }
-+}
-+
-+func (cn *conn) postExecuteWorkaround() {
-+ // Work around a bug in sql.DB.QueryRow: in Go 1.2 and earlier it ignores
-+ // any errors from rows.Next, which masks errors that happened during the
-+ // execution of the query. To avoid the problem in common cases, we wait
-+ // here for one more message from the database. If it's not an error the
-+ // query will likely succeed (or perhaps has already, if it's a
-+ // CommandComplete), so we push the message into the conn struct; recv1
-+ // will return it as the next message for rows.Next or rows.Close.
-+ // However, if it's an error, we wait until ReadyForQuery and then return
-+ // the error to our caller.
-+ for {
-+ t, r := cn.recv1()
-+ switch t {
-+ case 'E':
-+ err := parseError(r)
-+ cn.readReadyForQuery()
-+ panic(err)
-+ case 'C', 'D', 'I':
-+ // the query didn't fail, but we can't process this message
-+ cn.saveMessage(t, r)
-+ return
-+ default:
-+ cn.bad = true
-+ errorf("unexpected message during extended query execution: %q", t)
-+ }
-+ }
-+}
-+
-+// Only for Exec(), since we ignore the returned data
-+func (cn *conn) readExecuteResponse(protocolState string) (res driver.Result, commandTag string, err error) {
-+ for {
-+ t, r := cn.recv1()
-+ switch t {
-+ case 'C':
-+ if err != nil {
-+ cn.bad = true
-+ errorf("unexpected CommandComplete after error %s", err)
-+ }
-+ res, commandTag = cn.parseComplete(r.string())
-+ case 'Z':
-+ cn.processReadyForQuery(r)
-+ if res == nil && err == nil {
-+ err = errUnexpectedReady
-+ }
-+ return res, commandTag, err
-+ case 'E':
-+ err = parseError(r)
-+ case 'T', 'D', 'I':
-+ if err != nil {
-+ cn.bad = true
-+ errorf("unexpected %q after error %s", t, err)
-+ }
-+ if t == 'I' {
-+ res = emptyRows
-+ }
-+ // ignore any results
-+ default:
-+ cn.bad = true
-+ errorf("unknown %s response: %q", protocolState, t)
-+ }
-+ }
-+}
-+
-+func parseStatementRowDescribe(r *readBuf) (colNames []string, colTyps []fieldDesc) {
-+ n := r.int16()
-+ colNames = make([]string, n)
-+ colTyps = make([]fieldDesc, n)
-+ for i := range colNames {
-+ colNames[i] = r.string()
-+ r.next(6)
-+ colTyps[i].OID = r.oid()
-+ colTyps[i].Len = r.int16()
-+ colTyps[i].Mod = r.int32()
-+ // format code not known when describing a statement; always 0
-+ r.next(2)
-+ }
-+ return
-+}
-+
-+func parsePortalRowDescribe(r *readBuf) rowsHeader {
-+ n := r.int16()
-+ colNames := make([]string, n)
-+ colFmts := make([]format, n)
-+ colTyps := make([]fieldDesc, n)
-+ for i := range colNames {
-+ colNames[i] = r.string()
-+ r.next(6)
-+ colTyps[i].OID = r.oid()
-+ colTyps[i].Len = r.int16()
-+ colTyps[i].Mod = r.int32()
-+ colFmts[i] = format(r.int16())
-+ }
-+ return rowsHeader{
-+ colNames: colNames,
-+ colFmts: colFmts,
-+ colTyps: colTyps,
-+ }
-+}
-+
-+// parseEnviron tries to mimic some of libpq's environment handling
-+//
-+// To ease testing, it does not directly reference os.Environ, but is
-+// designed to accept its output.
-+//
-+// Environment-set connection information is intended to have a higher
-+// precedence than a library default but lower than any explicitly
-+// passed information (such as in the URL or connection string).
-+func parseEnviron(env []string) (out map[string]string) {
-+ out = make(map[string]string)
-+
-+ for _, v := range env {
-+ parts := strings.SplitN(v, "=", 2)
-+
-+ accrue := func(keyname string) {
-+ out[keyname] = parts[1]
-+ }
-+ unsupported := func() {
-+ panic(fmt.Sprintf("setting %v not supported", parts[0]))
-+ }
-+
-+ // The order of these is the same as is seen in the
-+ // PostgreSQL 9.1 manual. Unsupported but well-defined
-+ // keys cause a panic; these should be unset prior to
-+ // execution. Options which pq expects to be set to a
-+ // certain value are allowed, but must be set to that
-+ // value if present (they can, of course, be absent).
-+ switch parts[0] {
-+ case "PGHOST":
-+ accrue("host")
-+ case "PGHOSTADDR":
-+ unsupported()
-+ case "PGPORT":
-+ accrue("port")
-+ case "PGDATABASE":
-+ accrue("dbname")
-+ case "PGUSER":
-+ accrue("user")
-+ case "PGPASSWORD":
-+ accrue("password")
-+ case "PGSERVICE", "PGSERVICEFILE", "PGREALM":
-+ unsupported()
-+ case "PGOPTIONS":
-+ accrue("options")
-+ case "PGAPPNAME":
-+ accrue("application_name")
-+ case "PGSSLMODE":
-+ accrue("sslmode")
-+ case "PGSSLCERT":
-+ accrue("sslcert")
-+ case "PGSSLKEY":
-+ accrue("sslkey")
-+ case "PGSSLROOTCERT":
-+ accrue("sslrootcert")
-+ case "PGREQUIRESSL", "PGSSLCRL":
-+ unsupported()
-+ case "PGREQUIREPEER":
-+ unsupported()
-+ case "PGKRBSRVNAME", "PGGSSLIB":
-+ unsupported()
-+ case "PGCONNECT_TIMEOUT":
-+ accrue("connect_timeout")
-+ case "PGCLIENTENCODING":
-+ accrue("client_encoding")
-+ case "PGDATESTYLE":
-+ accrue("datestyle")
-+ case "PGTZ":
-+ accrue("timezone")
-+ case "PGGEQO":
-+ accrue("geqo")
-+ case "PGSYSCONFDIR", "PGLOCALEDIR":
-+ unsupported()
-+ }
-+ }
-+
-+ return out
-+}
-+
-+// isUTF8 returns whether name is a fuzzy variation of the string "UTF-8".
-+func isUTF8(name string) bool {
-+ // Recognize all sorts of silly things as "UTF-8", like Postgres does
-+ s := strings.Map(alnumLowerASCII, name)
-+ return s == "utf8" || s == "unicode"
-+}
-+
-+func alnumLowerASCII(ch rune) rune {
-+ if 'A' <= ch && ch <= 'Z' {
-+ return ch + ('a' - 'A')
-+ }
-+ if 'a' <= ch && ch <= 'z' || '0' <= ch && ch <= '9' {
-+ return ch
-+ }
-+ return -1 // discard
-+}
-diff --git a/vendor/github.com/lib/pq/conn_go18.go b/vendor/github.com/lib/pq/conn_go18.go
-new file mode 100644
-index 00000000000..0fdd06a617c
---- /dev/null
-+++ b/vendor/github.com/lib/pq/conn_go18.go
-@@ -0,0 +1,149 @@
-+package pq
-+
-+import (
-+ "context"
-+ "database/sql"
-+ "database/sql/driver"
-+ "fmt"
-+ "io"
-+ "io/ioutil"
-+ "time"
-+)
-+
-+// Implement the "QueryerContext" interface
-+func (cn *conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
-+ list := make([]driver.Value, len(args))
-+ for i, nv := range args {
-+ list[i] = nv.Value
-+ }
-+ finish := cn.watchCancel(ctx)
-+ r, err := cn.query(query, list)
-+ if err != nil {
-+ if finish != nil {
-+ finish()
-+ }
-+ return nil, err
-+ }
-+ r.finish = finish
-+ return r, nil
-+}
-+
-+// Implement the "ExecerContext" interface
-+func (cn *conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
-+ list := make([]driver.Value, len(args))
-+ for i, nv := range args {
-+ list[i] = nv.Value
-+ }
-+
-+ if finish := cn.watchCancel(ctx); finish != nil {
-+ defer finish()
-+ }
-+
-+ return cn.Exec(query, list)
-+}
-+
-+// Implement the "ConnBeginTx" interface
-+func (cn *conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
-+ var mode string
-+
-+ switch sql.IsolationLevel(opts.Isolation) {
-+ case sql.LevelDefault:
-+ // Don't touch mode: use the server's default
-+ case sql.LevelReadUncommitted:
-+ mode = " ISOLATION LEVEL READ UNCOMMITTED"
-+ case sql.LevelReadCommitted:
-+ mode = " ISOLATION LEVEL READ COMMITTED"
-+ case sql.LevelRepeatableRead:
-+ mode = " ISOLATION LEVEL REPEATABLE READ"
-+ case sql.LevelSerializable:
-+ mode = " ISOLATION LEVEL SERIALIZABLE"
-+ default:
-+ return nil, fmt.Errorf("pq: isolation level not supported: %d", opts.Isolation)
-+ }
-+
-+ if opts.ReadOnly {
-+ mode += " READ ONLY"
-+ } else {
-+ mode += " READ WRITE"
-+ }
-+
-+ tx, err := cn.begin(mode)
-+ if err != nil {
-+ return nil, err
-+ }
-+ cn.txnFinish = cn.watchCancel(ctx)
-+ return tx, nil
-+}
-+
-+func (cn *conn) Ping(ctx context.Context) error {
-+ if finish := cn.watchCancel(ctx); finish != nil {
-+ defer finish()
-+ }
-+ rows, err := cn.simpleQuery("SELECT 'lib/pq ping test';")
-+ if err != nil {
-+ return driver.ErrBadConn // https://golang.org/pkg/database/sql/driver/#Pinger
-+ }
-+ rows.Close()
-+ return nil
-+}
-+
-+func (cn *conn) watchCancel(ctx context.Context) func() {
-+ if done := ctx.Done(); done != nil {
-+ finished := make(chan struct{})
-+ go func() {
-+ select {
-+ case <-done:
-+ // At this point the function level context is canceled,
-+ // so it must not be used for the additional network
-+ // request to cancel the query.
-+ // Create a new context to pass into the dial.
-+ ctxCancel, cancel := context.WithTimeout(context.Background(), time.Second*10)
-+ defer cancel()
-+
-+ _ = cn.cancel(ctxCancel)
-+ finished <- struct{}{}
-+ case <-finished:
-+ }
-+ }()
-+ return func() {
-+ select {
-+ case <-finished:
-+ case finished <- struct{}{}:
-+ }
-+ }
-+ }
-+ return nil
-+}
-+
-+func (cn *conn) cancel(ctx context.Context) error {
-+ c, err := dial(ctx, cn.dialer, cn.opts)
-+ if err != nil {
-+ return err
-+ }
-+ defer c.Close()
-+
-+ {
-+ can := conn{
-+ c: c,
-+ }
-+ err = can.ssl(cn.opts)
-+ if err != nil {
-+ return err
-+ }
-+
-+ w := can.writeBuf(0)
-+ w.int32(80877102) // cancel request code
-+ w.int32(cn.processID)
-+ w.int32(cn.secretKey)
-+
-+ if err := can.sendStartupPacket(w); err != nil {
-+ return err
-+ }
-+ }
-+
-+ // Read until EOF to ensure that the server received the cancel.
-+ {
-+ _, err := io.Copy(ioutil.Discard, c)
-+ return err
-+ }
-+}
-diff --git a/vendor/github.com/lib/pq/conn_test.go b/vendor/github.com/lib/pq/conn_test.go
-new file mode 100644
-index 00000000000..0d25c95548d
---- /dev/null
-+++ b/vendor/github.com/lib/pq/conn_test.go
-@@ -0,0 +1,1777 @@
-+package pq
-+
-+import (
-+ "context"
-+ "database/sql"
-+ "database/sql/driver"
-+ "fmt"
-+ "io"
-+ "net"
-+ "os"
-+ "reflect"
-+ "strings"
-+ "testing"
-+ "time"
-+)
-+
-+type Fatalistic interface {
-+ Fatal(args ...interface{})
-+}
-+
-+func forceBinaryParameters() bool {
-+ bp := os.Getenv("PQTEST_BINARY_PARAMETERS")
-+ if bp == "yes" {
-+ return true
-+ } else if bp == "" || bp == "no" {
-+ return false
-+ } else {
-+ panic("unexpected value for PQTEST_BINARY_PARAMETERS")
-+ }
-+}
-+
-+func testConninfo(conninfo string) string {
-+ defaultTo := func(envvar string, value string) {
-+ if os.Getenv(envvar) == "" {
-+ os.Setenv(envvar, value)
-+ }
-+ }
-+ defaultTo("PGDATABASE", "pqgotest")
-+ defaultTo("PGSSLMODE", "disable")
-+ defaultTo("PGCONNECT_TIMEOUT", "20")
-+
-+ if forceBinaryParameters() &&
-+ !strings.HasPrefix(conninfo, "postgres://") &&
-+ !strings.HasPrefix(conninfo, "postgresql://") {
-+ conninfo += " binary_parameters=yes"
-+ }
-+ return conninfo
-+}
-+
-+func openTestConnConninfo(conninfo string) (*sql.DB, error) {
-+ return sql.Open("postgres", testConninfo(conninfo))
-+}
-+
-+func openTestConn(t Fatalistic) *sql.DB {
-+ conn, err := openTestConnConninfo("")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ return conn
-+}
-+
-+func getServerVersion(t *testing.T, db *sql.DB) int {
-+ var version int
-+ err := db.QueryRow("SHOW server_version_num").Scan(&version)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ return version
-+}
-+
-+func TestReconnect(t *testing.T) {
-+ db1 := openTestConn(t)
-+ defer db1.Close()
-+ tx, err := db1.Begin()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ var pid1 int
-+ err = tx.QueryRow("SELECT pg_backend_pid()").Scan(&pid1)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ db2 := openTestConn(t)
-+ defer db2.Close()
-+ _, err = db2.Exec("SELECT pg_terminate_backend($1)", pid1)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ // The rollback will probably "fail" because we just killed
-+ // its connection above
-+ _ = tx.Rollback()
-+
-+ const expected int = 42
-+ var result int
-+ err = db1.QueryRow(fmt.Sprintf("SELECT %d", expected)).Scan(&result)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ if result != expected {
-+ t.Errorf("got %v; expected %v", result, expected)
-+ }
-+}
-+
-+func TestCommitInFailedTransaction(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ txn, err := db.Begin()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ rows, err := txn.Query("SELECT error")
-+ if err == nil {
-+ rows.Close()
-+ t.Fatal("expected failure")
-+ }
-+ err = txn.Commit()
-+ if err != ErrInFailedTransaction {
-+ t.Fatalf("expected ErrInFailedTransaction; got %#v", err)
-+ }
-+}
-+
-+func TestOpenURL(t *testing.T) {
-+ testURL := func(url string) {
-+ db, err := openTestConnConninfo(url)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ defer db.Close()
-+ // database/sql might not call our Open at all unless we do something with
-+ // the connection
-+ txn, err := db.Begin()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ txn.Rollback()
-+ }
-+ testURL("postgres://")
-+ testURL("postgresql://")
-+}
-+
-+const pgpassFile = "/tmp/pqgotest_pgpass"
-+
-+func TestPgpass(t *testing.T) {
-+ if os.Getenv("TRAVIS") != "true" {
-+ t.Skip("not running under Travis, skipping pgpass tests")
-+ }
-+
-+ testAssert := func(conninfo string, expected string, reason string) {
-+ conn, err := openTestConnConninfo(conninfo)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ defer conn.Close()
-+
-+ txn, err := conn.Begin()
-+ if err != nil {
-+ if expected != "fail" {
-+ t.Fatalf(reason, err)
-+ }
-+ return
-+ }
-+ rows, err := txn.Query("SELECT USER")
-+ if err != nil {
-+ txn.Rollback()
-+ if expected != "fail" {
-+ t.Fatalf(reason, err)
-+ }
-+ } else {
-+ rows.Close()
-+ if expected != "ok" {
-+ t.Fatalf(reason, err)
-+ }
-+ }
-+ txn.Rollback()
-+ }
-+ testAssert("", "ok", "missing .pgpass, unexpected error %#v")
-+ os.Setenv("PGPASSFILE", pgpassFile)
-+ testAssert("host=/tmp", "fail", ", unexpected error %#v")
-+ os.Remove(pgpassFile)
-+ pgpass, err := os.OpenFile(pgpassFile, os.O_RDWR|os.O_CREATE, 0644)
-+ if err != nil {
-+ t.Fatalf("Unexpected error writing pgpass file %#v", err)
-+ }
-+ _, err = pgpass.WriteString(`# comment
-+server:5432:some_db:some_user:pass_A
-+*:5432:some_db:some_user:pass_B
-+localhost:*:*:*:pass_C
-+*:*:*:*:pass_fallback
-+`)
-+ if err != nil {
-+ t.Fatalf("Unexpected error writing pgpass file %#v", err)
-+ }
-+ pgpass.Close()
-+
-+ assertPassword := func(extra values, expected string) {
-+ o := values{
-+ "host": "localhost",
-+ "sslmode": "disable",
-+ "connect_timeout": "20",
-+ "user": "majid",
-+ "port": "5432",
-+ "extra_float_digits": "2",
-+ "dbname": "pqgotest",
-+ "client_encoding": "UTF8",
-+ "datestyle": "ISO, MDY",
-+ }
-+ for k, v := range extra {
-+ o[k] = v
-+ }
-+ (&conn{}).handlePgpass(o)
-+ if pw := o["password"]; pw != expected {
-+ t.Fatalf("For %v expected %s got %s", extra, expected, pw)
-+ }
-+ }
-+ // wrong permissions for the pgpass file means it should be ignored
-+ assertPassword(values{"host": "example.com", "user": "foo"}, "")
-+ // fix the permissions and check if it has taken effect
-+ os.Chmod(pgpassFile, 0600)
-+ assertPassword(values{"host": "server", "dbname": "some_db", "user": "some_user"}, "pass_A")
-+ assertPassword(values{"host": "example.com", "user": "foo"}, "pass_fallback")
-+ assertPassword(values{"host": "example.com", "dbname": "some_db", "user": "some_user"}, "pass_B")
-+ // localhost also matches the default "" and UNIX sockets
-+ assertPassword(values{"host": "", "user": "some_user"}, "pass_C")
-+ assertPassword(values{"host": "/tmp", "user": "some_user"}, "pass_C")
-+ // cleanup
-+ os.Remove(pgpassFile)
-+ os.Setenv("PGPASSFILE", "")
-+}
-+
-+func TestExec(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ _, err := db.Exec("CREATE TEMP TABLE temp (a int)")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ r, err := db.Exec("INSERT INTO temp VALUES (1)")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ if n, _ := r.RowsAffected(); n != 1 {
-+ t.Fatalf("expected 1 row affected, not %d", n)
-+ }
-+
-+ r, err = db.Exec("INSERT INTO temp VALUES ($1), ($2), ($3)", 1, 2, 3)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ if n, _ := r.RowsAffected(); n != 3 {
-+ t.Fatalf("expected 3 rows affected, not %d", n)
-+ }
-+
-+ // SELECT doesn't send the number of returned rows in the command tag
-+ // before 9.0
-+ if getServerVersion(t, db) >= 90000 {
-+ r, err = db.Exec("SELECT g FROM generate_series(1, 2) g")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ if n, _ := r.RowsAffected(); n != 2 {
-+ t.Fatalf("expected 2 rows affected, not %d", n)
-+ }
-+
-+ r, err = db.Exec("SELECT g FROM generate_series(1, $1) g", 3)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ if n, _ := r.RowsAffected(); n != 3 {
-+ t.Fatalf("expected 3 rows affected, not %d", n)
-+ }
-+ }
-+}
-+
-+func TestStatment(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ st, err := db.Prepare("SELECT 1")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ st1, err := db.Prepare("SELECT 2")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ r, err := st.Query()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ defer r.Close()
-+
-+ if !r.Next() {
-+ t.Fatal("expected row")
-+ }
-+
-+ var i int
-+ err = r.Scan(&i)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ if i != 1 {
-+ t.Fatalf("expected 1, got %d", i)
-+ }
-+
-+ // st1
-+
-+ r1, err := st1.Query()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ defer r1.Close()
-+
-+ if !r1.Next() {
-+ if r.Err() != nil {
-+ t.Fatal(r1.Err())
-+ }
-+ t.Fatal("expected row")
-+ }
-+
-+ err = r1.Scan(&i)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ if i != 2 {
-+ t.Fatalf("expected 2, got %d", i)
-+ }
-+}
-+
-+func TestRowsCloseBeforeDone(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ r, err := db.Query("SELECT 1")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ err = r.Close()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ if r.Next() {
-+ t.Fatal("unexpected row")
-+ }
-+
-+ if r.Err() != nil {
-+ t.Fatal(r.Err())
-+ }
-+}
-+
-+func TestParameterCountMismatch(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ var notused int
-+ err := db.QueryRow("SELECT false", 1).Scan(¬used)
-+ if err == nil {
-+ t.Fatal("expected err")
-+ }
-+ // make sure we clean up correctly
-+ err = db.QueryRow("SELECT 1").Scan(¬used)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ err = db.QueryRow("SELECT $1").Scan(¬used)
-+ if err == nil {
-+ t.Fatal("expected err")
-+ }
-+ // make sure we clean up correctly
-+ err = db.QueryRow("SELECT 1").Scan(¬used)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+}
-+
-+// Test that EmptyQueryResponses are handled correctly.
-+func TestEmptyQuery(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ res, err := db.Exec("")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ if _, err := res.RowsAffected(); err != errNoRowsAffected {
-+ t.Fatalf("expected %s, got %v", errNoRowsAffected, err)
-+ }
-+ if _, err := res.LastInsertId(); err != errNoLastInsertID {
-+ t.Fatalf("expected %s, got %v", errNoLastInsertID, err)
-+ }
-+ rows, err := db.Query("")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ cols, err := rows.Columns()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ if len(cols) != 0 {
-+ t.Fatalf("unexpected number of columns %d in response to an empty query", len(cols))
-+ }
-+ if rows.Next() {
-+ t.Fatal("unexpected row")
-+ }
-+ if rows.Err() != nil {
-+ t.Fatal(rows.Err())
-+ }
-+
-+ stmt, err := db.Prepare("")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ res, err = stmt.Exec()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ if _, err := res.RowsAffected(); err != errNoRowsAffected {
-+ t.Fatalf("expected %s, got %v", errNoRowsAffected, err)
-+ }
-+ if _, err := res.LastInsertId(); err != errNoLastInsertID {
-+ t.Fatalf("expected %s, got %v", errNoLastInsertID, err)
-+ }
-+ rows, err = stmt.Query()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ cols, err = rows.Columns()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ if len(cols) != 0 {
-+ t.Fatalf("unexpected number of columns %d in response to an empty query", len(cols))
-+ }
-+ if rows.Next() {
-+ t.Fatal("unexpected row")
-+ }
-+ if rows.Err() != nil {
-+ t.Fatal(rows.Err())
-+ }
-+}
-+
-+// Test that rows.Columns() is correct even if there are no result rows.
-+func TestEmptyResultSetColumns(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ rows, err := db.Query("SELECT 1 AS a, text 'bar' AS bar WHERE FALSE")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ cols, err := rows.Columns()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ if len(cols) != 2 {
-+ t.Fatalf("unexpected number of columns %d in response to an empty query", len(cols))
-+ }
-+ if rows.Next() {
-+ t.Fatal("unexpected row")
-+ }
-+ if rows.Err() != nil {
-+ t.Fatal(rows.Err())
-+ }
-+ if cols[0] != "a" || cols[1] != "bar" {
-+ t.Fatalf("unexpected Columns result %v", cols)
-+ }
-+
-+ stmt, err := db.Prepare("SELECT $1::int AS a, text 'bar' AS bar WHERE FALSE")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ rows, err = stmt.Query(1)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ cols, err = rows.Columns()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ if len(cols) != 2 {
-+ t.Fatalf("unexpected number of columns %d in response to an empty query", len(cols))
-+ }
-+ if rows.Next() {
-+ t.Fatal("unexpected row")
-+ }
-+ if rows.Err() != nil {
-+ t.Fatal(rows.Err())
-+ }
-+ if cols[0] != "a" || cols[1] != "bar" {
-+ t.Fatalf("unexpected Columns result %v", cols)
-+ }
-+
-+}
-+
-+func TestEncodeDecode(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ q := `
-+ SELECT
-+ E'\\000\\001\\002'::bytea,
-+ 'foobar'::text,
-+ NULL::integer,
-+ '2000-1-1 01:02:03.04-7'::timestamptz,
-+ 0::boolean,
-+ 123,
-+ -321,
-+ 3.14::float8
-+ WHERE
-+ E'\\000\\001\\002'::bytea = $1
-+ AND 'foobar'::text = $2
-+ AND $3::integer is NULL
-+ `
-+ // AND '2000-1-1 12:00:00.000000-7'::timestamp = $3
-+
-+ exp1 := []byte{0, 1, 2}
-+ exp2 := "foobar"
-+
-+ r, err := db.Query(q, exp1, exp2, nil)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ defer r.Close()
-+
-+ if !r.Next() {
-+ if r.Err() != nil {
-+ t.Fatal(r.Err())
-+ }
-+ t.Fatal("expected row")
-+ }
-+
-+ var got1 []byte
-+ var got2 string
-+ var got3 = sql.NullInt64{Valid: true}
-+ var got4 time.Time
-+ var got5, got6, got7, got8 interface{}
-+
-+ err = r.Scan(&got1, &got2, &got3, &got4, &got5, &got6, &got7, &got8)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ if !reflect.DeepEqual(exp1, got1) {
-+ t.Errorf("expected %q byte: %q", exp1, got1)
-+ }
-+
-+ if !reflect.DeepEqual(exp2, got2) {
-+ t.Errorf("expected %q byte: %q", exp2, got2)
-+ }
-+
-+ if got3.Valid {
-+ t.Fatal("expected invalid")
-+ }
-+
-+ if got4.Year() != 2000 {
-+ t.Fatal("wrong year")
-+ }
-+
-+ if got5 != false {
-+ t.Fatalf("expected false, got %q", got5)
-+ }
-+
-+ if got6 != int64(123) {
-+ t.Fatalf("expected 123, got %d", got6)
-+ }
-+
-+ if got7 != int64(-321) {
-+ t.Fatalf("expected -321, got %d", got7)
-+ }
-+
-+ if got8 != float64(3.14) {
-+ t.Fatalf("expected 3.14, got %f", got8)
-+ }
-+}
-+
-+func TestNoData(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ st, err := db.Prepare("SELECT 1 WHERE true = false")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ defer st.Close()
-+
-+ r, err := st.Query()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ defer r.Close()
-+
-+ if r.Next() {
-+ if r.Err() != nil {
-+ t.Fatal(r.Err())
-+ }
-+ t.Fatal("unexpected row")
-+ }
-+
-+ _, err = db.Query("SELECT * FROM nonexistenttable WHERE age=$1", 20)
-+ if err == nil {
-+ t.Fatal("Should have raised an error on non existent table")
-+ }
-+
-+ _, err = db.Query("SELECT * FROM nonexistenttable")
-+ if err == nil {
-+ t.Fatal("Should have raised an error on non existent table")
-+ }
-+}
-+
-+func TestErrorDuringStartup(t *testing.T) {
-+ // Don't use the normal connection setup, this is intended to
-+ // blow up in the startup packet from a non-existent user.
-+ db, err := openTestConnConninfo("user=thisuserreallydoesntexist")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ defer db.Close()
-+
-+ _, err = db.Begin()
-+ if err == nil {
-+ t.Fatal("expected error")
-+ }
-+
-+ e, ok := err.(*Error)
-+ if !ok {
-+ t.Fatalf("expected Error, got %#v", err)
-+ } else if e.Code.Name() != "invalid_authorization_specification" && e.Code.Name() != "invalid_password" {
-+ t.Fatalf("expected invalid_authorization_specification or invalid_password, got %s (%+v)", e.Code.Name(), err)
-+ }
-+}
-+
-+type testConn struct {
-+ closed bool
-+ net.Conn
-+}
-+
-+func (c *testConn) Close() error {
-+ c.closed = true
-+ return c.Conn.Close()
-+}
-+
-+type testDialer struct {
-+ conns []*testConn
-+}
-+
-+func (d *testDialer) Dial(ntw, addr string) (net.Conn, error) {
-+ c, err := net.Dial(ntw, addr)
-+ if err != nil {
-+ return nil, err
-+ }
-+ tc := &testConn{Conn: c}
-+ d.conns = append(d.conns, tc)
-+ return tc, nil
-+}
-+
-+func (d *testDialer) DialTimeout(ntw, addr string, timeout time.Duration) (net.Conn, error) {
-+ c, err := net.DialTimeout(ntw, addr, timeout)
-+ if err != nil {
-+ return nil, err
-+ }
-+ tc := &testConn{Conn: c}
-+ d.conns = append(d.conns, tc)
-+ return tc, nil
-+}
-+
-+func TestErrorDuringStartupClosesConn(t *testing.T) {
-+ // Don't use the normal connection setup, this is intended to
-+ // blow up in the startup packet from a non-existent user.
-+ var d testDialer
-+ c, err := DialOpen(&d, testConninfo("user=thisuserreallydoesntexist"))
-+ if err == nil {
-+ c.Close()
-+ t.Fatal("expected dial error")
-+ }
-+ if len(d.conns) != 1 {
-+ t.Fatalf("got len(d.conns) = %d, want = %d", len(d.conns), 1)
-+ }
-+ if !d.conns[0].closed {
-+ t.Error("connection leaked")
-+ }
-+}
-+
-+func TestBadConn(t *testing.T) {
-+ var err error
-+
-+ cn := conn{}
-+ func() {
-+ defer cn.errRecover(&err)
-+ panic(io.EOF)
-+ }()
-+ if err != driver.ErrBadConn {
-+ t.Fatalf("expected driver.ErrBadConn, got: %#v", err)
-+ }
-+ if !cn.bad {
-+ t.Fatalf("expected cn.bad")
-+ }
-+
-+ cn = conn{}
-+ func() {
-+ defer cn.errRecover(&err)
-+ e := &Error{Severity: Efatal}
-+ panic(e)
-+ }()
-+ if err != driver.ErrBadConn {
-+ t.Fatalf("expected driver.ErrBadConn, got: %#v", err)
-+ }
-+ if !cn.bad {
-+ t.Fatalf("expected cn.bad")
-+ }
-+}
-+
-+// TestCloseBadConn tests that the underlying connection can be closed with
-+// Close after an error.
-+func TestCloseBadConn(t *testing.T) {
-+ host := os.Getenv("PGHOST")
-+ if host == "" {
-+ host = "localhost"
-+ }
-+ port := os.Getenv("PGPORT")
-+ if port == "" {
-+ port = "5432"
-+ }
-+ nc, err := net.Dial("tcp", host+":"+port)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ cn := conn{c: nc}
-+ func() {
-+ defer cn.errRecover(&err)
-+ panic(io.EOF)
-+ }()
-+ // Verify we can write before closing.
-+ if _, err := nc.Write(nil); err != nil {
-+ t.Fatal(err)
-+ }
-+ // First close should close the connection.
-+ if err := cn.Close(); err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ // During the Go 1.9 cycle, https://github.com/golang/go/commit/3792db5
-+ // changed this error from
-+ //
-+ // net.errClosing = errors.New("use of closed network connection")
-+ //
-+ // to
-+ //
-+ // internal/poll.ErrClosing = errors.New("use of closed file or network connection")
-+ const errClosing = "use of closed"
-+
-+ // Verify write after closing fails.
-+ if _, err := nc.Write(nil); err == nil {
-+ t.Fatal("expected error")
-+ } else if !strings.Contains(err.Error(), errClosing) {
-+ t.Fatalf("expected %s error, got %s", errClosing, err)
-+ }
-+ // Verify second close fails.
-+ if err := cn.Close(); err == nil {
-+ t.Fatal("expected error")
-+ } else if !strings.Contains(err.Error(), errClosing) {
-+ t.Fatalf("expected %s error, got %s", errClosing, err)
-+ }
-+}
-+
-+func TestErrorOnExec(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ txn, err := db.Begin()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ defer txn.Rollback()
-+
-+ _, err = txn.Exec("CREATE TEMPORARY TABLE foo(f1 int PRIMARY KEY)")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ _, err = txn.Exec("INSERT INTO foo VALUES (0), (0)")
-+ if err == nil {
-+ t.Fatal("Should have raised error")
-+ }
-+
-+ e, ok := err.(*Error)
-+ if !ok {
-+ t.Fatalf("expected Error, got %#v", err)
-+ } else if e.Code.Name() != "unique_violation" {
-+ t.Fatalf("expected unique_violation, got %s (%+v)", e.Code.Name(), err)
-+ }
-+}
-+
-+func TestErrorOnQuery(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ txn, err := db.Begin()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ defer txn.Rollback()
-+
-+ _, err = txn.Exec("CREATE TEMPORARY TABLE foo(f1 int PRIMARY KEY)")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ _, err = txn.Query("INSERT INTO foo VALUES (0), (0)")
-+ if err == nil {
-+ t.Fatal("Should have raised error")
-+ }
-+
-+ e, ok := err.(*Error)
-+ if !ok {
-+ t.Fatalf("expected Error, got %#v", err)
-+ } else if e.Code.Name() != "unique_violation" {
-+ t.Fatalf("expected unique_violation, got %s (%+v)", e.Code.Name(), err)
-+ }
-+}
-+
-+func TestErrorOnQueryRowSimpleQuery(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ txn, err := db.Begin()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ defer txn.Rollback()
-+
-+ _, err = txn.Exec("CREATE TEMPORARY TABLE foo(f1 int PRIMARY KEY)")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ var v int
-+ err = txn.QueryRow("INSERT INTO foo VALUES (0), (0)").Scan(&v)
-+ if err == nil {
-+ t.Fatal("Should have raised error")
-+ }
-+
-+ e, ok := err.(*Error)
-+ if !ok {
-+ t.Fatalf("expected Error, got %#v", err)
-+ } else if e.Code.Name() != "unique_violation" {
-+ t.Fatalf("expected unique_violation, got %s (%+v)", e.Code.Name(), err)
-+ }
-+}
-+
-+// Test the QueryRow bug workarounds in stmt.exec() and simpleQuery()
-+func TestQueryRowBugWorkaround(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ // stmt.exec()
-+ _, err := db.Exec("CREATE TEMP TABLE notnulltemp (a varchar(10) not null)")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ var a string
-+ err = db.QueryRow("INSERT INTO notnulltemp(a) values($1) RETURNING a", nil).Scan(&a)
-+ if err == sql.ErrNoRows {
-+ t.Fatalf("expected constraint violation error; got: %v", err)
-+ }
-+ pge, ok := err.(*Error)
-+ if !ok {
-+ t.Fatalf("expected *Error; got: %#v", err)
-+ }
-+ if pge.Code.Name() != "not_null_violation" {
-+ t.Fatalf("expected not_null_violation; got: %s (%+v)", pge.Code.Name(), err)
-+ }
-+
-+ // Test workaround in simpleQuery()
-+ tx, err := db.Begin()
-+ if err != nil {
-+ t.Fatalf("unexpected error %s in Begin", err)
-+ }
-+ defer tx.Rollback()
-+
-+ _, err = tx.Exec("SET LOCAL check_function_bodies TO FALSE")
-+ if err != nil {
-+ t.Fatalf("could not disable check_function_bodies: %s", err)
-+ }
-+ _, err = tx.Exec(`
-+CREATE OR REPLACE FUNCTION bad_function()
-+RETURNS integer
-+-- hack to prevent the function from being inlined
-+SET check_function_bodies TO TRUE
-+AS $$
-+ SELECT text 'bad'
-+$$ LANGUAGE sql`)
-+ if err != nil {
-+ t.Fatalf("could not create function: %s", err)
-+ }
-+
-+ err = tx.QueryRow("SELECT * FROM bad_function()").Scan(&a)
-+ if err == nil {
-+ t.Fatalf("expected error")
-+ }
-+ pge, ok = err.(*Error)
-+ if !ok {
-+ t.Fatalf("expected *Error; got: %#v", err)
-+ }
-+ if pge.Code.Name() != "invalid_function_definition" {
-+ t.Fatalf("expected invalid_function_definition; got: %s (%+v)", pge.Code.Name(), err)
-+ }
-+
-+ err = tx.Rollback()
-+ if err != nil {
-+ t.Fatalf("unexpected error %s in Rollback", err)
-+ }
-+
-+ // Also test that simpleQuery()'s workaround works when the query fails
-+ // after a row has been received.
-+ rows, err := db.Query(`
-+select
-+ (select generate_series(1, ss.i))
-+from (select gs.i
-+ from generate_series(1, 2) gs(i)
-+ order by gs.i limit 2) ss`)
-+ if err != nil {
-+ t.Fatalf("query failed: %s", err)
-+ }
-+ if !rows.Next() {
-+ t.Fatalf("expected at least one result row; got %s", rows.Err())
-+ }
-+ var i int
-+ err = rows.Scan(&i)
-+ if err != nil {
-+ t.Fatalf("rows.Scan() failed: %s", err)
-+ }
-+ if i != 1 {
-+ t.Fatalf("unexpected value for i: %d", i)
-+ }
-+ if rows.Next() {
-+ t.Fatalf("unexpected row")
-+ }
-+ pge, ok = rows.Err().(*Error)
-+ if !ok {
-+ t.Fatalf("expected *Error; got: %#v", err)
-+ }
-+ if pge.Code.Name() != "cardinality_violation" {
-+ t.Fatalf("expected cardinality_violation; got: %s (%+v)", pge.Code.Name(), rows.Err())
-+ }
-+}
-+
-+func TestSimpleQuery(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ r, err := db.Query("select 1")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ defer r.Close()
-+
-+ if !r.Next() {
-+ t.Fatal("expected row")
-+ }
-+}
-+
-+func TestBindError(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ _, err := db.Exec("create temp table test (i integer)")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ _, err = db.Query("select * from test where i=$1", "hhh")
-+ if err == nil {
-+ t.Fatal("expected an error")
-+ }
-+
-+ // Should not get error here
-+ r, err := db.Query("select * from test where i=$1", 1)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ defer r.Close()
-+}
-+
-+func TestParseErrorInExtendedQuery(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ _, err := db.Query("PARSE_ERROR $1", 1)
-+ pqErr, _ := err.(*Error)
-+ // Expecting a syntax error.
-+ if err == nil || pqErr == nil || pqErr.Code != "42601" {
-+ t.Fatalf("expected syntax error, got %s", err)
-+ }
-+
-+ rows, err := db.Query("SELECT 1")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ rows.Close()
-+}
-+
-+// TestReturning tests that an INSERT query using the RETURNING clause returns a row.
-+func TestReturning(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ _, err := db.Exec("CREATE TEMP TABLE distributors (did integer default 0, dname text)")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ rows, err := db.Query("INSERT INTO distributors (did, dname) VALUES (DEFAULT, 'XYZ Widgets') " +
-+ "RETURNING did;")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ if !rows.Next() {
-+ t.Fatal("no rows")
-+ }
-+ var did int
-+ err = rows.Scan(&did)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ if did != 0 {
-+ t.Fatalf("bad value for did: got %d, want %d", did, 0)
-+ }
-+
-+ if rows.Next() {
-+ t.Fatal("unexpected next row")
-+ }
-+ err = rows.Err()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+}
-+
-+func TestIssue186(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ // Exec() a query which returns results
-+ _, err := db.Exec("VALUES (1), (2), (3)")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ _, err = db.Exec("VALUES ($1), ($2), ($3)", 1, 2, 3)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ // Query() a query which doesn't return any results
-+ txn, err := db.Begin()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ defer txn.Rollback()
-+
-+ rows, err := txn.Query("CREATE TEMP TABLE foo(f1 int)")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ if err = rows.Close(); err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ // small trick to get NoData from a parameterized query
-+ _, err = txn.Exec("CREATE RULE nodata AS ON INSERT TO foo DO INSTEAD NOTHING")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ rows, err = txn.Query("INSERT INTO foo VALUES ($1)", 1)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ if err = rows.Close(); err != nil {
-+ t.Fatal(err)
-+ }
-+}
-+
-+func TestIssue196(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ row := db.QueryRow("SELECT float4 '0.10000122' = $1, float8 '35.03554004971999' = $2",
-+ float32(0.10000122), float64(35.03554004971999))
-+
-+ var float4match, float8match bool
-+ err := row.Scan(&float4match, &float8match)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ if !float4match {
-+ t.Errorf("Expected float4 fidelity to be maintained; got no match")
-+ }
-+ if !float8match {
-+ t.Errorf("Expected float8 fidelity to be maintained; got no match")
-+ }
-+}
-+
-+// Test that any CommandComplete messages sent before the query results are
-+// ignored.
-+func TestIssue282(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ var searchPath string
-+ err := db.QueryRow(`
-+ SET LOCAL search_path TO pg_catalog;
-+ SET LOCAL search_path TO pg_catalog;
-+ SHOW search_path`).Scan(&searchPath)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ if searchPath != "pg_catalog" {
-+ t.Fatalf("unexpected search_path %s", searchPath)
-+ }
-+}
-+
-+func TestReadFloatPrecision(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ row := db.QueryRow("SELECT float4 '0.10000122', float8 '35.03554004971999', float4 '1.2'")
-+ var float4val float32
-+ var float8val float64
-+ var float4val2 float64
-+ err := row.Scan(&float4val, &float8val, &float4val2)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ if float4val != float32(0.10000122) {
-+ t.Errorf("Expected float4 fidelity to be maintained; got no match")
-+ }
-+ if float8val != float64(35.03554004971999) {
-+ t.Errorf("Expected float8 fidelity to be maintained; got no match")
-+ }
-+ if float4val2 != float64(1.2) {
-+ t.Errorf("Expected float4 fidelity into a float64 to be maintained; got no match")
-+ }
-+}
-+
-+func TestXactMultiStmt(t *testing.T) {
-+ // minified test case based on bug reports from
-+ // pico303@gmail.com and rangelspam@gmail.com
-+ t.Skip("Skipping failing test")
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ tx, err := db.Begin()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ defer tx.Commit()
-+
-+ rows, err := tx.Query("select 1")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ if rows.Next() {
-+ var val int32
-+ if err = rows.Scan(&val); err != nil {
-+ t.Fatal(err)
-+ }
-+ } else {
-+ t.Fatal("Expected at least one row in first query in xact")
-+ }
-+
-+ rows2, err := tx.Query("select 2")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ if rows2.Next() {
-+ var val2 int32
-+ if err := rows2.Scan(&val2); err != nil {
-+ t.Fatal(err)
-+ }
-+ } else {
-+ t.Fatal("Expected at least one row in second query in xact")
-+ }
-+
-+ if err = rows.Err(); err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ if err = rows2.Err(); err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ if err = tx.Commit(); err != nil {
-+ t.Fatal(err)
-+ }
-+}
-+
-+var envParseTests = []struct {
-+ Expected map[string]string
-+ Env []string
-+}{
-+ {
-+ Env: []string{"PGDATABASE=hello", "PGUSER=goodbye"},
-+ Expected: map[string]string{"dbname": "hello", "user": "goodbye"},
-+ },
-+ {
-+ Env: []string{"PGDATESTYLE=ISO, MDY"},
-+ Expected: map[string]string{"datestyle": "ISO, MDY"},
-+ },
-+ {
-+ Env: []string{"PGCONNECT_TIMEOUT=30"},
-+ Expected: map[string]string{"connect_timeout": "30"},
-+ },
-+}
-+
-+func TestParseEnviron(t *testing.T) {
-+ for i, tt := range envParseTests {
-+ results := parseEnviron(tt.Env)
-+ if !reflect.DeepEqual(tt.Expected, results) {
-+ t.Errorf("%d: Expected: %#v Got: %#v", i, tt.Expected, results)
-+ }
-+ }
-+}
-+
-+func TestParseComplete(t *testing.T) {
-+ tpc := func(commandTag string, command string, affectedRows int64, shouldFail bool) {
-+ defer func() {
-+ if p := recover(); p != nil {
-+ if !shouldFail {
-+ t.Error(p)
-+ }
-+ }
-+ }()
-+ cn := &conn{}
-+ res, c := cn.parseComplete(commandTag)
-+ if c != command {
-+ t.Errorf("Expected %v, got %v", command, c)
-+ }
-+ n, err := res.RowsAffected()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ if n != affectedRows {
-+ t.Errorf("Expected %d, got %d", affectedRows, n)
-+ }
-+ }
-+
-+ tpc("ALTER TABLE", "ALTER TABLE", 0, false)
-+ tpc("INSERT 0 1", "INSERT", 1, false)
-+ tpc("UPDATE 100", "UPDATE", 100, false)
-+ tpc("SELECT 100", "SELECT", 100, false)
-+ tpc("FETCH 100", "FETCH", 100, false)
-+ // allow COPY (and others) without row count
-+ tpc("COPY", "COPY", 0, false)
-+ // don't fail on command tags we don't recognize
-+ tpc("UNKNOWNCOMMANDTAG", "UNKNOWNCOMMANDTAG", 0, false)
-+
-+ // failure cases
-+ tpc("INSERT 1", "", 0, true) // missing oid
-+ tpc("UPDATE 0 1", "", 0, true) // too many numbers
-+ tpc("SELECT foo", "", 0, true) // invalid row count
-+}
-+
-+// Test interface conformance.
-+var (
-+ _ driver.ExecerContext = (*conn)(nil)
-+ _ driver.QueryerContext = (*conn)(nil)
-+)
-+
-+func TestNullAfterNonNull(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ r, err := db.Query("SELECT 9::integer UNION SELECT NULL::integer")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ var n sql.NullInt64
-+
-+ if !r.Next() {
-+ if r.Err() != nil {
-+ t.Fatal(err)
-+ }
-+ t.Fatal("expected row")
-+ }
-+
-+ if err := r.Scan(&n); err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ if n.Int64 != 9 {
-+ t.Fatalf("expected 2, not %d", n.Int64)
-+ }
-+
-+ if !r.Next() {
-+ if r.Err() != nil {
-+ t.Fatal(err)
-+ }
-+ t.Fatal("expected row")
-+ }
-+
-+ if err := r.Scan(&n); err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ if n.Valid {
-+ t.Fatal("expected n to be invalid")
-+ }
-+
-+ if n.Int64 != 0 {
-+ t.Fatalf("expected n to 2, not %d", n.Int64)
-+ }
-+}
-+
-+func Test64BitErrorChecking(t *testing.T) {
-+ defer func() {
-+ if err := recover(); err != nil {
-+ t.Fatal("panic due to 0xFFFFFFFF != -1 " +
-+ "when int is 64 bits")
-+ }
-+ }()
-+
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ r, err := db.Query(`SELECT *
-+FROM (VALUES (0::integer, NULL::text), (1, 'test string')) AS t;`)
-+
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ defer r.Close()
-+
-+ for r.Next() {
-+ }
-+}
-+
-+func TestCommit(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ _, err := db.Exec("CREATE TEMP TABLE temp (a int)")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ sqlInsert := "INSERT INTO temp VALUES (1)"
-+ sqlSelect := "SELECT * FROM temp"
-+ tx, err := db.Begin()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ _, err = tx.Exec(sqlInsert)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ err = tx.Commit()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ var i int
-+ err = db.QueryRow(sqlSelect).Scan(&i)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ if i != 1 {
-+ t.Fatalf("expected 1, got %d", i)
-+ }
-+}
-+
-+func TestErrorClass(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ _, err := db.Query("SELECT int 'notint'")
-+ if err == nil {
-+ t.Fatal("expected error")
-+ }
-+ pge, ok := err.(*Error)
-+ if !ok {
-+ t.Fatalf("expected *pq.Error, got %#+v", err)
-+ }
-+ if pge.Code.Class() != "22" {
-+ t.Fatalf("expected class 28, got %v", pge.Code.Class())
-+ }
-+ if pge.Code.Class().Name() != "data_exception" {
-+ t.Fatalf("expected data_exception, got %v", pge.Code.Class().Name())
-+ }
-+}
-+
-+func TestParseOpts(t *testing.T) {
-+ tests := []struct {
-+ in string
-+ expected values
-+ valid bool
-+ }{
-+ {"dbname=hello user=goodbye", values{"dbname": "hello", "user": "goodbye"}, true},
-+ {"dbname=hello user=goodbye ", values{"dbname": "hello", "user": "goodbye"}, true},
-+ {"dbname = hello user=goodbye", values{"dbname": "hello", "user": "goodbye"}, true},
-+ {"dbname=hello user =goodbye", values{"dbname": "hello", "user": "goodbye"}, true},
-+ {"dbname=hello user= goodbye", values{"dbname": "hello", "user": "goodbye"}, true},
-+ {"host=localhost password='correct horse battery staple'", values{"host": "localhost", "password": "correct horse battery staple"}, true},
-+ {"dbname=データベース password=パスワード", values{"dbname": "データベース", "password": "パスワード"}, true},
-+ {"dbname=hello user=''", values{"dbname": "hello", "user": ""}, true},
-+ {"user='' dbname=hello", values{"dbname": "hello", "user": ""}, true},
-+ // The last option value is an empty string if there's no non-whitespace after its =
-+ {"dbname=hello user= ", values{"dbname": "hello", "user": ""}, true},
-+
-+ // The parser ignores spaces after = and interprets the next set of non-whitespace characters as the value.
-+ {"user= password=foo", values{"user": "password=foo"}, true},
-+
-+ // Backslash escapes next char
-+ {`user=a\ \'\\b`, values{"user": `a '\b`}, true},
-+ {`user='a \'b'`, values{"user": `a 'b`}, true},
-+
-+ // Incomplete escape
-+ {`user=x\`, values{}, false},
-+
-+ // No '=' after the key
-+ {"postgre://marko@internet", values{}, false},
-+ {"dbname user=goodbye", values{}, false},
-+ {"user=foo blah", values{}, false},
-+ {"user=foo blah ", values{}, false},
-+
-+ // Unterminated quoted value
-+ {"dbname=hello user='unterminated", values{}, false},
-+ }
-+
-+ for _, test := range tests {
-+ o := make(values)
-+ err := parseOpts(test.in, o)
-+
-+ switch {
-+ case err != nil && test.valid:
-+ t.Errorf("%q got unexpected error: %s", test.in, err)
-+ case err == nil && test.valid && !reflect.DeepEqual(test.expected, o):
-+ t.Errorf("%q got: %#v want: %#v", test.in, o, test.expected)
-+ case err == nil && !test.valid:
-+ t.Errorf("%q expected an error", test.in)
-+ }
-+ }
-+}
-+
-+func TestRuntimeParameters(t *testing.T) {
-+ tests := []struct {
-+ conninfo string
-+ param string
-+ expected string
-+ success bool
-+ }{
-+ // invalid parameter
-+ {"DOESNOTEXIST=foo", "", "", false},
-+ // we can only work with a specific value for these two
-+ {"client_encoding=SQL_ASCII", "", "", false},
-+ {"datestyle='ISO, YDM'", "", "", false},
-+ // "options" should work exactly as it does in libpq
-+ {"options='-c search_path=pqgotest'", "search_path", "pqgotest", true},
-+ // pq should override client_encoding in this case
-+ {"options='-c client_encoding=SQL_ASCII'", "client_encoding", "UTF8", true},
-+ // allow client_encoding to be set explicitly
-+ {"client_encoding=UTF8", "client_encoding", "UTF8", true},
-+ // test a runtime parameter not supported by libpq
-+ {"work_mem='139kB'", "work_mem", "139kB", true},
-+ // test fallback_application_name
-+ {"application_name=foo fallback_application_name=bar", "application_name", "foo", true},
-+ {"application_name='' fallback_application_name=bar", "application_name", "", true},
-+ {"fallback_application_name=bar", "application_name", "bar", true},
-+ }
-+
-+ for _, test := range tests {
-+ db, err := openTestConnConninfo(test.conninfo)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ // application_name didn't exist before 9.0
-+ if test.param == "application_name" && getServerVersion(t, db) < 90000 {
-+ db.Close()
-+ continue
-+ }
-+
-+ tryGetParameterValue := func() (value string, success bool) {
-+ defer db.Close()
-+ row := db.QueryRow("SELECT current_setting($1)", test.param)
-+ err = row.Scan(&value)
-+ if err != nil {
-+ return "", false
-+ }
-+ return value, true
-+ }
-+
-+ value, success := tryGetParameterValue()
-+ if success != test.success && !test.success {
-+ t.Fatalf("%v: unexpected error: %v", test.conninfo, err)
-+ }
-+ if success != test.success {
-+ t.Fatalf("unexpected outcome %v (was expecting %v) for conninfo \"%s\"",
-+ success, test.success, test.conninfo)
-+ }
-+ if value != test.expected {
-+ t.Fatalf("bad value for %s: got %s, want %s with conninfo \"%s\"",
-+ test.param, value, test.expected, test.conninfo)
-+ }
-+ }
-+}
-+
-+func TestIsUTF8(t *testing.T) {
-+ var cases = []struct {
-+ name string
-+ want bool
-+ }{
-+ {"unicode", true},
-+ {"utf-8", true},
-+ {"utf_8", true},
-+ {"UTF-8", true},
-+ {"UTF8", true},
-+ {"utf8", true},
-+ {"u n ic_ode", true},
-+ {"ut_f%8", true},
-+ {"ubf8", false},
-+ {"punycode", false},
-+ }
-+
-+ for _, test := range cases {
-+ if g := isUTF8(test.name); g != test.want {
-+ t.Errorf("isUTF8(%q) = %v want %v", test.name, g, test.want)
-+ }
-+ }
-+}
-+
-+func TestQuoteIdentifier(t *testing.T) {
-+ var cases = []struct {
-+ input string
-+ want string
-+ }{
-+ {`foo`, `"foo"`},
-+ {`foo bar baz`, `"foo bar baz"`},
-+ {`foo"bar`, `"foo""bar"`},
-+ {"foo\x00bar", `"foo"`},
-+ {"\x00foo", `""`},
-+ }
-+
-+ for _, test := range cases {
-+ got := QuoteIdentifier(test.input)
-+ if got != test.want {
-+ t.Errorf("QuoteIdentifier(%q) = %v want %v", test.input, got, test.want)
-+ }
-+ }
-+}
-+
-+func TestQuoteLiteral(t *testing.T) {
-+ var cases = []struct {
-+ input string
-+ want string
-+ }{
-+ {`foo`, `'foo'`},
-+ {`foo bar baz`, `'foo bar baz'`},
-+ {`foo'bar`, `'foo''bar'`},
-+ {`foo\bar`, ` E'foo\\bar'`},
-+ {`foo\ba'r`, ` E'foo\\ba''r'`},
-+ {`foo"bar`, `'foo"bar'`},
-+ {`foo\x00bar`, ` E'foo\\x00bar'`},
-+ {`\x00foo`, ` E'\\x00foo'`},
-+ {`'`, `''''`},
-+ {`''`, `''''''`},
-+ {`\`, ` E'\\'`},
-+ {`'abc'; DROP TABLE users;`, `'''abc''; DROP TABLE users;'`},
-+ {`\'`, ` E'\\'''`},
-+ {`E'\''`, ` E'E''\\'''''`},
-+ {`e'\''`, ` E'e''\\'''''`},
-+ {`E'\'abc\'; DROP TABLE users;'`, ` E'E''\\''abc\\''; DROP TABLE users;'''`},
-+ {`e'\'abc\'; DROP TABLE users;'`, ` E'e''\\''abc\\''; DROP TABLE users;'''`},
-+ }
-+
-+ for _, test := range cases {
-+ got := QuoteLiteral(test.input)
-+ if got != test.want {
-+ t.Errorf("QuoteLiteral(%q) = %v want %v", test.input, got, test.want)
-+ }
-+ }
-+}
-+
-+func TestRowsResultTag(t *testing.T) {
-+ type ResultTag interface {
-+ Result() driver.Result
-+ Tag() string
-+ }
-+
-+ tests := []struct {
-+ query string
-+ tag string
-+ ra int64
-+ }{
-+ {
-+ query: "CREATE TEMP TABLE temp (a int)",
-+ tag: "CREATE TABLE",
-+ },
-+ {
-+ query: "INSERT INTO temp VALUES (1), (2)",
-+ tag: "INSERT",
-+ ra: 2,
-+ },
-+ {
-+ query: "SELECT 1",
-+ },
-+ // A SELECT anywhere should take precedent.
-+ {
-+ query: "SELECT 1; INSERT INTO temp VALUES (1), (2)",
-+ },
-+ {
-+ query: "INSERT INTO temp VALUES (1), (2); SELECT 1",
-+ },
-+ // Multiple statements that don't return rows should return the last tag.
-+ {
-+ query: "CREATE TEMP TABLE t (a int); DROP TABLE t",
-+ tag: "DROP TABLE",
-+ },
-+ // Ensure a rows-returning query in any position among various tags-returing
-+ // statements will prefer the rows.
-+ {
-+ query: "SELECT 1; CREATE TEMP TABLE t (a int); DROP TABLE t",
-+ },
-+ {
-+ query: "CREATE TEMP TABLE t (a int); SELECT 1; DROP TABLE t",
-+ },
-+ {
-+ query: "CREATE TEMP TABLE t (a int); DROP TABLE t; SELECT 1",
-+ },
-+ // Verify that an no-results query doesn't set the tag.
-+ {
-+ query: "CREATE TEMP TABLE t (a int); SELECT 1 WHERE FALSE; DROP TABLE t;",
-+ },
-+ }
-+
-+ // If this is the only test run, this will correct the connection string.
-+ openTestConn(t).Close()
-+
-+ conn, err := Open("")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ defer conn.Close()
-+ q := conn.(driver.QueryerContext)
-+
-+ for _, test := range tests {
-+ if rows, err := q.QueryContext(context.Background(), test.query, nil); err != nil {
-+ t.Fatalf("%s: %s", test.query, err)
-+ } else {
-+ r := rows.(ResultTag)
-+ if tag := r.Tag(); tag != test.tag {
-+ t.Fatalf("%s: unexpected tag %q", test.query, tag)
-+ }
-+ res := r.Result()
-+ if ra, _ := res.RowsAffected(); ra != test.ra {
-+ t.Fatalf("%s: unexpected rows affected: %d", test.query, ra)
-+ }
-+ rows.Close()
-+ }
-+ }
-+}
-+
-+// TestQuickClose tests that closing a query early allows a subsequent query to work.
-+func TestQuickClose(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ tx, err := db.Begin()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ rows, err := tx.Query("SELECT 1; SELECT 2;")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ if err := rows.Close(); err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ var id int
-+ if err := tx.QueryRow("SELECT 3").Scan(&id); err != nil {
-+ t.Fatal(err)
-+ }
-+ if id != 3 {
-+ t.Fatalf("unexpected %d", id)
-+ }
-+ if err := tx.Commit(); err != nil {
-+ t.Fatal(err)
-+ }
-+}
-+
-+func TestMultipleResult(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ rows, err := db.Query(`
-+ begin;
-+ select * from information_schema.tables limit 1;
-+ select * from information_schema.columns limit 2;
-+ commit;
-+ `)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ type set struct {
-+ cols []string
-+ rowCount int
-+ }
-+ buf := []*set{}
-+ for {
-+ cols, err := rows.Columns()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ s := &set{
-+ cols: cols,
-+ }
-+ buf = append(buf, s)
-+
-+ for rows.Next() {
-+ s.rowCount++
-+ }
-+ if !rows.NextResultSet() {
-+ break
-+ }
-+ }
-+ if len(buf) != 2 {
-+ t.Fatalf("got %d sets, expected 2", len(buf))
-+ }
-+ if len(buf[0].cols) == len(buf[1].cols) || len(buf[1].cols) == 0 {
-+ t.Fatal("invalid cols size, expected different column count and greater then zero")
-+ }
-+ if buf[0].rowCount != 1 || buf[1].rowCount != 2 {
-+ t.Fatal("incorrect number of rows returned")
-+ }
-+}
-+
-+func TestCopyInStmtAffectedRows(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ _, err := db.Exec("CREATE TEMP TABLE temp (a int)")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ txn, err := db.BeginTx(context.TODO(), nil)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ copyStmt, err := txn.Prepare(CopyIn("temp", "a"))
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ res, err := copyStmt.Exec()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ res.RowsAffected()
-+ res.LastInsertId()
-+}
-diff --git a/vendor/github.com/lib/pq/connector.go b/vendor/github.com/lib/pq/connector.go
-new file mode 100644
-index 00000000000..2f8ced6737d
---- /dev/null
-+++ b/vendor/github.com/lib/pq/connector.go
-@@ -0,0 +1,110 @@
-+package pq
-+
-+import (
-+ "context"
-+ "database/sql/driver"
-+ "errors"
-+ "fmt"
-+ "os"
-+ "strings"
-+)
-+
-+// Connector represents a fixed configuration for the pq driver with a given
-+// name. Connector satisfies the database/sql/driver Connector interface and
-+// can be used to create any number of DB Conn's via the database/sql OpenDB
-+// function.
-+//
-+// See https://golang.org/pkg/database/sql/driver/#Connector.
-+// See https://golang.org/pkg/database/sql/#OpenDB.
-+type Connector struct {
-+ opts values
-+ dialer Dialer
-+}
-+
-+// Connect returns a connection to the database using the fixed configuration
-+// of this Connector. Context is not used.
-+func (c *Connector) Connect(ctx context.Context) (driver.Conn, error) {
-+ return c.open(ctx)
-+}
-+
-+// Driver returnst the underlying driver of this Connector.
-+func (c *Connector) Driver() driver.Driver {
-+ return &Driver{}
-+}
-+
-+// NewConnector returns a connector for the pq driver in a fixed configuration
-+// with the given dsn. The returned connector can be used to create any number
-+// of equivalent Conn's. The returned connector is intended to be used with
-+// database/sql.OpenDB.
-+//
-+// See https://golang.org/pkg/database/sql/driver/#Connector.
-+// See https://golang.org/pkg/database/sql/#OpenDB.
-+func NewConnector(dsn string) (*Connector, error) {
-+ var err error
-+ o := make(values)
-+
-+ // A number of defaults are applied here, in this order:
-+ //
-+ // * Very low precedence defaults applied in every situation
-+ // * Environment variables
-+ // * Explicitly passed connection information
-+ o["host"] = "localhost"
-+ o["port"] = "5432"
-+ // N.B.: Extra float digits should be set to 3, but that breaks
-+ // Postgres 8.4 and older, where the max is 2.
-+ o["extra_float_digits"] = "2"
-+ for k, v := range parseEnviron(os.Environ()) {
-+ o[k] = v
-+ }
-+
-+ if strings.HasPrefix(dsn, "postgres://") || strings.HasPrefix(dsn, "postgresql://") {
-+ dsn, err = ParseURL(dsn)
-+ if err != nil {
-+ return nil, err
-+ }
-+ }
-+
-+ if err := parseOpts(dsn, o); err != nil {
-+ return nil, err
-+ }
-+
-+ // Use the "fallback" application name if necessary
-+ if fallback, ok := o["fallback_application_name"]; ok {
-+ if _, ok := o["application_name"]; !ok {
-+ o["application_name"] = fallback
-+ }
-+ }
-+
-+ // We can't work with any client_encoding other than UTF-8 currently.
-+ // However, we have historically allowed the user to set it to UTF-8
-+ // explicitly, and there's no reason to break such programs, so allow that.
-+ // Note that the "options" setting could also set client_encoding, but
-+ // parsing its value is not worth it. Instead, we always explicitly send
-+ // client_encoding as a separate run-time parameter, which should override
-+ // anything set in options.
-+ if enc, ok := o["client_encoding"]; ok && !isUTF8(enc) {
-+ return nil, errors.New("client_encoding must be absent or 'UTF8'")
-+ }
-+ o["client_encoding"] = "UTF8"
-+ // DateStyle needs a similar treatment.
-+ if datestyle, ok := o["datestyle"]; ok {
-+ if datestyle != "ISO, MDY" {
-+ return nil, fmt.Errorf("setting datestyle must be absent or %v; got %v", "ISO, MDY", datestyle)
-+ }
-+ } else {
-+ o["datestyle"] = "ISO, MDY"
-+ }
-+
-+ // If a user is not provided by any other means, the last
-+ // resort is to use the current operating system provided user
-+ // name.
-+ if _, ok := o["user"]; !ok {
-+ u, err := userCurrent()
-+ if err != nil {
-+ return nil, err
-+ }
-+ o["user"] = u
-+ }
-+
-+ return &Connector{opts: o, dialer: defaultDialer{}}, nil
-+}
-diff --git a/vendor/github.com/lib/pq/connector_example_test.go b/vendor/github.com/lib/pq/connector_example_test.go
-new file mode 100644
-index 00000000000..9401fa0d400
---- /dev/null
-+++ b/vendor/github.com/lib/pq/connector_example_test.go
-@@ -0,0 +1,29 @@
-+// +build go1.10
-+
-+package pq_test
-+
-+import (
-+ "database/sql"
-+ "fmt"
-+
-+ "github.com/lib/pq"
-+)
-+
-+func ExampleNewConnector() {
-+ name := ""
-+ connector, err := pq.NewConnector(name)
-+ if err != nil {
-+ fmt.Println(err)
-+ return
-+ }
-+ db := sql.OpenDB(connector)
-+ defer db.Close()
-+
-+ // Use the DB
-+ txn, err := db.Begin()
-+ if err != nil {
-+ fmt.Println(err)
-+ return
-+ }
-+ txn.Rollback()
-+}
-diff --git a/vendor/github.com/lib/pq/connector_test.go b/vendor/github.com/lib/pq/connector_test.go
-new file mode 100644
-index 00000000000..3d2c67b0680
---- /dev/null
-+++ b/vendor/github.com/lib/pq/connector_test.go
-@@ -0,0 +1,67 @@
-+// +build go1.10
-+
-+package pq
-+
-+import (
-+ "context"
-+ "database/sql"
-+ "database/sql/driver"
-+ "testing"
-+)
-+
-+func TestNewConnector_WorksWithOpenDB(t *testing.T) {
-+ name := ""
-+ c, err := NewConnector(name)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ db := sql.OpenDB(c)
-+ defer db.Close()
-+ // database/sql might not call our Open at all unless we do something with
-+ // the connection
-+ txn, err := db.Begin()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ txn.Rollback()
-+}
-+
-+func TestNewConnector_Connect(t *testing.T) {
-+ name := ""
-+ c, err := NewConnector(name)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ db, err := c.Connect(context.Background())
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ defer db.Close()
-+ // database/sql might not call our Open at all unless we do something with
-+ // the connection
-+ txn, err := db.(driver.ConnBeginTx).BeginTx(context.Background(), driver.TxOptions{})
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ txn.Rollback()
-+}
-+
-+func TestNewConnector_Driver(t *testing.T) {
-+ name := ""
-+ c, err := NewConnector(name)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ db, err := c.Driver().Open(name)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ defer db.Close()
-+ // database/sql might not call our Open at all unless we do something with
-+ // the connection
-+ txn, err := db.(driver.ConnBeginTx).BeginTx(context.Background(), driver.TxOptions{})
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ txn.Rollback()
-+}
-diff --git a/vendor/github.com/lib/pq/copy.go b/vendor/github.com/lib/pq/copy.go
-new file mode 100644
-index 00000000000..55378d5b119
---- /dev/null
-+++ b/vendor/github.com/lib/pq/copy.go
-@@ -0,0 +1,282 @@
-+package pq
-+
-+import (
-+ "database/sql/driver"
-+ "encoding/binary"
-+ "errors"
-+ "fmt"
-+ "sync"
-+)
-+
-+var (
-+ errCopyInClosed = errors.New("pq: copyin statement has already been closed")
-+ errBinaryCopyNotSupported = errors.New("pq: only text format supported for COPY")
-+ errCopyToNotSupported = errors.New("pq: COPY TO is not supported")
-+ errCopyNotSupportedOutsideTxn = errors.New("pq: COPY is only allowed inside a transaction")
-+ errCopyInProgress = errors.New("pq: COPY in progress")
-+)
-+
-+// CopyIn creates a COPY FROM statement which can be prepared with
-+// Tx.Prepare(). The target table should be visible in search_path.
-+func CopyIn(table string, columns ...string) string {
-+ stmt := "COPY " + QuoteIdentifier(table) + " ("
-+ for i, col := range columns {
-+ if i != 0 {
-+ stmt += ", "
-+ }
-+ stmt += QuoteIdentifier(col)
-+ }
-+ stmt += ") FROM STDIN"
-+ return stmt
-+}
-+
-+// CopyInSchema creates a COPY FROM statement which can be prepared with
-+// Tx.Prepare().
-+func CopyInSchema(schema, table string, columns ...string) string {
-+ stmt := "COPY " + QuoteIdentifier(schema) + "." + QuoteIdentifier(table) + " ("
-+ for i, col := range columns {
-+ if i != 0 {
-+ stmt += ", "
-+ }
-+ stmt += QuoteIdentifier(col)
-+ }
-+ stmt += ") FROM STDIN"
-+ return stmt
-+}
-+
-+type copyin struct {
-+ cn *conn
-+ buffer []byte
-+ rowData chan []byte
-+ done chan bool
-+
-+ closed bool
-+
-+ sync.Mutex // guards err
-+ err error
-+}
-+
-+const ciBufferSize = 64 * 1024
-+
-+// flush buffer before the buffer is filled up and needs reallocation
-+const ciBufferFlushSize = 63 * 1024
-+
-+func (cn *conn) prepareCopyIn(q string) (_ driver.Stmt, err error) {
-+ if !cn.isInTransaction() {
-+ return nil, errCopyNotSupportedOutsideTxn
-+ }
-+
-+ ci := ©in{
-+ cn: cn,
-+ buffer: make([]byte, 0, ciBufferSize),
-+ rowData: make(chan []byte),
-+ done: make(chan bool, 1),
-+ }
-+ // add CopyData identifier + 4 bytes for message length
-+ ci.buffer = append(ci.buffer, 'd', 0, 0, 0, 0)
-+
-+ b := cn.writeBuf('Q')
-+ b.string(q)
-+ cn.send(b)
-+
-+awaitCopyInResponse:
-+ for {
-+ t, r := cn.recv1()
-+ switch t {
-+ case 'G':
-+ if r.byte() != 0 {
-+ err = errBinaryCopyNotSupported
-+ break awaitCopyInResponse
-+ }
-+ go ci.resploop()
-+ return ci, nil
-+ case 'H':
-+ err = errCopyToNotSupported
-+ break awaitCopyInResponse
-+ case 'E':
-+ err = parseError(r)
-+ case 'Z':
-+ if err == nil {
-+ ci.setBad()
-+ errorf("unexpected ReadyForQuery in response to COPY")
-+ }
-+ cn.processReadyForQuery(r)
-+ return nil, err
-+ default:
-+ ci.setBad()
-+ errorf("unknown response for copy query: %q", t)
-+ }
-+ }
-+
-+ // something went wrong, abort COPY before we return
-+ b = cn.writeBuf('f')
-+ b.string(err.Error())
-+ cn.send(b)
-+
-+ for {
-+ t, r := cn.recv1()
-+ switch t {
-+ case 'c', 'C', 'E':
-+ case 'Z':
-+ // correctly aborted, we're done
-+ cn.processReadyForQuery(r)
-+ return nil, err
-+ default:
-+ ci.setBad()
-+ errorf("unknown response for CopyFail: %q", t)
-+ }
-+ }
-+}
-+
-+func (ci *copyin) flush(buf []byte) {
-+ // set message length (without message identifier)
-+ binary.BigEndian.PutUint32(buf[1:], uint32(len(buf)-1))
-+
-+ _, err := ci.cn.c.Write(buf)
-+ if err != nil {
-+ panic(err)
-+ }
-+}
-+
-+func (ci *copyin) resploop() {
-+ for {
-+ var r readBuf
-+ t, err := ci.cn.recvMessage(&r)
-+ if err != nil {
-+ ci.setBad()
-+ ci.setError(err)
-+ ci.done <- true
-+ return
-+ }
-+ switch t {
-+ case 'C':
-+ // complete
-+ case 'N':
-+ // NoticeResponse
-+ case 'Z':
-+ ci.cn.processReadyForQuery(&r)
-+ ci.done <- true
-+ return
-+ case 'E':
-+ err := parseError(&r)
-+ ci.setError(err)
-+ default:
-+ ci.setBad()
-+ ci.setError(fmt.Errorf("unknown response during CopyIn: %q", t))
-+ ci.done <- true
-+ return
-+ }
-+ }
-+}
-+
-+func (ci *copyin) setBad() {
-+ ci.Lock()
-+ ci.cn.bad = true
-+ ci.Unlock()
-+}
-+
-+func (ci *copyin) isBad() bool {
-+ ci.Lock()
-+ b := ci.cn.bad
-+ ci.Unlock()
-+ return b
-+}
-+
-+func (ci *copyin) isErrorSet() bool {
-+ ci.Lock()
-+ isSet := (ci.err != nil)
-+ ci.Unlock()
-+ return isSet
-+}
-+
-+// setError() sets ci.err if one has not been set already. Caller must not be
-+// holding ci.Mutex.
-+func (ci *copyin) setError(err error) {
-+ ci.Lock()
-+ if ci.err == nil {
-+ ci.err = err
-+ }
-+ ci.Unlock()
-+}
-+
-+func (ci *copyin) NumInput() int {
-+ return -1
-+}
-+
-+func (ci *copyin) Query(v []driver.Value) (r driver.Rows, err error) {
-+ return nil, ErrNotSupported
-+}
-+
-+// Exec inserts values into the COPY stream. The insert is asynchronous
-+// and Exec can return errors from previous Exec calls to the same
-+// COPY stmt.
-+//
-+// You need to call Exec(nil) to sync the COPY stream and to get any
-+// errors from pending data, since Stmt.Close() doesn't return errors
-+// to the user.
-+func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) {
-+ if ci.closed {
-+ return nil, errCopyInClosed
-+ }
-+
-+ if ci.isBad() {
-+ return nil, driver.ErrBadConn
-+ }
-+ defer ci.cn.errRecover(&err)
-+
-+ if ci.isErrorSet() {
-+ return nil, ci.err
-+ }
-+
-+ if len(v) == 0 {
-+ return driver.RowsAffected(0), ci.Close()
-+ }
-+
-+ numValues := len(v)
-+ for i, value := range v {
-+ ci.buffer = appendEncodedText(&ci.cn.parameterStatus, ci.buffer, value)
-+ if i < numValues-1 {
-+ ci.buffer = append(ci.buffer, '\t')
-+ }
-+ }
-+
-+ ci.buffer = append(ci.buffer, '\n')
-+
-+ if len(ci.buffer) > ciBufferFlushSize {
-+ ci.flush(ci.buffer)
-+ // reset buffer, keep bytes for message identifier and length
-+ ci.buffer = ci.buffer[:5]
-+ }
-+
-+ return driver.RowsAffected(0), nil
-+}
-+
-+func (ci *copyin) Close() (err error) {
-+ if ci.closed { // Don't do anything, we're already closed
-+ return nil
-+ }
-+ ci.closed = true
-+
-+ if ci.isBad() {
-+ return driver.ErrBadConn
-+ }
-+ defer ci.cn.errRecover(&err)
-+
-+ if len(ci.buffer) > 0 {
-+ ci.flush(ci.buffer)
-+ }
-+ // Avoid touching the scratch buffer as resploop could be using it.
-+ err = ci.cn.sendSimpleMessage('c')
-+ if err != nil {
-+ return err
-+ }
-+
-+ <-ci.done
-+ ci.cn.inCopy = false
-+
-+ if ci.isErrorSet() {
-+ err = ci.err
-+ return err
-+ }
-+ return nil
-+}
-diff --git a/vendor/github.com/lib/pq/copy_test.go b/vendor/github.com/lib/pq/copy_test.go
-new file mode 100644
-index 00000000000..c5e2694b648
---- /dev/null
-+++ b/vendor/github.com/lib/pq/copy_test.go
-@@ -0,0 +1,468 @@
-+package pq
-+
-+import (
-+ "bytes"
-+ "database/sql"
-+ "database/sql/driver"
-+ "net"
-+ "strings"
-+ "testing"
-+)
-+
-+func TestCopyInStmt(t *testing.T) {
-+ stmt := CopyIn("table name")
-+ if stmt != `COPY "table name" () FROM STDIN` {
-+ t.Fatal(stmt)
-+ }
-+
-+ stmt = CopyIn("table name", "column 1", "column 2")
-+ if stmt != `COPY "table name" ("column 1", "column 2") FROM STDIN` {
-+ t.Fatal(stmt)
-+ }
-+
-+ stmt = CopyIn(`table " name """`, `co"lumn""`)
-+ if stmt != `COPY "table "" name """"""" ("co""lumn""""") FROM STDIN` {
-+ t.Fatal(stmt)
-+ }
-+}
-+
-+func TestCopyInSchemaStmt(t *testing.T) {
-+ stmt := CopyInSchema("schema name", "table name")
-+ if stmt != `COPY "schema name"."table name" () FROM STDIN` {
-+ t.Fatal(stmt)
-+ }
-+
-+ stmt = CopyInSchema("schema name", "table name", "column 1", "column 2")
-+ if stmt != `COPY "schema name"."table name" ("column 1", "column 2") FROM STDIN` {
-+ t.Fatal(stmt)
-+ }
-+
-+ stmt = CopyInSchema(`schema " name """`, `table " name """`, `co"lumn""`)
-+ if stmt != `COPY "schema "" name """"""".`+
-+ `"table "" name """"""" ("co""lumn""""") FROM STDIN` {
-+ t.Fatal(stmt)
-+ }
-+}
-+
-+func TestCopyInMultipleValues(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ txn, err := db.Begin()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ defer txn.Rollback()
-+
-+ _, err = txn.Exec("CREATE TEMP TABLE temp (a int, b varchar)")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ stmt, err := txn.Prepare(CopyIn("temp", "a", "b"))
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ longString := strings.Repeat("#", 500)
-+
-+ for i := 0; i < 500; i++ {
-+ _, err = stmt.Exec(int64(i), longString)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ }
-+
-+ _, err = stmt.Exec()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ err = stmt.Close()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ var num int
-+ err = txn.QueryRow("SELECT COUNT(*) FROM temp").Scan(&num)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ if num != 500 {
-+ t.Fatalf("expected 500 items, not %d", num)
-+ }
-+}
-+
-+func TestCopyInRaiseStmtTrigger(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ if getServerVersion(t, db) < 90000 {
-+ var exists int
-+ err := db.QueryRow("SELECT 1 FROM pg_language WHERE lanname = 'plpgsql'").Scan(&exists)
-+ if err == sql.ErrNoRows {
-+ t.Skip("language PL/PgSQL does not exist; skipping TestCopyInRaiseStmtTrigger")
-+ } else if err != nil {
-+ t.Fatal(err)
-+ }
-+ }
-+
-+ txn, err := db.Begin()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ defer txn.Rollback()
-+
-+ _, err = txn.Exec("CREATE TEMP TABLE temp (a int, b varchar)")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ _, err = txn.Exec(`
-+ CREATE OR REPLACE FUNCTION pg_temp.temptest()
-+ RETURNS trigger AS
-+ $BODY$ begin
-+ raise notice 'Hello world';
-+ return new;
-+ end $BODY$
-+ LANGUAGE plpgsql`)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ _, err = txn.Exec(`
-+ CREATE TRIGGER temptest_trigger
-+ BEFORE INSERT
-+ ON temp
-+ FOR EACH ROW
-+ EXECUTE PROCEDURE pg_temp.temptest()`)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ stmt, err := txn.Prepare(CopyIn("temp", "a", "b"))
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ longString := strings.Repeat("#", 500)
-+
-+ _, err = stmt.Exec(int64(1), longString)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ _, err = stmt.Exec()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ err = stmt.Close()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ var num int
-+ err = txn.QueryRow("SELECT COUNT(*) FROM temp").Scan(&num)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ if num != 1 {
-+ t.Fatalf("expected 1 items, not %d", num)
-+ }
-+}
-+
-+func TestCopyInTypes(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ txn, err := db.Begin()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ defer txn.Rollback()
-+
-+ _, err = txn.Exec("CREATE TEMP TABLE temp (num INTEGER, text VARCHAR, blob BYTEA, nothing VARCHAR)")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ stmt, err := txn.Prepare(CopyIn("temp", "num", "text", "blob", "nothing"))
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ _, err = stmt.Exec(int64(1234567890), "Héllö\n ☃!\r\t\\", []byte{0, 255, 9, 10, 13}, nil)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ _, err = stmt.Exec()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ err = stmt.Close()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ var num int
-+ var text string
-+ var blob []byte
-+ var nothing sql.NullString
-+
-+ err = txn.QueryRow("SELECT * FROM temp").Scan(&num, &text, &blob, ¬hing)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ if num != 1234567890 {
-+ t.Fatal("unexpected result", num)
-+ }
-+ if text != "Héllö\n ☃!\r\t\\" {
-+ t.Fatal("unexpected result", text)
-+ }
-+ if !bytes.Equal(blob, []byte{0, 255, 9, 10, 13}) {
-+ t.Fatal("unexpected result", blob)
-+ }
-+ if nothing.Valid {
-+ t.Fatal("unexpected result", nothing.String)
-+ }
-+}
-+
-+func TestCopyInWrongType(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ txn, err := db.Begin()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ defer txn.Rollback()
-+
-+ _, err = txn.Exec("CREATE TEMP TABLE temp (num INTEGER)")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ stmt, err := txn.Prepare(CopyIn("temp", "num"))
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ defer stmt.Close()
-+
-+ _, err = stmt.Exec("Héllö\n ☃!\r\t\\")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ _, err = stmt.Exec()
-+ if err == nil {
-+ t.Fatal("expected error")
-+ }
-+ if pge := err.(*Error); pge.Code.Name() != "invalid_text_representation" {
-+ t.Fatalf("expected 'invalid input syntax for integer' error, got %s (%+v)", pge.Code.Name(), pge)
-+ }
-+}
-+
-+func TestCopyOutsideOfTxnError(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ _, err := db.Prepare(CopyIn("temp", "num"))
-+ if err == nil {
-+ t.Fatal("COPY outside of transaction did not return an error")
-+ }
-+ if err != errCopyNotSupportedOutsideTxn {
-+ t.Fatalf("expected %s, got %s", err, err.Error())
-+ }
-+}
-+
-+func TestCopyInBinaryError(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ txn, err := db.Begin()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ defer txn.Rollback()
-+
-+ _, err = txn.Exec("CREATE TEMP TABLE temp (num INTEGER)")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ _, err = txn.Prepare("COPY temp (num) FROM STDIN WITH binary")
-+ if err != errBinaryCopyNotSupported {
-+ t.Fatalf("expected %s, got %+v", errBinaryCopyNotSupported, err)
-+ }
-+ // check that the protocol is in a valid state
-+ err = txn.Rollback()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+}
-+
-+func TestCopyFromError(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ txn, err := db.Begin()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ defer txn.Rollback()
-+
-+ _, err = txn.Exec("CREATE TEMP TABLE temp (num INTEGER)")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ _, err = txn.Prepare("COPY temp (num) TO STDOUT")
-+ if err != errCopyToNotSupported {
-+ t.Fatalf("expected %s, got %+v", errCopyToNotSupported, err)
-+ }
-+ // check that the protocol is in a valid state
-+ err = txn.Rollback()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+}
-+
-+func TestCopySyntaxError(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ txn, err := db.Begin()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ defer txn.Rollback()
-+
-+ _, err = txn.Prepare("COPY ")
-+ if err == nil {
-+ t.Fatal("expected error")
-+ }
-+ if pge := err.(*Error); pge.Code.Name() != "syntax_error" {
-+ t.Fatalf("expected syntax error, got %s (%+v)", pge.Code.Name(), pge)
-+ }
-+ // check that the protocol is in a valid state
-+ err = txn.Rollback()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+}
-+
-+// Tests for connection errors in copyin.resploop()
-+func TestCopyRespLoopConnectionError(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ txn, err := db.Begin()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ defer txn.Rollback()
-+
-+ var pid int
-+ err = txn.QueryRow("SELECT pg_backend_pid()").Scan(&pid)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ _, err = txn.Exec("CREATE TEMP TABLE temp (a int)")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ stmt, err := txn.Prepare(CopyIn("temp", "a"))
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ defer stmt.Close()
-+
-+ _, err = db.Exec("SELECT pg_terminate_backend($1)", pid)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ if getServerVersion(t, db) < 90500 {
-+ // We have to try and send something over, since postgres before
-+ // version 9.5 won't process SIGTERMs while it's waiting for
-+ // CopyData/CopyEnd messages; see tcop/postgres.c.
-+ _, err = stmt.Exec(1)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ }
-+ _, err = stmt.Exec()
-+ if err == nil {
-+ t.Fatalf("expected error")
-+ }
-+ switch pge := err.(type) {
-+ case *Error:
-+ if pge.Code.Name() != "admin_shutdown" {
-+ t.Fatalf("expected admin_shutdown, got %s", pge.Code.Name())
-+ }
-+ case *net.OpError:
-+ // ignore
-+ default:
-+ if err == driver.ErrBadConn {
-+ // likely an EPIPE
-+ } else {
-+ t.Fatalf("unexpected error, got %+#v", err)
-+ }
-+ }
-+
-+ _ = stmt.Close()
-+}
-+
-+func BenchmarkCopyIn(b *testing.B) {
-+ db := openTestConn(b)
-+ defer db.Close()
-+
-+ txn, err := db.Begin()
-+ if err != nil {
-+ b.Fatal(err)
-+ }
-+ defer txn.Rollback()
-+
-+ _, err = txn.Exec("CREATE TEMP TABLE temp (a int, b varchar)")
-+ if err != nil {
-+ b.Fatal(err)
-+ }
-+
-+ stmt, err := txn.Prepare(CopyIn("temp", "a", "b"))
-+ if err != nil {
-+ b.Fatal(err)
-+ }
-+
-+ for i := 0; i < b.N; i++ {
-+ _, err = stmt.Exec(int64(i), "hello world!")
-+ if err != nil {
-+ b.Fatal(err)
-+ }
-+ }
-+
-+ _, err = stmt.Exec()
-+ if err != nil {
-+ b.Fatal(err)
-+ }
-+
-+ err = stmt.Close()
-+ if err != nil {
-+ b.Fatal(err)
-+ }
-+
-+ var num int
-+ err = txn.QueryRow("SELECT COUNT(*) FROM temp").Scan(&num)
-+ if err != nil {
-+ b.Fatal(err)
-+ }
-+
-+ if num != b.N {
-+ b.Fatalf("expected %d items, not %d", b.N, num)
-+ }
-+}
-diff --git a/vendor/github.com/lib/pq/doc.go b/vendor/github.com/lib/pq/doc.go
-new file mode 100644
-index 00000000000..2a60054e2e0
---- /dev/null
-+++ b/vendor/github.com/lib/pq/doc.go
-@@ -0,0 +1,245 @@
-+/*
-+Package pq is a pure Go Postgres driver for the database/sql package.
-+
-+In most cases clients will use the database/sql package instead of
-+using this package directly. For example:
-+
-+ import (
-+ "database/sql"
-+
-+ _ "github.com/lib/pq"
-+ )
-+
-+ func main() {
-+ connStr := "user=pqgotest dbname=pqgotest sslmode=verify-full"
-+ db, err := sql.Open("postgres", connStr)
-+ if err != nil {
-+ log.Fatal(err)
-+ }
-+
-+ age := 21
-+ rows, err := db.Query("SELECT name FROM users WHERE age = $1", age)
-+ …
-+ }
-+
-+You can also connect to a database using a URL. For example:
-+
-+ connStr := "postgres://pqgotest:password@localhost/pqgotest?sslmode=verify-full"
-+ db, err := sql.Open("postgres", connStr)
-+
-+
-+Connection String Parameters
-+
-+
-+Similarly to libpq, when establishing a connection using pq you are expected to
-+supply a connection string containing zero or more parameters.
-+A subset of the connection parameters supported by libpq are also supported by pq.
-+Additionally, pq also lets you specify run-time parameters (such as search_path or work_mem)
-+directly in the connection string. This is different from libpq, which does not allow
-+run-time parameters in the connection string, instead requiring you to supply
-+them in the options parameter.
-+
-+For compatibility with libpq, the following special connection parameters are
-+supported:
-+
-+ * dbname - The name of the database to connect to
-+ * user - The user to sign in as
-+ * password - The user's password
-+ * host - The host to connect to. Values that start with / are for unix
-+ domain sockets. (default is localhost)
-+ * port - The port to bind to. (default is 5432)
-+ * sslmode - Whether or not to use SSL (default is require, this is not
-+ the default for libpq)
-+ * fallback_application_name - An application_name to fall back to if one isn't provided.
-+ * connect_timeout - Maximum wait for connection, in seconds. Zero or
-+ not specified means wait indefinitely.
-+ * sslcert - Cert file location. The file must contain PEM encoded data.
-+ * sslkey - Key file location. The file must contain PEM encoded data.
-+ * sslrootcert - The location of the root certificate file. The file
-+ must contain PEM encoded data.
-+
-+Valid values for sslmode are:
-+
-+ * disable - No SSL
-+ * require - Always SSL (skip verification)
-+ * verify-ca - Always SSL (verify that the certificate presented by the
-+ server was signed by a trusted CA)
-+ * verify-full - Always SSL (verify that the certification presented by
-+ the server was signed by a trusted CA and the server host name
-+ matches the one in the certificate)
-+
-+See http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING
-+for more information about connection string parameters.
-+
-+Use single quotes for values that contain whitespace:
-+
-+ "user=pqgotest password='with spaces'"
-+
-+A backslash will escape the next character in values:
-+
-+ "user=space\ man password='it\'s valid'"
-+
-+Note that the connection parameter client_encoding (which sets the
-+text encoding for the connection) may be set but must be "UTF8",
-+matching with the same rules as Postgres. It is an error to provide
-+any other value.
-+
-+In addition to the parameters listed above, any run-time parameter that can be
-+set at backend start time can be set in the connection string. For more
-+information, see
-+http://www.postgresql.org/docs/current/static/runtime-config.html.
-+
-+Most environment variables as specified at http://www.postgresql.org/docs/current/static/libpq-envars.html
-+supported by libpq are also supported by pq. If any of the environment
-+variables not supported by pq are set, pq will panic during connection
-+establishment. Environment variables have a lower precedence than explicitly
-+provided connection parameters.
-+
-+The pgpass mechanism as described in http://www.postgresql.org/docs/current/static/libpq-pgpass.html
-+is supported, but on Windows PGPASSFILE must be specified explicitly.
-+
-+
-+Queries
-+
-+
-+database/sql does not dictate any specific format for parameter
-+markers in query strings, and pq uses the Postgres-native ordinal markers,
-+as shown above. The same marker can be reused for the same parameter:
-+
-+ rows, err := db.Query(`SELECT name FROM users WHERE favorite_fruit = $1
-+ OR age BETWEEN $2 AND $2 + 3`, "orange", 64)
-+
-+pq does not support the LastInsertId() method of the Result type in database/sql.
-+To return the identifier of an INSERT (or UPDATE or DELETE), use the Postgres
-+RETURNING clause with a standard Query or QueryRow call:
-+
-+ var userid int
-+ err := db.QueryRow(`INSERT INTO users(name, favorite_fruit, age)
-+ VALUES('beatrice', 'starfruit', 93) RETURNING id`).Scan(&userid)
-+
-+For more details on RETURNING, see the Postgres documentation:
-+
-+ http://www.postgresql.org/docs/current/static/sql-insert.html
-+ http://www.postgresql.org/docs/current/static/sql-update.html
-+ http://www.postgresql.org/docs/current/static/sql-delete.html
-+
-+For additional instructions on querying see the documentation for the database/sql package.
-+
-+
-+Data Types
-+
-+
-+Parameters pass through driver.DefaultParameterConverter before they are handled
-+by this package. When the binary_parameters connection option is enabled,
-+[]byte values are sent directly to the backend as data in binary format.
-+
-+This package returns the following types for values from the PostgreSQL backend:
-+
-+ - integer types smallint, integer, and bigint are returned as int64
-+ - floating-point types real and double precision are returned as float64
-+ - character types char, varchar, and text are returned as string
-+ - temporal types date, time, timetz, timestamp, and timestamptz are
-+ returned as time.Time
-+ - the boolean type is returned as bool
-+ - the bytea type is returned as []byte
-+
-+All other types are returned directly from the backend as []byte values in text format.
-+
-+
-+Errors
-+
-+
-+pq may return errors of type *pq.Error which can be interrogated for error details:
-+
-+ if err, ok := err.(*pq.Error); ok {
-+ fmt.Println("pq error:", err.Code.Name())
-+ }
-+
-+See the pq.Error type for details.
-+
-+
-+Bulk imports
-+
-+You can perform bulk imports by preparing a statement returned by pq.CopyIn (or
-+pq.CopyInSchema) in an explicit transaction (sql.Tx). The returned statement
-+handle can then be repeatedly "executed" to copy data into the target table.
-+After all data has been processed you should call Exec() once with no arguments
-+to flush all buffered data. Any call to Exec() might return an error which
-+should be handled appropriately, but because of the internal buffering an error
-+returned by Exec() might not be related to the data passed in the call that
-+failed.
-+
-+CopyIn uses COPY FROM internally. It is not possible to COPY outside of an
-+explicit transaction in pq.
-+
-+Usage example:
-+
-+ txn, err := db.Begin()
-+ if err != nil {
-+ log.Fatal(err)
-+ }
-+
-+ stmt, err := txn.Prepare(pq.CopyIn("users", "name", "age"))
-+ if err != nil {
-+ log.Fatal(err)
-+ }
-+
-+ for _, user := range users {
-+ _, err = stmt.Exec(user.Name, int64(user.Age))
-+ if err != nil {
-+ log.Fatal(err)
-+ }
-+ }
-+
-+ _, err = stmt.Exec()
-+ if err != nil {
-+ log.Fatal(err)
-+ }
-+
-+ err = stmt.Close()
-+ if err != nil {
-+ log.Fatal(err)
-+ }
-+
-+ err = txn.Commit()
-+ if err != nil {
-+ log.Fatal(err)
-+ }
-+
-+
-+Notifications
-+
-+
-+PostgreSQL supports a simple publish/subscribe model over database
-+connections. See http://www.postgresql.org/docs/current/static/sql-notify.html
-+for more information about the general mechanism.
-+
-+To start listening for notifications, you first have to open a new connection
-+to the database by calling NewListener. This connection can not be used for
-+anything other than LISTEN / NOTIFY. Calling Listen will open a "notification
-+channel"; once a notification channel is open, a notification generated on that
-+channel will effect a send on the Listener.Notify channel. A notification
-+channel will remain open until Unlisten is called, though connection loss might
-+result in some notifications being lost. To solve this problem, Listener sends
-+a nil pointer over the Notify channel any time the connection is re-established
-+following a connection loss. The application can get information about the
-+state of the underlying connection by setting an event callback in the call to
-+NewListener.
-+
-+A single Listener can safely be used from concurrent goroutines, which means
-+that there is often no need to create more than one Listener in your
-+application. However, a Listener is always connected to a single database, so
-+you will need to create a new Listener instance for every database you want to
-+receive notifications in.
-+
-+The channel name in both Listen and Unlisten is case sensitive, and can contain
-+any characters legal in an identifier (see
-+http://www.postgresql.org/docs/current/static/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS
-+for more information). Note that the channel name will be truncated to 63
-+bytes by the PostgreSQL server.
-+
-+You can find a complete, working example of Listener usage at
-+https://godoc.org/github.com/lib/pq/example/listen.
-+
-+*/
-+package pq
-diff --git a/vendor/github.com/lib/pq/encode.go b/vendor/github.com/lib/pq/encode.go
-new file mode 100644
-index 00000000000..73cafb89443
---- /dev/null
-+++ b/vendor/github.com/lib/pq/encode.go
-@@ -0,0 +1,602 @@
-+package pq
-+
-+import (
-+ "bytes"
-+ "database/sql/driver"
-+ "encoding/binary"
-+ "encoding/hex"
-+ "errors"
-+ "fmt"
-+ "math"
-+ "strconv"
-+ "strings"
-+ "sync"
-+ "time"
-+
-+ "github.com/lib/pq/oid"
-+)
-+
-+func binaryEncode(parameterStatus *parameterStatus, x interface{}) []byte {
-+ switch v := x.(type) {
-+ case []byte:
-+ return v
-+ default:
-+ return encode(parameterStatus, x, oid.T_unknown)
-+ }
-+}
-+
-+func encode(parameterStatus *parameterStatus, x interface{}, pgtypOid oid.Oid) []byte {
-+ switch v := x.(type) {
-+ case int64:
-+ return strconv.AppendInt(nil, v, 10)
-+ case float64:
-+ return strconv.AppendFloat(nil, v, 'f', -1, 64)
-+ case []byte:
-+ if pgtypOid == oid.T_bytea {
-+ return encodeBytea(parameterStatus.serverVersion, v)
-+ }
-+
-+ return v
-+ case string:
-+ if pgtypOid == oid.T_bytea {
-+ return encodeBytea(parameterStatus.serverVersion, []byte(v))
-+ }
-+
-+ return []byte(v)
-+ case bool:
-+ return strconv.AppendBool(nil, v)
-+ case time.Time:
-+ return formatTs(v)
-+
-+ default:
-+ errorf("encode: unknown type for %T", v)
-+ }
-+
-+ panic("not reached")
-+}
-+
-+func decode(parameterStatus *parameterStatus, s []byte, typ oid.Oid, f format) interface{} {
-+ switch f {
-+ case formatBinary:
-+ return binaryDecode(parameterStatus, s, typ)
-+ case formatText:
-+ return textDecode(parameterStatus, s, typ)
-+ default:
-+ panic("not reached")
-+ }
-+}
-+
-+func binaryDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} {
-+ switch typ {
-+ case oid.T_bytea:
-+ return s
-+ case oid.T_int8:
-+ return int64(binary.BigEndian.Uint64(s))
-+ case oid.T_int4:
-+ return int64(int32(binary.BigEndian.Uint32(s)))
-+ case oid.T_int2:
-+ return int64(int16(binary.BigEndian.Uint16(s)))
-+ case oid.T_uuid:
-+ b, err := decodeUUIDBinary(s)
-+ if err != nil {
-+ panic(err)
-+ }
-+ return b
-+
-+ default:
-+ errorf("don't know how to decode binary parameter of type %d", uint32(typ))
-+ }
-+
-+ panic("not reached")
-+}
-+
-+func textDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} {
-+ switch typ {
-+ case oid.T_char, oid.T_varchar, oid.T_text:
-+ return string(s)
-+ case oid.T_bytea:
-+ b, err := parseBytea(s)
-+ if err != nil {
-+ errorf("%s", err)
-+ }
-+ return b
-+ case oid.T_timestamptz:
-+ return parseTs(parameterStatus.currentLocation, string(s))
-+ case oid.T_timestamp, oid.T_date:
-+ return parseTs(nil, string(s))
-+ case oid.T_time:
-+ return mustParse("15:04:05", typ, s)
-+ case oid.T_timetz:
-+ return mustParse("15:04:05-07", typ, s)
-+ case oid.T_bool:
-+ return s[0] == 't'
-+ case oid.T_int8, oid.T_int4, oid.T_int2:
-+ i, err := strconv.ParseInt(string(s), 10, 64)
-+ if err != nil {
-+ errorf("%s", err)
-+ }
-+ return i
-+ case oid.T_float4, oid.T_float8:
-+ // We always use 64 bit parsing, regardless of whether the input text is for
-+ // a float4 or float8, because clients expect float64s for all float datatypes
-+ // and returning a 32-bit parsed float64 produces lossy results.
-+ f, err := strconv.ParseFloat(string(s), 64)
-+ if err != nil {
-+ errorf("%s", err)
-+ }
-+ return f
-+ }
-+
-+ return s
-+}
-+
-+// appendEncodedText encodes item in text format as required by COPY
-+// and appends to buf
-+func appendEncodedText(parameterStatus *parameterStatus, buf []byte, x interface{}) []byte {
-+ switch v := x.(type) {
-+ case int64:
-+ return strconv.AppendInt(buf, v, 10)
-+ case float64:
-+ return strconv.AppendFloat(buf, v, 'f', -1, 64)
-+ case []byte:
-+ encodedBytea := encodeBytea(parameterStatus.serverVersion, v)
-+ return appendEscapedText(buf, string(encodedBytea))
-+ case string:
-+ return appendEscapedText(buf, v)
-+ case bool:
-+ return strconv.AppendBool(buf, v)
-+ case time.Time:
-+ return append(buf, formatTs(v)...)
-+ case nil:
-+ return append(buf, "\\N"...)
-+ default:
-+ errorf("encode: unknown type for %T", v)
-+ }
-+
-+ panic("not reached")
-+}
-+
-+func appendEscapedText(buf []byte, text string) []byte {
-+ escapeNeeded := false
-+ startPos := 0
-+ var c byte
-+
-+ // check if we need to escape
-+ for i := 0; i < len(text); i++ {
-+ c = text[i]
-+ if c == '\\' || c == '\n' || c == '\r' || c == '\t' {
-+ escapeNeeded = true
-+ startPos = i
-+ break
-+ }
-+ }
-+ if !escapeNeeded {
-+ return append(buf, text...)
-+ }
-+
-+ // copy till first char to escape, iterate the rest
-+ result := append(buf, text[:startPos]...)
-+ for i := startPos; i < len(text); i++ {
-+ c = text[i]
-+ switch c {
-+ case '\\':
-+ result = append(result, '\\', '\\')
-+ case '\n':
-+ result = append(result, '\\', 'n')
-+ case '\r':
-+ result = append(result, '\\', 'r')
-+ case '\t':
-+ result = append(result, '\\', 't')
-+ default:
-+ result = append(result, c)
-+ }
-+ }
-+ return result
-+}
-+
-+func mustParse(f string, typ oid.Oid, s []byte) time.Time {
-+ str := string(s)
-+
-+ // check for a 30-minute-offset timezone
-+ if (typ == oid.T_timestamptz || typ == oid.T_timetz) &&
-+ str[len(str)-3] == ':' {
-+ f += ":00"
-+ }
-+ t, err := time.Parse(f, str)
-+ if err != nil {
-+ errorf("decode: %s", err)
-+ }
-+ return t
-+}
-+
-+var errInvalidTimestamp = errors.New("invalid timestamp")
-+
-+type timestampParser struct {
-+ err error
-+}
-+
-+func (p *timestampParser) expect(str string, char byte, pos int) {
-+ if p.err != nil {
-+ return
-+ }
-+ if pos+1 > len(str) {
-+ p.err = errInvalidTimestamp
-+ return
-+ }
-+ if c := str[pos]; c != char && p.err == nil {
-+ p.err = fmt.Errorf("expected '%v' at position %v; got '%v'", char, pos, c)
-+ }
-+}
-+
-+func (p *timestampParser) mustAtoi(str string, begin int, end int) int {
-+ if p.err != nil {
-+ return 0
-+ }
-+ if begin < 0 || end < 0 || begin > end || end > len(str) {
-+ p.err = errInvalidTimestamp
-+ return 0
-+ }
-+ result, err := strconv.Atoi(str[begin:end])
-+ if err != nil {
-+ if p.err == nil {
-+ p.err = fmt.Errorf("expected number; got '%v'", str)
-+ }
-+ return 0
-+ }
-+ return result
-+}
-+
-+// The location cache caches the time zones typically used by the client.
-+type locationCache struct {
-+ cache map[int]*time.Location
-+ lock sync.Mutex
-+}
-+
-+// All connections share the same list of timezones. Benchmarking shows that
-+// about 5% speed could be gained by putting the cache in the connection and
-+// losing the mutex, at the cost of a small amount of memory and a somewhat
-+// significant increase in code complexity.
-+var globalLocationCache = newLocationCache()
-+
-+func newLocationCache() *locationCache {
-+ return &locationCache{cache: make(map[int]*time.Location)}
-+}
-+
-+// Returns the cached timezone for the specified offset, creating and caching
-+// it if necessary.
-+func (c *locationCache) getLocation(offset int) *time.Location {
-+ c.lock.Lock()
-+ defer c.lock.Unlock()
-+
-+ location, ok := c.cache[offset]
-+ if !ok {
-+ location = time.FixedZone("", offset)
-+ c.cache[offset] = location
-+ }
-+
-+ return location
-+}
-+
-+var infinityTsEnabled = false
-+var infinityTsNegative time.Time
-+var infinityTsPositive time.Time
-+
-+const (
-+ infinityTsEnabledAlready = "pq: infinity timestamp enabled already"
-+ infinityTsNegativeMustBeSmaller = "pq: infinity timestamp: negative value must be smaller (before) than positive"
-+)
-+
-+// EnableInfinityTs controls the handling of Postgres' "-infinity" and
-+// "infinity" "timestamp"s.
-+//
-+// If EnableInfinityTs is not called, "-infinity" and "infinity" will return
-+// []byte("-infinity") and []byte("infinity") respectively, and potentially
-+// cause error "sql: Scan error on column index 0: unsupported driver -> Scan
-+// pair: []uint8 -> *time.Time", when scanning into a time.Time value.
-+//
-+// Once EnableInfinityTs has been called, all connections created using this
-+// driver will decode Postgres' "-infinity" and "infinity" for "timestamp",
-+// "timestamp with time zone" and "date" types to the predefined minimum and
-+// maximum times, respectively. When encoding time.Time values, any time which
-+// equals or precedes the predefined minimum time will be encoded to
-+// "-infinity". Any values at or past the maximum time will similarly be
-+// encoded to "infinity".
-+//
-+// If EnableInfinityTs is called with negative >= positive, it will panic.
-+// Calling EnableInfinityTs after a connection has been established results in
-+// undefined behavior. If EnableInfinityTs is called more than once, it will
-+// panic.
-+func EnableInfinityTs(negative time.Time, positive time.Time) {
-+ if infinityTsEnabled {
-+ panic(infinityTsEnabledAlready)
-+ }
-+ if !negative.Before(positive) {
-+ panic(infinityTsNegativeMustBeSmaller)
-+ }
-+ infinityTsEnabled = true
-+ infinityTsNegative = negative
-+ infinityTsPositive = positive
-+}
-+
-+/*
-+ * Testing might want to toggle infinityTsEnabled
-+ */
-+func disableInfinityTs() {
-+ infinityTsEnabled = false
-+}
-+
-+// This is a time function specific to the Postgres default DateStyle
-+// setting ("ISO, MDY"), the only one we currently support. This
-+// accounts for the discrepancies between the parsing available with
-+// time.Parse and the Postgres date formatting quirks.
-+func parseTs(currentLocation *time.Location, str string) interface{} {
-+ switch str {
-+ case "-infinity":
-+ if infinityTsEnabled {
-+ return infinityTsNegative
-+ }
-+ return []byte(str)
-+ case "infinity":
-+ if infinityTsEnabled {
-+ return infinityTsPositive
-+ }
-+ return []byte(str)
-+ }
-+ t, err := ParseTimestamp(currentLocation, str)
-+ if err != nil {
-+ panic(err)
-+ }
-+ return t
-+}
-+
-+// ParseTimestamp parses Postgres' text format. It returns a time.Time in
-+// currentLocation iff that time's offset agrees with the offset sent from the
-+// Postgres server. Otherwise, ParseTimestamp returns a time.Time with the
-+// fixed offset offset provided by the Postgres server.
-+func ParseTimestamp(currentLocation *time.Location, str string) (time.Time, error) {
-+ p := timestampParser{}
-+
-+ monSep := strings.IndexRune(str, '-')
-+ // this is Gregorian year, not ISO Year
-+ // In Gregorian system, the year 1 BC is followed by AD 1
-+ year := p.mustAtoi(str, 0, monSep)
-+ daySep := monSep + 3
-+ month := p.mustAtoi(str, monSep+1, daySep)
-+ p.expect(str, '-', daySep)
-+ timeSep := daySep + 3
-+ day := p.mustAtoi(str, daySep+1, timeSep)
-+
-+ minLen := monSep + len("01-01") + 1
-+
-+ isBC := strings.HasSuffix(str, " BC")
-+ if isBC {
-+ minLen += 3
-+ }
-+
-+ var hour, minute, second int
-+ if len(str) > minLen {
-+ p.expect(str, ' ', timeSep)
-+ minSep := timeSep + 3
-+ p.expect(str, ':', minSep)
-+ hour = p.mustAtoi(str, timeSep+1, minSep)
-+ secSep := minSep + 3
-+ p.expect(str, ':', secSep)
-+ minute = p.mustAtoi(str, minSep+1, secSep)
-+ secEnd := secSep + 3
-+ second = p.mustAtoi(str, secSep+1, secEnd)
-+ }
-+ remainderIdx := monSep + len("01-01 00:00:00") + 1
-+ // Three optional (but ordered) sections follow: the
-+ // fractional seconds, the time zone offset, and the BC
-+ // designation. We set them up here and adjust the other
-+ // offsets if the preceding sections exist.
-+
-+ nanoSec := 0
-+ tzOff := 0
-+
-+ if remainderIdx < len(str) && str[remainderIdx] == '.' {
-+ fracStart := remainderIdx + 1
-+ fracOff := strings.IndexAny(str[fracStart:], "-+ ")
-+ if fracOff < 0 {
-+ fracOff = len(str) - fracStart
-+ }
-+ fracSec := p.mustAtoi(str, fracStart, fracStart+fracOff)
-+ nanoSec = fracSec * (1000000000 / int(math.Pow(10, float64(fracOff))))
-+
-+ remainderIdx += fracOff + 1
-+ }
-+ if tzStart := remainderIdx; tzStart < len(str) && (str[tzStart] == '-' || str[tzStart] == '+') {
-+ // time zone separator is always '-' or '+' (UTC is +00)
-+ var tzSign int
-+ switch c := str[tzStart]; c {
-+ case '-':
-+ tzSign = -1
-+ case '+':
-+ tzSign = +1
-+ default:
-+ return time.Time{}, fmt.Errorf("expected '-' or '+' at position %v; got %v", tzStart, c)
-+ }
-+ tzHours := p.mustAtoi(str, tzStart+1, tzStart+3)
-+ remainderIdx += 3
-+ var tzMin, tzSec int
-+ if remainderIdx < len(str) && str[remainderIdx] == ':' {
-+ tzMin = p.mustAtoi(str, remainderIdx+1, remainderIdx+3)
-+ remainderIdx += 3
-+ }
-+ if remainderIdx < len(str) && str[remainderIdx] == ':' {
-+ tzSec = p.mustAtoi(str, remainderIdx+1, remainderIdx+3)
-+ remainderIdx += 3
-+ }
-+ tzOff = tzSign * ((tzHours * 60 * 60) + (tzMin * 60) + tzSec)
-+ }
-+ var isoYear int
-+
-+ if isBC {
-+ isoYear = 1 - year
-+ remainderIdx += 3
-+ } else {
-+ isoYear = year
-+ }
-+ if remainderIdx < len(str) {
-+ return time.Time{}, fmt.Errorf("expected end of input, got %v", str[remainderIdx:])
-+ }
-+ t := time.Date(isoYear, time.Month(month), day,
-+ hour, minute, second, nanoSec,
-+ globalLocationCache.getLocation(tzOff))
-+
-+ if currentLocation != nil {
-+ // Set the location of the returned Time based on the session's
-+ // TimeZone value, but only if the local time zone database agrees with
-+ // the remote database on the offset.
-+ lt := t.In(currentLocation)
-+ _, newOff := lt.Zone()
-+ if newOff == tzOff {
-+ t = lt
-+ }
-+ }
-+
-+ return t, p.err
-+}
-+
-+// formatTs formats t into a format postgres understands.
-+func formatTs(t time.Time) []byte {
-+ if infinityTsEnabled {
-+ // t <= -infinity : ! (t > -infinity)
-+ if !t.After(infinityTsNegative) {
-+ return []byte("-infinity")
-+ }
-+ // t >= infinity : ! (!t < infinity)
-+ if !t.Before(infinityTsPositive) {
-+ return []byte("infinity")
-+ }
-+ }
-+ return FormatTimestamp(t)
-+}
-+
-+// FormatTimestamp formats t into Postgres' text format for timestamps.
-+func FormatTimestamp(t time.Time) []byte {
-+ // Need to send dates before 0001 A.D. with " BC" suffix, instead of the
-+ // minus sign preferred by Go.
-+ // Beware, "0000" in ISO is "1 BC", "-0001" is "2 BC" and so on
-+ bc := false
-+ if t.Year() <= 0 {
-+ // flip year sign, and add 1, e.g: "0" will be "1", and "-10" will be "11"
-+ t = t.AddDate((-t.Year())*2+1, 0, 0)
-+ bc = true
-+ }
-+ b := []byte(t.Format("2006-01-02 15:04:05.999999999Z07:00"))
-+
-+ _, offset := t.Zone()
-+ offset %= 60
-+ if offset != 0 {
-+ // RFC3339Nano already printed the minus sign
-+ if offset < 0 {
-+ offset = -offset
-+ }
-+
-+ b = append(b, ':')
-+ if offset < 10 {
-+ b = append(b, '0')
-+ }
-+ b = strconv.AppendInt(b, int64(offset), 10)
-+ }
-+
-+ if bc {
-+ b = append(b, " BC"...)
-+ }
-+ return b
-+}
-+
-+// Parse a bytea value received from the server. Both "hex" and the legacy
-+// "escape" format are supported.
-+func parseBytea(s []byte) (result []byte, err error) {
-+ if len(s) >= 2 && bytes.Equal(s[:2], []byte("\\x")) {
-+ // bytea_output = hex
-+ s = s[2:] // trim off leading "\\x"
-+ result = make([]byte, hex.DecodedLen(len(s)))
-+ _, err := hex.Decode(result, s)
-+ if err != nil {
-+ return nil, err
-+ }
-+ } else {
-+ // bytea_output = escape
-+ for len(s) > 0 {
-+ if s[0] == '\\' {
-+ // escaped '\\'
-+ if len(s) >= 2 && s[1] == '\\' {
-+ result = append(result, '\\')
-+ s = s[2:]
-+ continue
-+ }
-+
-+ // '\\' followed by an octal number
-+ if len(s) < 4 {
-+ return nil, fmt.Errorf("invalid bytea sequence %v", s)
-+ }
-+ r, err := strconv.ParseInt(string(s[1:4]), 8, 9)
-+ if err != nil {
-+ return nil, fmt.Errorf("could not parse bytea value: %s", err.Error())
-+ }
-+ result = append(result, byte(r))
-+ s = s[4:]
-+ } else {
-+ // We hit an unescaped, raw byte. Try to read in as many as
-+ // possible in one go.
-+ i := bytes.IndexByte(s, '\\')
-+ if i == -1 {
-+ result = append(result, s...)
-+ break
-+ }
-+ result = append(result, s[:i]...)
-+ s = s[i:]
-+ }
-+ }
-+ }
-+
-+ return result, nil
-+}
-+
-+func encodeBytea(serverVersion int, v []byte) (result []byte) {
-+ if serverVersion >= 90000 {
-+ // Use the hex format if we know that the server supports it
-+ result = make([]byte, 2+hex.EncodedLen(len(v)))
-+ result[0] = '\\'
-+ result[1] = 'x'
-+ hex.Encode(result[2:], v)
-+ } else {
-+ // .. or resort to "escape"
-+ for _, b := range v {
-+ if b == '\\' {
-+ result = append(result, '\\', '\\')
-+ } else if b < 0x20 || b > 0x7e {
-+ result = append(result, []byte(fmt.Sprintf("\\%03o", b))...)
-+ } else {
-+ result = append(result, b)
-+ }
-+ }
-+ }
-+
-+ return result
-+}
-+
-+// NullTime represents a time.Time that may be null. NullTime implements the
-+// sql.Scanner interface so it can be used as a scan destination, similar to
-+// sql.NullString.
-+type NullTime struct {
-+ Time time.Time
-+ Valid bool // Valid is true if Time is not NULL
-+}
-+
-+// Scan implements the Scanner interface.
-+func (nt *NullTime) Scan(value interface{}) error {
-+ nt.Time, nt.Valid = value.(time.Time)
-+ return nil
-+}
-+
-+// Value implements the driver Valuer interface.
-+func (nt NullTime) Value() (driver.Value, error) {
-+ if !nt.Valid {
-+ return nil, nil
-+ }
-+ return nt.Time, nil
-+}
-diff --git a/vendor/github.com/lib/pq/encode_test.go b/vendor/github.com/lib/pq/encode_test.go
-new file mode 100644
-index 00000000000..813643c227b
---- /dev/null
-+++ b/vendor/github.com/lib/pq/encode_test.go
-@@ -0,0 +1,766 @@
-+package pq
-+
-+import (
-+ "bytes"
-+ "database/sql"
-+ "fmt"
-+ "regexp"
-+ "testing"
-+ "time"
-+
-+ "github.com/lib/pq/oid"
-+)
-+
-+func TestScanTimestamp(t *testing.T) {
-+ var nt NullTime
-+ tn := time.Now()
-+ nt.Scan(tn)
-+ if !nt.Valid {
-+ t.Errorf("Expected Valid=false")
-+ }
-+ if nt.Time != tn {
-+ t.Errorf("Time value mismatch")
-+ }
-+}
-+
-+func TestScanNilTimestamp(t *testing.T) {
-+ var nt NullTime
-+ nt.Scan(nil)
-+ if nt.Valid {
-+ t.Errorf("Expected Valid=false")
-+ }
-+}
-+
-+var timeTests = []struct {
-+ str string
-+ timeval time.Time
-+}{
-+ {"22001-02-03", time.Date(22001, time.February, 3, 0, 0, 0, 0, time.FixedZone("", 0))},
-+ {"2001-02-03", time.Date(2001, time.February, 3, 0, 0, 0, 0, time.FixedZone("", 0))},
-+ {"0001-12-31 BC", time.Date(0, time.December, 31, 0, 0, 0, 0, time.FixedZone("", 0))},
-+ {"2001-02-03 BC", time.Date(-2000, time.February, 3, 0, 0, 0, 0, time.FixedZone("", 0))},
-+ {"2001-02-03 04:05:06", time.Date(2001, time.February, 3, 4, 5, 6, 0, time.FixedZone("", 0))},
-+ {"2001-02-03 04:05:06.000001", time.Date(2001, time.February, 3, 4, 5, 6, 1000, time.FixedZone("", 0))},
-+ {"2001-02-03 04:05:06.00001", time.Date(2001, time.February, 3, 4, 5, 6, 10000, time.FixedZone("", 0))},
-+ {"2001-02-03 04:05:06.0001", time.Date(2001, time.February, 3, 4, 5, 6, 100000, time.FixedZone("", 0))},
-+ {"2001-02-03 04:05:06.001", time.Date(2001, time.February, 3, 4, 5, 6, 1000000, time.FixedZone("", 0))},
-+ {"2001-02-03 04:05:06.01", time.Date(2001, time.February, 3, 4, 5, 6, 10000000, time.FixedZone("", 0))},
-+ {"2001-02-03 04:05:06.1", time.Date(2001, time.February, 3, 4, 5, 6, 100000000, time.FixedZone("", 0))},
-+ {"2001-02-03 04:05:06.12", time.Date(2001, time.February, 3, 4, 5, 6, 120000000, time.FixedZone("", 0))},
-+ {"2001-02-03 04:05:06.123", time.Date(2001, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))},
-+ {"2001-02-03 04:05:06.1234", time.Date(2001, time.February, 3, 4, 5, 6, 123400000, time.FixedZone("", 0))},
-+ {"2001-02-03 04:05:06.12345", time.Date(2001, time.February, 3, 4, 5, 6, 123450000, time.FixedZone("", 0))},
-+ {"2001-02-03 04:05:06.123456", time.Date(2001, time.February, 3, 4, 5, 6, 123456000, time.FixedZone("", 0))},
-+ {"2001-02-03 04:05:06.123-07", time.Date(2001, time.February, 3, 4, 5, 6, 123000000,
-+ time.FixedZone("", -7*60*60))},
-+ {"2001-02-03 04:05:06-07", time.Date(2001, time.February, 3, 4, 5, 6, 0,
-+ time.FixedZone("", -7*60*60))},
-+ {"2001-02-03 04:05:06-07:42", time.Date(2001, time.February, 3, 4, 5, 6, 0,
-+ time.FixedZone("", -(7*60*60+42*60)))},
-+ {"2001-02-03 04:05:06-07:30:09", time.Date(2001, time.February, 3, 4, 5, 6, 0,
-+ time.FixedZone("", -(7*60*60+30*60+9)))},
-+ {"2001-02-03 04:05:06+07", time.Date(2001, time.February, 3, 4, 5, 6, 0,
-+ time.FixedZone("", 7*60*60))},
-+ {"0011-02-03 04:05:06 BC", time.Date(-10, time.February, 3, 4, 5, 6, 0, time.FixedZone("", 0))},
-+ {"0011-02-03 04:05:06.123 BC", time.Date(-10, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))},
-+ {"0011-02-03 04:05:06.123-07 BC", time.Date(-10, time.February, 3, 4, 5, 6, 123000000,
-+ time.FixedZone("", -7*60*60))},
-+ {"0001-02-03 04:05:06.123", time.Date(1, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))},
-+ {"0001-02-03 04:05:06.123 BC", time.Date(1, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0)).AddDate(-1, 0, 0)},
-+ {"0001-02-03 04:05:06.123 BC", time.Date(0, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))},
-+ {"0002-02-03 04:05:06.123 BC", time.Date(0, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0)).AddDate(-1, 0, 0)},
-+ {"0002-02-03 04:05:06.123 BC", time.Date(-1, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))},
-+ {"12345-02-03 04:05:06.1", time.Date(12345, time.February, 3, 4, 5, 6, 100000000, time.FixedZone("", 0))},
-+ {"123456-02-03 04:05:06.1", time.Date(123456, time.February, 3, 4, 5, 6, 100000000, time.FixedZone("", 0))},
-+}
-+
-+// Test that parsing the string results in the expected value.
-+func TestParseTs(t *testing.T) {
-+ for i, tt := range timeTests {
-+ val, err := ParseTimestamp(nil, tt.str)
-+ if err != nil {
-+ t.Errorf("%d: got error: %v", i, err)
-+ } else if val.String() != tt.timeval.String() {
-+ t.Errorf("%d: expected to parse %q into %q; got %q",
-+ i, tt.str, tt.timeval, val)
-+ }
-+ }
-+}
-+
-+var timeErrorTests = []string{
-+ "BC",
-+ " BC",
-+ "2001",
-+ "2001-2-03",
-+ "2001-02-3",
-+ "2001-02-03 ",
-+ "2001-02-03 B",
-+ "2001-02-03 04",
-+ "2001-02-03 04:",
-+ "2001-02-03 04:05",
-+ "2001-02-03 04:05 B",
-+ "2001-02-03 04:05 BC",
-+ "2001-02-03 04:05:",
-+ "2001-02-03 04:05:6",
-+ "2001-02-03 04:05:06 B",
-+ "2001-02-03 04:05:06BC",
-+ "2001-02-03 04:05:06.123 B",
-+}
-+
-+// Test that parsing the string results in an error.
-+func TestParseTsErrors(t *testing.T) {
-+ for i, tt := range timeErrorTests {
-+ _, err := ParseTimestamp(nil, tt)
-+ if err == nil {
-+ t.Errorf("%d: expected an error from parsing: %v", i, tt)
-+ }
-+ }
-+}
-+
-+// Now test that sending the value into the database and parsing it back
-+// returns the same time.Time value.
-+func TestEncodeAndParseTs(t *testing.T) {
-+ db, err := openTestConnConninfo("timezone='Etc/UTC'")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ defer db.Close()
-+
-+ for i, tt := range timeTests {
-+ var dbstr string
-+ err = db.QueryRow("SELECT ($1::timestamptz)::text", tt.timeval).Scan(&dbstr)
-+ if err != nil {
-+ t.Errorf("%d: could not send value %q to the database: %s", i, tt.timeval, err)
-+ continue
-+ }
-+
-+ val, err := ParseTimestamp(nil, dbstr)
-+ if err != nil {
-+ t.Errorf("%d: could not parse value %q: %s", i, dbstr, err)
-+ continue
-+ }
-+ val = val.In(tt.timeval.Location())
-+ if val.String() != tt.timeval.String() {
-+ t.Errorf("%d: expected to parse %q into %q; got %q", i, dbstr, tt.timeval, val)
-+ }
-+ }
-+}
-+
-+var formatTimeTests = []struct {
-+ time time.Time
-+ expected string
-+}{
-+ {time.Time{}, "0001-01-01 00:00:00Z"},
-+ {time.Date(2001, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 0)), "2001-02-03 04:05:06.123456789Z"},
-+ {time.Date(2001, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 2*60*60)), "2001-02-03 04:05:06.123456789+02:00"},
-+ {time.Date(2001, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", -6*60*60)), "2001-02-03 04:05:06.123456789-06:00"},
-+ {time.Date(2001, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -(7*60*60+30*60+9))), "2001-02-03 04:05:06-07:30:09"},
-+
-+ {time.Date(1, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 0)), "0001-02-03 04:05:06.123456789Z"},
-+ {time.Date(1, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 2*60*60)), "0001-02-03 04:05:06.123456789+02:00"},
-+ {time.Date(1, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", -6*60*60)), "0001-02-03 04:05:06.123456789-06:00"},
-+
-+ {time.Date(0, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 0)), "0001-02-03 04:05:06.123456789Z BC"},
-+ {time.Date(0, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 2*60*60)), "0001-02-03 04:05:06.123456789+02:00 BC"},
-+ {time.Date(0, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", -6*60*60)), "0001-02-03 04:05:06.123456789-06:00 BC"},
-+
-+ {time.Date(1, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -(7*60*60+30*60+9))), "0001-02-03 04:05:06-07:30:09"},
-+ {time.Date(0, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -(7*60*60+30*60+9))), "0001-02-03 04:05:06-07:30:09 BC"},
-+}
-+
-+func TestFormatTs(t *testing.T) {
-+ for i, tt := range formatTimeTests {
-+ val := string(formatTs(tt.time))
-+ if val != tt.expected {
-+ t.Errorf("%d: incorrect time format %q, want %q", i, val, tt.expected)
-+ }
-+ }
-+}
-+
-+func TestFormatTsBackend(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ var str string
-+ err := db.QueryRow("SELECT '2001-02-03T04:05:06.007-08:09:10'::time::text").Scan(&str)
-+ if err == nil {
-+ t.Fatalf("PostgreSQL is accepting an ISO timestamp input for time")
-+ }
-+
-+ for i, tt := range formatTimeTests {
-+ for _, typ := range []string{"date", "time", "timetz", "timestamp", "timestamptz"} {
-+ err = db.QueryRow("SELECT $1::"+typ+"::text", tt.time).Scan(&str)
-+ if err != nil {
-+ t.Errorf("%d: incorrect time format for %v on the backend: %v", i, typ, err)
-+ }
-+ }
-+ }
-+}
-+
-+func TestTimestampWithTimeZone(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ tx, err := db.Begin()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ defer tx.Rollback()
-+
-+ // try several different locations, all included in Go's zoneinfo.zip
-+ for _, locName := range []string{
-+ "UTC",
-+ "America/Chicago",
-+ "America/New_York",
-+ "Australia/Darwin",
-+ "Australia/Perth",
-+ } {
-+ loc, err := time.LoadLocation(locName)
-+ if err != nil {
-+ t.Logf("Could not load time zone %s - skipping", locName)
-+ continue
-+ }
-+
-+ // Postgres timestamps have a resolution of 1 microsecond, so don't
-+ // use the full range of the Nanosecond argument
-+ refTime := time.Date(2012, 11, 6, 10, 23, 42, 123456000, loc)
-+
-+ for _, pgTimeZone := range []string{"US/Eastern", "Australia/Darwin"} {
-+ // Switch Postgres's timezone to test different output timestamp formats
-+ _, err = tx.Exec(fmt.Sprintf("set time zone '%s'", pgTimeZone))
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ var gotTime time.Time
-+ row := tx.QueryRow("select $1::timestamp with time zone", refTime)
-+ err = row.Scan(&gotTime)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ if !refTime.Equal(gotTime) {
-+ t.Errorf("timestamps not equal: %s != %s", refTime, gotTime)
-+ }
-+
-+ // check that the time zone is set correctly based on TimeZone
-+ pgLoc, err := time.LoadLocation(pgTimeZone)
-+ if err != nil {
-+ t.Logf("Could not load time zone %s - skipping", pgLoc)
-+ continue
-+ }
-+ translated := refTime.In(pgLoc)
-+ if translated.String() != gotTime.String() {
-+ t.Errorf("timestamps not equal: %s != %s", translated, gotTime)
-+ }
-+ }
-+ }
-+}
-+
-+func TestTimestampWithOutTimezone(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ test := func(ts, pgts string) {
-+ r, err := db.Query("SELECT $1::timestamp", pgts)
-+ if err != nil {
-+ t.Fatalf("Could not run query: %v", err)
-+ }
-+
-+ if !r.Next() {
-+ t.Fatal("Expected at least one row")
-+ }
-+
-+ var result time.Time
-+ err = r.Scan(&result)
-+ if err != nil {
-+ t.Fatalf("Did not expect error scanning row: %v", err)
-+ }
-+
-+ expected, err := time.Parse(time.RFC3339, ts)
-+ if err != nil {
-+ t.Fatalf("Could not parse test time literal: %v", err)
-+ }
-+
-+ if !result.Equal(expected) {
-+ t.Fatalf("Expected time to match %v: got mismatch %v",
-+ expected, result)
-+ }
-+
-+ if r.Next() {
-+ t.Fatal("Expected only one row")
-+ }
-+ }
-+
-+ test("2000-01-01T00:00:00Z", "2000-01-01T00:00:00")
-+
-+ // Test higher precision time
-+ test("2013-01-04T20:14:58.80033Z", "2013-01-04 20:14:58.80033")
-+}
-+
-+func TestInfinityTimestamp(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+ var err error
-+ var resultT time.Time
-+
-+ expectedErrorStrRegexp := regexp.MustCompile(
-+ `^sql: Scan error on column index 0(, name "timestamp(tz)?"|): unsupported`)
-+
-+ type testCases []struct {
-+ Query string
-+ Param string
-+ ExpectedErrorStrRegexp *regexp.Regexp
-+ ExpectedVal interface{}
-+ }
-+ tc := testCases{
-+ {"SELECT $1::timestamp", "-infinity", expectedErrorStrRegexp, "-infinity"},
-+ {"SELECT $1::timestamptz", "-infinity", expectedErrorStrRegexp, "-infinity"},
-+ {"SELECT $1::timestamp", "infinity", expectedErrorStrRegexp, "infinity"},
-+ {"SELECT $1::timestamptz", "infinity", expectedErrorStrRegexp, "infinity"},
-+ }
-+ // try to assert []byte to time.Time
-+ for _, q := range tc {
-+ err = db.QueryRow(q.Query, q.Param).Scan(&resultT)
-+ if err == nil || !q.ExpectedErrorStrRegexp.MatchString(err.Error()) {
-+ t.Errorf("Scanning -/+infinity, expected error to match regexp %q, got %q",
-+ q.ExpectedErrorStrRegexp, err)
-+ }
-+ }
-+ // yield []byte
-+ for _, q := range tc {
-+ var resultI interface{}
-+ err = db.QueryRow(q.Query, q.Param).Scan(&resultI)
-+ if err != nil {
-+ t.Errorf("Scanning -/+infinity, expected no error, got %q", err)
-+ }
-+ result, ok := resultI.([]byte)
-+ if !ok {
-+ t.Errorf("Scanning -/+infinity, expected []byte, got %#v", resultI)
-+ }
-+ if string(result) != q.ExpectedVal {
-+ t.Errorf("Scanning -/+infinity, expected %q, got %q", q.ExpectedVal, result)
-+ }
-+ }
-+
-+ y1500 := time.Date(1500, time.January, 1, 0, 0, 0, 0, time.UTC)
-+ y2500 := time.Date(2500, time.January, 1, 0, 0, 0, 0, time.UTC)
-+ EnableInfinityTs(y1500, y2500)
-+
-+ err = db.QueryRow("SELECT $1::timestamp", "infinity").Scan(&resultT)
-+ if err != nil {
-+ t.Errorf("Scanning infinity, expected no error, got %q", err)
-+ }
-+ if !resultT.Equal(y2500) {
-+ t.Errorf("Scanning infinity, expected %q, got %q", y2500, resultT)
-+ }
-+
-+ err = db.QueryRow("SELECT $1::timestamptz", "infinity").Scan(&resultT)
-+ if err != nil {
-+ t.Errorf("Scanning infinity, expected no error, got %q", err)
-+ }
-+ if !resultT.Equal(y2500) {
-+ t.Errorf("Scanning Infinity, expected time %q, got %q", y2500, resultT.String())
-+ }
-+
-+ err = db.QueryRow("SELECT $1::timestamp", "-infinity").Scan(&resultT)
-+ if err != nil {
-+ t.Errorf("Scanning -infinity, expected no error, got %q", err)
-+ }
-+ if !resultT.Equal(y1500) {
-+ t.Errorf("Scanning -infinity, expected time %q, got %q", y1500, resultT.String())
-+ }
-+
-+ err = db.QueryRow("SELECT $1::timestamptz", "-infinity").Scan(&resultT)
-+ if err != nil {
-+ t.Errorf("Scanning -infinity, expected no error, got %q", err)
-+ }
-+ if !resultT.Equal(y1500) {
-+ t.Errorf("Scanning -infinity, expected time %q, got %q", y1500, resultT.String())
-+ }
-+
-+ ym1500 := time.Date(-1500, time.January, 1, 0, 0, 0, 0, time.UTC)
-+ y11500 := time.Date(11500, time.January, 1, 0, 0, 0, 0, time.UTC)
-+ var s string
-+ err = db.QueryRow("SELECT $1::timestamp::text", ym1500).Scan(&s)
-+ if err != nil {
-+ t.Errorf("Encoding -infinity, expected no error, got %q", err)
-+ }
-+ if s != "-infinity" {
-+ t.Errorf("Encoding -infinity, expected %q, got %q", "-infinity", s)
-+ }
-+ err = db.QueryRow("SELECT $1::timestamptz::text", ym1500).Scan(&s)
-+ if err != nil {
-+ t.Errorf("Encoding -infinity, expected no error, got %q", err)
-+ }
-+ if s != "-infinity" {
-+ t.Errorf("Encoding -infinity, expected %q, got %q", "-infinity", s)
-+ }
-+
-+ err = db.QueryRow("SELECT $1::timestamp::text", y11500).Scan(&s)
-+ if err != nil {
-+ t.Errorf("Encoding infinity, expected no error, got %q", err)
-+ }
-+ if s != "infinity" {
-+ t.Errorf("Encoding infinity, expected %q, got %q", "infinity", s)
-+ }
-+ err = db.QueryRow("SELECT $1::timestamptz::text", y11500).Scan(&s)
-+ if err != nil {
-+ t.Errorf("Encoding infinity, expected no error, got %q", err)
-+ }
-+ if s != "infinity" {
-+ t.Errorf("Encoding infinity, expected %q, got %q", "infinity", s)
-+ }
-+
-+ disableInfinityTs()
-+
-+ var panicErrorString string
-+ func() {
-+ defer func() {
-+ panicErrorString, _ = recover().(string)
-+ }()
-+ EnableInfinityTs(y2500, y1500)
-+ }()
-+ if panicErrorString != infinityTsNegativeMustBeSmaller {
-+ t.Errorf("Expected error, %q, got %q", infinityTsNegativeMustBeSmaller, panicErrorString)
-+ }
-+}
-+
-+func TestStringWithNul(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ hello0world := string("hello\x00world")
-+ _, err := db.Query("SELECT $1::text", &hello0world)
-+ if err == nil {
-+ t.Fatal("Postgres accepts a string with nul in it; " +
-+ "injection attacks may be plausible")
-+ }
-+}
-+
-+func TestByteSliceToText(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ b := []byte("hello world")
-+ row := db.QueryRow("SELECT $1::text", b)
-+
-+ var result []byte
-+ err := row.Scan(&result)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ if string(result) != string(b) {
-+ t.Fatalf("expected %v but got %v", b, result)
-+ }
-+}
-+
-+func TestStringToBytea(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ b := "hello world"
-+ row := db.QueryRow("SELECT $1::bytea", b)
-+
-+ var result []byte
-+ err := row.Scan(&result)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ if !bytes.Equal(result, []byte(b)) {
-+ t.Fatalf("expected %v but got %v", b, result)
-+ }
-+}
-+
-+func TestTextByteSliceToUUID(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ b := []byte("a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11")
-+ row := db.QueryRow("SELECT $1::uuid", b)
-+
-+ var result string
-+ err := row.Scan(&result)
-+ if forceBinaryParameters() {
-+ pqErr := err.(*Error)
-+ if pqErr == nil {
-+ t.Errorf("Expected to get error")
-+ } else if pqErr.Code != "22P03" {
-+ t.Fatalf("Expected to get invalid binary encoding error (22P03), got %s", pqErr.Code)
-+ }
-+ } else {
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ if result != string(b) {
-+ t.Fatalf("expected %v but got %v", b, result)
-+ }
-+ }
-+}
-+
-+func TestBinaryByteSlicetoUUID(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ b := []byte{'\xa0', '\xee', '\xbc', '\x99',
-+ '\x9c', '\x0b',
-+ '\x4e', '\xf8',
-+ '\xbb', '\x00', '\x6b',
-+ '\xb9', '\xbd', '\x38', '\x0a', '\x11'}
-+ row := db.QueryRow("SELECT $1::uuid", b)
-+
-+ var result string
-+ err := row.Scan(&result)
-+ if forceBinaryParameters() {
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ if result != string("a0eebc99-9c0b-4ef8-bb00-6bb9bd380a11") {
-+ t.Fatalf("expected %v but got %v", b, result)
-+ }
-+ } else {
-+ pqErr := err.(*Error)
-+ if pqErr == nil {
-+ t.Errorf("Expected to get error")
-+ } else if pqErr.Code != "22021" {
-+ t.Fatalf("Expected to get invalid byte sequence for encoding error (22021), got %s", pqErr.Code)
-+ }
-+ }
-+}
-+
-+func TestStringToUUID(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ s := "a0eebc99-9c0b-4ef8-bb00-6bb9bd380a11"
-+ row := db.QueryRow("SELECT $1::uuid", s)
-+
-+ var result string
-+ err := row.Scan(&result)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ if result != s {
-+ t.Fatalf("expected %v but got %v", s, result)
-+ }
-+}
-+
-+func TestTextByteSliceToInt(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ expected := 12345678
-+ b := []byte(fmt.Sprintf("%d", expected))
-+ row := db.QueryRow("SELECT $1::int", b)
-+
-+ var result int
-+ err := row.Scan(&result)
-+ if forceBinaryParameters() {
-+ pqErr := err.(*Error)
-+ if pqErr == nil {
-+ t.Errorf("Expected to get error")
-+ } else if pqErr.Code != "22P03" {
-+ t.Fatalf("Expected to get invalid binary encoding error (22P03), got %s", pqErr.Code)
-+ }
-+ } else {
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ if result != expected {
-+ t.Fatalf("expected %v but got %v", expected, result)
-+ }
-+ }
-+}
-+
-+func TestBinaryByteSliceToInt(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ expected := 12345678
-+ b := []byte{'\x00', '\xbc', '\x61', '\x4e'}
-+ row := db.QueryRow("SELECT $1::int", b)
-+
-+ var result int
-+ err := row.Scan(&result)
-+ if forceBinaryParameters() {
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ if result != expected {
-+ t.Fatalf("expected %v but got %v", expected, result)
-+ }
-+ } else {
-+ pqErr := err.(*Error)
-+ if pqErr == nil {
-+ t.Errorf("Expected to get error")
-+ } else if pqErr.Code != "22021" {
-+ t.Fatalf("Expected to get invalid byte sequence for encoding error (22021), got %s", pqErr.Code)
-+ }
-+ }
-+}
-+
-+func TestTextDecodeIntoString(t *testing.T) {
-+ input := []byte("hello world")
-+ want := string(input)
-+ for _, typ := range []oid.Oid{oid.T_char, oid.T_varchar, oid.T_text} {
-+ got := decode(¶meterStatus{}, input, typ, formatText)
-+ if got != want {
-+ t.Errorf("invalid string decoding output for %T(%+v), got %v but expected %v", typ, typ, got, want)
-+ }
-+ }
-+}
-+
-+func TestByteaOutputFormatEncoding(t *testing.T) {
-+ input := []byte("\\x\x00\x01\x02\xFF\xFEabcdefg0123")
-+ want := []byte("\\x5c78000102fffe6162636465666730313233")
-+ got := encode(¶meterStatus{serverVersion: 90000}, input, oid.T_bytea)
-+ if !bytes.Equal(want, got) {
-+ t.Errorf("invalid hex bytea output, got %v but expected %v", got, want)
-+ }
-+
-+ want = []byte("\\\\x\\000\\001\\002\\377\\376abcdefg0123")
-+ got = encode(¶meterStatus{serverVersion: 84000}, input, oid.T_bytea)
-+ if !bytes.Equal(want, got) {
-+ t.Errorf("invalid escape bytea output, got %v but expected %v", got, want)
-+ }
-+}
-+
-+func TestByteaOutputFormats(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ if getServerVersion(t, db) < 90000 {
-+ // skip
-+ return
-+ }
-+
-+ testByteaOutputFormat := func(f string, usePrepared bool) {
-+ expectedData := []byte("\x5c\x78\x00\xff\x61\x62\x63\x01\x08")
-+ sqlQuery := "SELECT decode('5c7800ff6162630108', 'hex')"
-+
-+ var data []byte
-+
-+ // use a txn to avoid relying on getting the same connection
-+ txn, err := db.Begin()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ defer txn.Rollback()
-+
-+ _, err = txn.Exec("SET LOCAL bytea_output TO " + f)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ var rows *sql.Rows
-+ var stmt *sql.Stmt
-+ if usePrepared {
-+ stmt, err = txn.Prepare(sqlQuery)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ rows, err = stmt.Query()
-+ } else {
-+ // use Query; QueryRow would hide the actual error
-+ rows, err = txn.Query(sqlQuery)
-+ }
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ if !rows.Next() {
-+ if rows.Err() != nil {
-+ t.Fatal(rows.Err())
-+ }
-+ t.Fatal("shouldn't happen")
-+ }
-+ err = rows.Scan(&data)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ err = rows.Close()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ if stmt != nil {
-+ err = stmt.Close()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ }
-+ if !bytes.Equal(data, expectedData) {
-+ t.Errorf("unexpected bytea value %v for format %s; expected %v", data, f, expectedData)
-+ }
-+ }
-+
-+ testByteaOutputFormat("hex", false)
-+ testByteaOutputFormat("escape", false)
-+ testByteaOutputFormat("hex", true)
-+ testByteaOutputFormat("escape", true)
-+}
-+
-+func TestAppendEncodedText(t *testing.T) {
-+ var buf []byte
-+
-+ buf = appendEncodedText(¶meterStatus{serverVersion: 90000}, buf, int64(10))
-+ buf = append(buf, '\t')
-+ buf = appendEncodedText(¶meterStatus{serverVersion: 90000}, buf, 42.0000000001)
-+ buf = append(buf, '\t')
-+ buf = appendEncodedText(¶meterStatus{serverVersion: 90000}, buf, "hello\tworld")
-+ buf = append(buf, '\t')
-+ buf = appendEncodedText(¶meterStatus{serverVersion: 90000}, buf, []byte{0, 128, 255})
-+
-+ if string(buf) != "10\t42.0000000001\thello\\tworld\t\\\\x0080ff" {
-+ t.Fatal(string(buf))
-+ }
-+}
-+
-+func TestAppendEscapedText(t *testing.T) {
-+ if esc := appendEscapedText(nil, "hallo\tescape"); string(esc) != "hallo\\tescape" {
-+ t.Fatal(string(esc))
-+ }
-+ if esc := appendEscapedText(nil, "hallo\\tescape\n"); string(esc) != "hallo\\\\tescape\\n" {
-+ t.Fatal(string(esc))
-+ }
-+ if esc := appendEscapedText(nil, "\n\r\t\f"); string(esc) != "\\n\\r\\t\f" {
-+ t.Fatal(string(esc))
-+ }
-+}
-+
-+func TestAppendEscapedTextExistingBuffer(t *testing.T) {
-+ buf := []byte("123\t")
-+ if esc := appendEscapedText(buf, "hallo\tescape"); string(esc) != "123\thallo\\tescape" {
-+ t.Fatal(string(esc))
-+ }
-+ buf = []byte("123\t")
-+ if esc := appendEscapedText(buf, "hallo\\tescape\n"); string(esc) != "123\thallo\\\\tescape\\n" {
-+ t.Fatal(string(esc))
-+ }
-+ buf = []byte("123\t")
-+ if esc := appendEscapedText(buf, "\n\r\t\f"); string(esc) != "123\t\\n\\r\\t\f" {
-+ t.Fatal(string(esc))
-+ }
-+}
-+
-+func BenchmarkAppendEscapedText(b *testing.B) {
-+ longString := ""
-+ for i := 0; i < 100; i++ {
-+ longString += "123456789\n"
-+ }
-+ for i := 0; i < b.N; i++ {
-+ appendEscapedText(nil, longString)
-+ }
-+}
-+
-+func BenchmarkAppendEscapedTextNoEscape(b *testing.B) {
-+ longString := ""
-+ for i := 0; i < 100; i++ {
-+ longString += "1234567890"
-+ }
-+ for i := 0; i < b.N; i++ {
-+ appendEscapedText(nil, longString)
-+ }
-+}
-diff --git a/vendor/github.com/lib/pq/error.go b/vendor/github.com/lib/pq/error.go
-new file mode 100644
-index 00000000000..3d66ba7c52e
---- /dev/null
-+++ b/vendor/github.com/lib/pq/error.go
-@@ -0,0 +1,515 @@
-+package pq
-+
-+import (
-+ "database/sql/driver"
-+ "fmt"
-+ "io"
-+ "net"
-+ "runtime"
-+)
-+
-+// Error severities
-+const (
-+ Efatal = "FATAL"
-+ Epanic = "PANIC"
-+ Ewarning = "WARNING"
-+ Enotice = "NOTICE"
-+ Edebug = "DEBUG"
-+ Einfo = "INFO"
-+ Elog = "LOG"
-+)
-+
-+// Error represents an error communicating with the server.
-+//
-+// See http://www.postgresql.org/docs/current/static/protocol-error-fields.html for details of the fields
-+type Error struct {
-+ Severity string
-+ Code ErrorCode
-+ Message string
-+ Detail string
-+ Hint string
-+ Position string
-+ InternalPosition string
-+ InternalQuery string
-+ Where string
-+ Schema string
-+ Table string
-+ Column string
-+ DataTypeName string
-+ Constraint string
-+ File string
-+ Line string
-+ Routine string
-+}
-+
-+// ErrorCode is a five-character error code.
-+type ErrorCode string
-+
-+// Name returns a more human friendly rendering of the error code, namely the
-+// "condition name".
-+//
-+// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for
-+// details.
-+func (ec ErrorCode) Name() string {
-+ return errorCodeNames[ec]
-+}
-+
-+// ErrorClass is only the class part of an error code.
-+type ErrorClass string
-+
-+// Name returns the condition name of an error class. It is equivalent to the
-+// condition name of the "standard" error code (i.e. the one having the last
-+// three characters "000").
-+func (ec ErrorClass) Name() string {
-+ return errorCodeNames[ErrorCode(ec+"000")]
-+}
-+
-+// Class returns the error class, e.g. "28".
-+//
-+// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for
-+// details.
-+func (ec ErrorCode) Class() ErrorClass {
-+ return ErrorClass(ec[0:2])
-+}
-+
-+// errorCodeNames is a mapping between the five-character error codes and the
-+// human readable "condition names". It is derived from the list at
-+// http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html
-+var errorCodeNames = map[ErrorCode]string{
-+ // Class 00 - Successful Completion
-+ "00000": "successful_completion",
-+ // Class 01 - Warning
-+ "01000": "warning",
-+ "0100C": "dynamic_result_sets_returned",
-+ "01008": "implicit_zero_bit_padding",
-+ "01003": "null_value_eliminated_in_set_function",
-+ "01007": "privilege_not_granted",
-+ "01006": "privilege_not_revoked",
-+ "01004": "string_data_right_truncation",
-+ "01P01": "deprecated_feature",
-+ // Class 02 - No Data (this is also a warning class per the SQL standard)
-+ "02000": "no_data",
-+ "02001": "no_additional_dynamic_result_sets_returned",
-+ // Class 03 - SQL Statement Not Yet Complete
-+ "03000": "sql_statement_not_yet_complete",
-+ // Class 08 - Connection Exception
-+ "08000": "connection_exception",
-+ "08003": "connection_does_not_exist",
-+ "08006": "connection_failure",
-+ "08001": "sqlclient_unable_to_establish_sqlconnection",
-+ "08004": "sqlserver_rejected_establishment_of_sqlconnection",
-+ "08007": "transaction_resolution_unknown",
-+ "08P01": "protocol_violation",
-+ // Class 09 - Triggered Action Exception
-+ "09000": "triggered_action_exception",
-+ // Class 0A - Feature Not Supported
-+ "0A000": "feature_not_supported",
-+ // Class 0B - Invalid Transaction Initiation
-+ "0B000": "invalid_transaction_initiation",
-+ // Class 0F - Locator Exception
-+ "0F000": "locator_exception",
-+ "0F001": "invalid_locator_specification",
-+ // Class 0L - Invalid Grantor
-+ "0L000": "invalid_grantor",
-+ "0LP01": "invalid_grant_operation",
-+ // Class 0P - Invalid Role Specification
-+ "0P000": "invalid_role_specification",
-+ // Class 0Z - Diagnostics Exception
-+ "0Z000": "diagnostics_exception",
-+ "0Z002": "stacked_diagnostics_accessed_without_active_handler",
-+ // Class 20 - Case Not Found
-+ "20000": "case_not_found",
-+ // Class 21 - Cardinality Violation
-+ "21000": "cardinality_violation",
-+ // Class 22 - Data Exception
-+ "22000": "data_exception",
-+ "2202E": "array_subscript_error",
-+ "22021": "character_not_in_repertoire",
-+ "22008": "datetime_field_overflow",
-+ "22012": "division_by_zero",
-+ "22005": "error_in_assignment",
-+ "2200B": "escape_character_conflict",
-+ "22022": "indicator_overflow",
-+ "22015": "interval_field_overflow",
-+ "2201E": "invalid_argument_for_logarithm",
-+ "22014": "invalid_argument_for_ntile_function",
-+ "22016": "invalid_argument_for_nth_value_function",
-+ "2201F": "invalid_argument_for_power_function",
-+ "2201G": "invalid_argument_for_width_bucket_function",
-+ "22018": "invalid_character_value_for_cast",
-+ "22007": "invalid_datetime_format",
-+ "22019": "invalid_escape_character",
-+ "2200D": "invalid_escape_octet",
-+ "22025": "invalid_escape_sequence",
-+ "22P06": "nonstandard_use_of_escape_character",
-+ "22010": "invalid_indicator_parameter_value",
-+ "22023": "invalid_parameter_value",
-+ "2201B": "invalid_regular_expression",
-+ "2201W": "invalid_row_count_in_limit_clause",
-+ "2201X": "invalid_row_count_in_result_offset_clause",
-+ "22009": "invalid_time_zone_displacement_value",
-+ "2200C": "invalid_use_of_escape_character",
-+ "2200G": "most_specific_type_mismatch",
-+ "22004": "null_value_not_allowed",
-+ "22002": "null_value_no_indicator_parameter",
-+ "22003": "numeric_value_out_of_range",
-+ "2200H": "sequence_generator_limit_exceeded",
-+ "22026": "string_data_length_mismatch",
-+ "22001": "string_data_right_truncation",
-+ "22011": "substring_error",
-+ "22027": "trim_error",
-+ "22024": "unterminated_c_string",
-+ "2200F": "zero_length_character_string",
-+ "22P01": "floating_point_exception",
-+ "22P02": "invalid_text_representation",
-+ "22P03": "invalid_binary_representation",
-+ "22P04": "bad_copy_file_format",
-+ "22P05": "untranslatable_character",
-+ "2200L": "not_an_xml_document",
-+ "2200M": "invalid_xml_document",
-+ "2200N": "invalid_xml_content",
-+ "2200S": "invalid_xml_comment",
-+ "2200T": "invalid_xml_processing_instruction",
-+ // Class 23 - Integrity Constraint Violation
-+ "23000": "integrity_constraint_violation",
-+ "23001": "restrict_violation",
-+ "23502": "not_null_violation",
-+ "23503": "foreign_key_violation",
-+ "23505": "unique_violation",
-+ "23514": "check_violation",
-+ "23P01": "exclusion_violation",
-+ // Class 24 - Invalid Cursor State
-+ "24000": "invalid_cursor_state",
-+ // Class 25 - Invalid Transaction State
-+ "25000": "invalid_transaction_state",
-+ "25001": "active_sql_transaction",
-+ "25002": "branch_transaction_already_active",
-+ "25008": "held_cursor_requires_same_isolation_level",
-+ "25003": "inappropriate_access_mode_for_branch_transaction",
-+ "25004": "inappropriate_isolation_level_for_branch_transaction",
-+ "25005": "no_active_sql_transaction_for_branch_transaction",
-+ "25006": "read_only_sql_transaction",
-+ "25007": "schema_and_data_statement_mixing_not_supported",
-+ "25P01": "no_active_sql_transaction",
-+ "25P02": "in_failed_sql_transaction",
-+ // Class 26 - Invalid SQL Statement Name
-+ "26000": "invalid_sql_statement_name",
-+ // Class 27 - Triggered Data Change Violation
-+ "27000": "triggered_data_change_violation",
-+ // Class 28 - Invalid Authorization Specification
-+ "28000": "invalid_authorization_specification",
-+ "28P01": "invalid_password",
-+ // Class 2B - Dependent Privilege Descriptors Still Exist
-+ "2B000": "dependent_privilege_descriptors_still_exist",
-+ "2BP01": "dependent_objects_still_exist",
-+ // Class 2D - Invalid Transaction Termination
-+ "2D000": "invalid_transaction_termination",
-+ // Class 2F - SQL Routine Exception
-+ "2F000": "sql_routine_exception",
-+ "2F005": "function_executed_no_return_statement",
-+ "2F002": "modifying_sql_data_not_permitted",
-+ "2F003": "prohibited_sql_statement_attempted",
-+ "2F004": "reading_sql_data_not_permitted",
-+ // Class 34 - Invalid Cursor Name
-+ "34000": "invalid_cursor_name",
-+ // Class 38 - External Routine Exception
-+ "38000": "external_routine_exception",
-+ "38001": "containing_sql_not_permitted",
-+ "38002": "modifying_sql_data_not_permitted",
-+ "38003": "prohibited_sql_statement_attempted",
-+ "38004": "reading_sql_data_not_permitted",
-+ // Class 39 - External Routine Invocation Exception
-+ "39000": "external_routine_invocation_exception",
-+ "39001": "invalid_sqlstate_returned",
-+ "39004": "null_value_not_allowed",
-+ "39P01": "trigger_protocol_violated",
-+ "39P02": "srf_protocol_violated",
-+ // Class 3B - Savepoint Exception
-+ "3B000": "savepoint_exception",
-+ "3B001": "invalid_savepoint_specification",
-+ // Class 3D - Invalid Catalog Name
-+ "3D000": "invalid_catalog_name",
-+ // Class 3F - Invalid Schema Name
-+ "3F000": "invalid_schema_name",
-+ // Class 40 - Transaction Rollback
-+ "40000": "transaction_rollback",
-+ "40002": "transaction_integrity_constraint_violation",
-+ "40001": "serialization_failure",
-+ "40003": "statement_completion_unknown",
-+ "40P01": "deadlock_detected",
-+ // Class 42 - Syntax Error or Access Rule Violation
-+ "42000": "syntax_error_or_access_rule_violation",
-+ "42601": "syntax_error",
-+ "42501": "insufficient_privilege",
-+ "42846": "cannot_coerce",
-+ "42803": "grouping_error",
-+ "42P20": "windowing_error",
-+ "42P19": "invalid_recursion",
-+ "42830": "invalid_foreign_key",
-+ "42602": "invalid_name",
-+ "42622": "name_too_long",
-+ "42939": "reserved_name",
-+ "42804": "datatype_mismatch",
-+ "42P18": "indeterminate_datatype",
-+ "42P21": "collation_mismatch",
-+ "42P22": "indeterminate_collation",
-+ "42809": "wrong_object_type",
-+ "42703": "undefined_column",
-+ "42883": "undefined_function",
-+ "42P01": "undefined_table",
-+ "42P02": "undefined_parameter",
-+ "42704": "undefined_object",
-+ "42701": "duplicate_column",
-+ "42P03": "duplicate_cursor",
-+ "42P04": "duplicate_database",
-+ "42723": "duplicate_function",
-+ "42P05": "duplicate_prepared_statement",
-+ "42P06": "duplicate_schema",
-+ "42P07": "duplicate_table",
-+ "42712": "duplicate_alias",
-+ "42710": "duplicate_object",
-+ "42702": "ambiguous_column",
-+ "42725": "ambiguous_function",
-+ "42P08": "ambiguous_parameter",
-+ "42P09": "ambiguous_alias",
-+ "42P10": "invalid_column_reference",
-+ "42611": "invalid_column_definition",
-+ "42P11": "invalid_cursor_definition",
-+ "42P12": "invalid_database_definition",
-+ "42P13": "invalid_function_definition",
-+ "42P14": "invalid_prepared_statement_definition",
-+ "42P15": "invalid_schema_definition",
-+ "42P16": "invalid_table_definition",
-+ "42P17": "invalid_object_definition",
-+ // Class 44 - WITH CHECK OPTION Violation
-+ "44000": "with_check_option_violation",
-+ // Class 53 - Insufficient Resources
-+ "53000": "insufficient_resources",
-+ "53100": "disk_full",
-+ "53200": "out_of_memory",
-+ "53300": "too_many_connections",
-+ "53400": "configuration_limit_exceeded",
-+ // Class 54 - Program Limit Exceeded
-+ "54000": "program_limit_exceeded",
-+ "54001": "statement_too_complex",
-+ "54011": "too_many_columns",
-+ "54023": "too_many_arguments",
-+ // Class 55 - Object Not In Prerequisite State
-+ "55000": "object_not_in_prerequisite_state",
-+ "55006": "object_in_use",
-+ "55P02": "cant_change_runtime_param",
-+ "55P03": "lock_not_available",
-+ // Class 57 - Operator Intervention
-+ "57000": "operator_intervention",
-+ "57014": "query_canceled",
-+ "57P01": "admin_shutdown",
-+ "57P02": "crash_shutdown",
-+ "57P03": "cannot_connect_now",
-+ "57P04": "database_dropped",
-+ // Class 58 - System Error (errors external to PostgreSQL itself)
-+ "58000": "system_error",
-+ "58030": "io_error",
-+ "58P01": "undefined_file",
-+ "58P02": "duplicate_file",
-+ // Class F0 - Configuration File Error
-+ "F0000": "config_file_error",
-+ "F0001": "lock_file_exists",
-+ // Class HV - Foreign Data Wrapper Error (SQL/MED)
-+ "HV000": "fdw_error",
-+ "HV005": "fdw_column_name_not_found",
-+ "HV002": "fdw_dynamic_parameter_value_needed",
-+ "HV010": "fdw_function_sequence_error",
-+ "HV021": "fdw_inconsistent_descriptor_information",
-+ "HV024": "fdw_invalid_attribute_value",
-+ "HV007": "fdw_invalid_column_name",
-+ "HV008": "fdw_invalid_column_number",
-+ "HV004": "fdw_invalid_data_type",
-+ "HV006": "fdw_invalid_data_type_descriptors",
-+ "HV091": "fdw_invalid_descriptor_field_identifier",
-+ "HV00B": "fdw_invalid_handle",
-+ "HV00C": "fdw_invalid_option_index",
-+ "HV00D": "fdw_invalid_option_name",
-+ "HV090": "fdw_invalid_string_length_or_buffer_length",
-+ "HV00A": "fdw_invalid_string_format",
-+ "HV009": "fdw_invalid_use_of_null_pointer",
-+ "HV014": "fdw_too_many_handles",
-+ "HV001": "fdw_out_of_memory",
-+ "HV00P": "fdw_no_schemas",
-+ "HV00J": "fdw_option_name_not_found",
-+ "HV00K": "fdw_reply_handle",
-+ "HV00Q": "fdw_schema_not_found",
-+ "HV00R": "fdw_table_not_found",
-+ "HV00L": "fdw_unable_to_create_execution",
-+ "HV00M": "fdw_unable_to_create_reply",
-+ "HV00N": "fdw_unable_to_establish_connection",
-+ // Class P0 - PL/pgSQL Error
-+ "P0000": "plpgsql_error",
-+ "P0001": "raise_exception",
-+ "P0002": "no_data_found",
-+ "P0003": "too_many_rows",
-+ // Class XX - Internal Error
-+ "XX000": "internal_error",
-+ "XX001": "data_corrupted",
-+ "XX002": "index_corrupted",
-+}
-+
-+func parseError(r *readBuf) *Error {
-+ err := new(Error)
-+ for t := r.byte(); t != 0; t = r.byte() {
-+ msg := r.string()
-+ switch t {
-+ case 'S':
-+ err.Severity = msg
-+ case 'C':
-+ err.Code = ErrorCode(msg)
-+ case 'M':
-+ err.Message = msg
-+ case 'D':
-+ err.Detail = msg
-+ case 'H':
-+ err.Hint = msg
-+ case 'P':
-+ err.Position = msg
-+ case 'p':
-+ err.InternalPosition = msg
-+ case 'q':
-+ err.InternalQuery = msg
-+ case 'W':
-+ err.Where = msg
-+ case 's':
-+ err.Schema = msg
-+ case 't':
-+ err.Table = msg
-+ case 'c':
-+ err.Column = msg
-+ case 'd':
-+ err.DataTypeName = msg
-+ case 'n':
-+ err.Constraint = msg
-+ case 'F':
-+ err.File = msg
-+ case 'L':
-+ err.Line = msg
-+ case 'R':
-+ err.Routine = msg
-+ }
-+ }
-+ return err
-+}
-+
-+// Fatal returns true if the Error Severity is fatal.
-+func (err *Error) Fatal() bool {
-+ return err.Severity == Efatal
-+}
-+
-+// Get implements the legacy PGError interface. New code should use the fields
-+// of the Error struct directly.
-+func (err *Error) Get(k byte) (v string) {
-+ switch k {
-+ case 'S':
-+ return err.Severity
-+ case 'C':
-+ return string(err.Code)
-+ case 'M':
-+ return err.Message
-+ case 'D':
-+ return err.Detail
-+ case 'H':
-+ return err.Hint
-+ case 'P':
-+ return err.Position
-+ case 'p':
-+ return err.InternalPosition
-+ case 'q':
-+ return err.InternalQuery
-+ case 'W':
-+ return err.Where
-+ case 's':
-+ return err.Schema
-+ case 't':
-+ return err.Table
-+ case 'c':
-+ return err.Column
-+ case 'd':
-+ return err.DataTypeName
-+ case 'n':
-+ return err.Constraint
-+ case 'F':
-+ return err.File
-+ case 'L':
-+ return err.Line
-+ case 'R':
-+ return err.Routine
-+ }
-+ return ""
-+}
-+
-+func (err Error) Error() string {
-+ return "pq: " + err.Message
-+}
-+
-+// PGError is an interface used by previous versions of pq. It is provided
-+// only to support legacy code. New code should use the Error type.
-+type PGError interface {
-+ Error() string
-+ Fatal() bool
-+ Get(k byte) (v string)
-+}
-+
-+func errorf(s string, args ...interface{}) {
-+ panic(fmt.Errorf("pq: %s", fmt.Sprintf(s, args...)))
-+}
-+
-+// TODO(ainar-g) Rename to errorf after removing panics.
-+func fmterrorf(s string, args ...interface{}) error {
-+ return fmt.Errorf("pq: %s", fmt.Sprintf(s, args...))
-+}
-+
-+func errRecoverNoErrBadConn(err *error) {
-+ e := recover()
-+ if e == nil {
-+ // Do nothing
-+ return
-+ }
-+ var ok bool
-+ *err, ok = e.(error)
-+ if !ok {
-+ *err = fmt.Errorf("pq: unexpected error: %#v", e)
-+ }
-+}
-+
-+func (cn *conn) errRecover(err *error) {
-+ e := recover()
-+ switch v := e.(type) {
-+ case nil:
-+ // Do nothing
-+ case runtime.Error:
-+ cn.bad = true
-+ panic(v)
-+ case *Error:
-+ if v.Fatal() {
-+ *err = driver.ErrBadConn
-+ } else {
-+ *err = v
-+ }
-+ case *net.OpError:
-+ cn.bad = true
-+ *err = v
-+ case error:
-+ if v == io.EOF || v.(error).Error() == "remote error: handshake failure" {
-+ *err = driver.ErrBadConn
-+ } else {
-+ *err = v
-+ }
-+
-+ default:
-+ cn.bad = true
-+ panic(fmt.Sprintf("unknown error: %#v", e))
-+ }
-+
-+ // Any time we return ErrBadConn, we need to remember it since *Tx doesn't
-+ // mark the connection bad in database/sql.
-+ if *err == driver.ErrBadConn {
-+ cn.bad = true
-+ }
-+}
-diff --git a/vendor/github.com/lib/pq/example/listen/doc.go b/vendor/github.com/lib/pq/example/listen/doc.go
-new file mode 100644
-index 00000000000..91e2ddbaddc
---- /dev/null
-+++ b/vendor/github.com/lib/pq/example/listen/doc.go
-@@ -0,0 +1,98 @@
-+/*
-+
-+Package listen is a self-contained Go program which uses the LISTEN / NOTIFY
-+mechanism to avoid polling the database while waiting for more work to arrive.
-+
-+ //
-+ // You can see the program in action by defining a function similar to
-+ // the following:
-+ //
-+ // CREATE OR REPLACE FUNCTION public.get_work()
-+ // RETURNS bigint
-+ // LANGUAGE sql
-+ // AS $$
-+ // SELECT CASE WHEN random() >= 0.2 THEN int8 '1' END
-+ // $$
-+ // ;
-+
-+ package main
-+
-+ import (
-+ "database/sql"
-+ "fmt"
-+ "time"
-+
-+ "github.com/lib/pq"
-+ )
-+
-+ func doWork(db *sql.DB, work int64) {
-+ // work here
-+ }
-+
-+ func getWork(db *sql.DB) {
-+ for {
-+ // get work from the database here
-+ var work sql.NullInt64
-+ err := db.QueryRow("SELECT get_work()").Scan(&work)
-+ if err != nil {
-+ fmt.Println("call to get_work() failed: ", err)
-+ time.Sleep(10 * time.Second)
-+ continue
-+ }
-+ if !work.Valid {
-+ // no more work to do
-+ fmt.Println("ran out of work")
-+ return
-+ }
-+
-+ fmt.Println("starting work on ", work.Int64)
-+ go doWork(db, work.Int64)
-+ }
-+ }
-+
-+ func waitForNotification(l *pq.Listener) {
-+ select {
-+ case <-l.Notify:
-+ fmt.Println("received notification, new work available")
-+ case <-time.After(90 * time.Second):
-+ go l.Ping()
-+ // Check if there's more work available, just in case it takes
-+ // a while for the Listener to notice connection loss and
-+ // reconnect.
-+ fmt.Println("received no work for 90 seconds, checking for new work")
-+ }
-+ }
-+
-+ func main() {
-+ var conninfo string = ""
-+
-+ db, err := sql.Open("postgres", conninfo)
-+ if err != nil {
-+ panic(err)
-+ }
-+
-+ reportProblem := func(ev pq.ListenerEventType, err error) {
-+ if err != nil {
-+ fmt.Println(err.Error())
-+ }
-+ }
-+
-+ minReconn := 10 * time.Second
-+ maxReconn := time.Minute
-+ listener := pq.NewListener(conninfo, minReconn, maxReconn, reportProblem)
-+ err = listener.Listen("getwork")
-+ if err != nil {
-+ panic(err)
-+ }
-+
-+ fmt.Println("entering main loop")
-+ for {
-+ // process all available work before waiting for notifications
-+ getWork(db)
-+ waitForNotification(listener)
-+ }
-+ }
-+
-+
-+*/
-+package listen
-diff --git a/vendor/github.com/lib/pq/go.mod b/vendor/github.com/lib/pq/go.mod
-new file mode 100644
-index 00000000000..edf0b343fd1
---- /dev/null
-+++ b/vendor/github.com/lib/pq/go.mod
-@@ -0,0 +1 @@
-+module github.com/lib/pq
-diff --git a/vendor/github.com/lib/pq/go18_test.go b/vendor/github.com/lib/pq/go18_test.go
-new file mode 100644
-index 00000000000..72cd71fe986
---- /dev/null
-+++ b/vendor/github.com/lib/pq/go18_test.go
-@@ -0,0 +1,319 @@
-+package pq
-+
-+import (
-+ "context"
-+ "database/sql"
-+ "runtime"
-+ "strings"
-+ "testing"
-+ "time"
-+)
-+
-+func TestMultipleSimpleQuery(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ rows, err := db.Query("select 1; set time zone default; select 2; select 3")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ defer rows.Close()
-+
-+ var i int
-+ for rows.Next() {
-+ if err := rows.Scan(&i); err != nil {
-+ t.Fatal(err)
-+ }
-+ if i != 1 {
-+ t.Fatalf("expected 1, got %d", i)
-+ }
-+ }
-+ if !rows.NextResultSet() {
-+ t.Fatal("expected more result sets", rows.Err())
-+ }
-+ for rows.Next() {
-+ if err := rows.Scan(&i); err != nil {
-+ t.Fatal(err)
-+ }
-+ if i != 2 {
-+ t.Fatalf("expected 2, got %d", i)
-+ }
-+ }
-+
-+ // Make sure that if we ignore a result we can still query.
-+
-+ rows, err = db.Query("select 4; select 5")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ defer rows.Close()
-+
-+ for rows.Next() {
-+ if err := rows.Scan(&i); err != nil {
-+ t.Fatal(err)
-+ }
-+ if i != 4 {
-+ t.Fatalf("expected 4, got %d", i)
-+ }
-+ }
-+ if !rows.NextResultSet() {
-+ t.Fatal("expected more result sets", rows.Err())
-+ }
-+ for rows.Next() {
-+ if err := rows.Scan(&i); err != nil {
-+ t.Fatal(err)
-+ }
-+ if i != 5 {
-+ t.Fatalf("expected 5, got %d", i)
-+ }
-+ }
-+ if rows.NextResultSet() {
-+ t.Fatal("unexpected result set")
-+ }
-+}
-+
-+const contextRaceIterations = 100
-+
-+func TestContextCancelExec(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ ctx, cancel := context.WithCancel(context.Background())
-+
-+ // Delay execution for just a bit until db.ExecContext has begun.
-+ defer time.AfterFunc(time.Millisecond*10, cancel).Stop()
-+
-+ // Not canceled until after the exec has started.
-+ if _, err := db.ExecContext(ctx, "select pg_sleep(1)"); err == nil {
-+ t.Fatal("expected error")
-+ } else if err.Error() != "pq: canceling statement due to user request" {
-+ t.Fatalf("unexpected error: %s", err)
-+ }
-+
-+ // Context is already canceled, so error should come before execution.
-+ if _, err := db.ExecContext(ctx, "select pg_sleep(1)"); err == nil {
-+ t.Fatal("expected error")
-+ } else if err.Error() != "context canceled" {
-+ t.Fatalf("unexpected error: %s", err)
-+ }
-+
-+ for i := 0; i < contextRaceIterations; i++ {
-+ func() {
-+ ctx, cancel := context.WithCancel(context.Background())
-+ defer cancel()
-+ if _, err := db.ExecContext(ctx, "select 1"); err != nil {
-+ t.Fatal(err)
-+ }
-+ }()
-+
-+ if _, err := db.Exec("select 1"); err != nil {
-+ t.Fatal(err)
-+ }
-+ }
-+}
-+
-+func TestContextCancelQuery(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ ctx, cancel := context.WithCancel(context.Background())
-+
-+ // Delay execution for just a bit until db.QueryContext has begun.
-+ defer time.AfterFunc(time.Millisecond*10, cancel).Stop()
-+
-+ // Not canceled until after the exec has started.
-+ if _, err := db.QueryContext(ctx, "select pg_sleep(1)"); err == nil {
-+ t.Fatal("expected error")
-+ } else if err.Error() != "pq: canceling statement due to user request" {
-+ t.Fatalf("unexpected error: %s", err)
-+ }
-+
-+ // Context is already canceled, so error should come before execution.
-+ if _, err := db.QueryContext(ctx, "select pg_sleep(1)"); err == nil {
-+ t.Fatal("expected error")
-+ } else if err.Error() != "context canceled" {
-+ t.Fatalf("unexpected error: %s", err)
-+ }
-+
-+ for i := 0; i < contextRaceIterations; i++ {
-+ func() {
-+ ctx, cancel := context.WithCancel(context.Background())
-+ rows, err := db.QueryContext(ctx, "select 1")
-+ cancel()
-+ if err != nil {
-+ t.Fatal(err)
-+ } else if err := rows.Close(); err != nil {
-+ t.Fatal(err)
-+ }
-+ }()
-+
-+ if rows, err := db.Query("select 1"); err != nil {
-+ t.Fatal(err)
-+ } else if err := rows.Close(); err != nil {
-+ t.Fatal(err)
-+ }
-+ }
-+}
-+
-+// TestIssue617 tests that a failed query in QueryContext doesn't lead to a
-+// goroutine leak.
-+func TestIssue617(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ const N = 10
-+
-+ numGoroutineStart := runtime.NumGoroutine()
-+ for i := 0; i < N; i++ {
-+ func() {
-+ ctx, cancel := context.WithCancel(context.Background())
-+ defer cancel()
-+ _, err := db.QueryContext(ctx, `SELECT * FROM DOESNOTEXIST`)
-+ pqErr, _ := err.(*Error)
-+ // Expecting "pq: relation \"doesnotexist\" does not exist" error.
-+ if err == nil || pqErr == nil || pqErr.Code != "42P01" {
-+ t.Fatalf("expected undefined table error, got %v", err)
-+ }
-+ }()
-+ }
-+ numGoroutineFinish := runtime.NumGoroutine()
-+
-+ // We use N/2 and not N because the GC and other actors may increase or
-+ // decrease the number of goroutines.
-+ if numGoroutineFinish-numGoroutineStart >= N/2 {
-+ t.Errorf("goroutine leak detected, was %d, now %d", numGoroutineStart, numGoroutineFinish)
-+ }
-+}
-+
-+func TestContextCancelBegin(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ ctx, cancel := context.WithCancel(context.Background())
-+ tx, err := db.BeginTx(ctx, nil)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ // Delay execution for just a bit until tx.Exec has begun.
-+ defer time.AfterFunc(time.Millisecond*10, cancel).Stop()
-+
-+ // Not canceled until after the exec has started.
-+ if _, err := tx.Exec("select pg_sleep(1)"); err == nil {
-+ t.Fatal("expected error")
-+ } else if err.Error() != "pq: canceling statement due to user request" {
-+ t.Fatalf("unexpected error: %s", err)
-+ }
-+
-+ // Transaction is canceled, so expect an error.
-+ if _, err := tx.Query("select pg_sleep(1)"); err == nil {
-+ t.Fatal("expected error")
-+ } else if err != sql.ErrTxDone {
-+ t.Fatalf("unexpected error: %s", err)
-+ }
-+
-+ // Context is canceled, so cannot begin a transaction.
-+ if _, err := db.BeginTx(ctx, nil); err == nil {
-+ t.Fatal("expected error")
-+ } else if err.Error() != "context canceled" {
-+ t.Fatalf("unexpected error: %s", err)
-+ }
-+
-+ for i := 0; i < contextRaceIterations; i++ {
-+ func() {
-+ ctx, cancel := context.WithCancel(context.Background())
-+ tx, err := db.BeginTx(ctx, nil)
-+ cancel()
-+ if err != nil {
-+ t.Fatal(err)
-+ } else if err := tx.Rollback(); err != nil &&
-+ err.Error() != "pq: canceling statement due to user request" &&
-+ err != sql.ErrTxDone {
-+ t.Fatal(err)
-+ }
-+ }()
-+
-+ if tx, err := db.Begin(); err != nil {
-+ t.Fatal(err)
-+ } else if err := tx.Rollback(); err != nil {
-+ t.Fatal(err)
-+ }
-+ }
-+}
-+
-+func TestTxOptions(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+ ctx := context.Background()
-+
-+ tests := []struct {
-+ level sql.IsolationLevel
-+ isolation string
-+ }{
-+ {
-+ level: sql.LevelDefault,
-+ isolation: "",
-+ },
-+ {
-+ level: sql.LevelReadUncommitted,
-+ isolation: "read uncommitted",
-+ },
-+ {
-+ level: sql.LevelReadCommitted,
-+ isolation: "read committed",
-+ },
-+ {
-+ level: sql.LevelRepeatableRead,
-+ isolation: "repeatable read",
-+ },
-+ {
-+ level: sql.LevelSerializable,
-+ isolation: "serializable",
-+ },
-+ }
-+
-+ for _, test := range tests {
-+ for _, ro := range []bool{true, false} {
-+ tx, err := db.BeginTx(ctx, &sql.TxOptions{
-+ Isolation: test.level,
-+ ReadOnly: ro,
-+ })
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ var isolation string
-+ err = tx.QueryRow("select current_setting('transaction_isolation')").Scan(&isolation)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ if test.isolation != "" && isolation != test.isolation {
-+ t.Errorf("wrong isolation level: %s != %s", isolation, test.isolation)
-+ }
-+
-+ var isRO string
-+ err = tx.QueryRow("select current_setting('transaction_read_only')").Scan(&isRO)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ if ro != (isRO == "on") {
-+ t.Errorf("read/[write,only] not set: %t != %s for level %s",
-+ ro, isRO, test.isolation)
-+ }
-+
-+ tx.Rollback()
-+ }
-+ }
-+
-+ _, err := db.BeginTx(ctx, &sql.TxOptions{
-+ Isolation: sql.LevelLinearizable,
-+ })
-+ if err == nil {
-+ t.Fatal("expected LevelLinearizable to fail")
-+ }
-+ if !strings.Contains(err.Error(), "isolation level not supported") {
-+ t.Errorf("Expected error to mention isolation level, got %q", err)
-+ }
-+}
-diff --git a/vendor/github.com/lib/pq/go19_test.go b/vendor/github.com/lib/pq/go19_test.go
-new file mode 100644
-index 00000000000..59c57fd4d71
---- /dev/null
-+++ b/vendor/github.com/lib/pq/go19_test.go
-@@ -0,0 +1,91 @@
-+// +build go1.9
-+
-+package pq
-+
-+import (
-+ "context"
-+ "database/sql"
-+ "database/sql/driver"
-+ "reflect"
-+ "testing"
-+)
-+
-+func TestPing(t *testing.T) {
-+ ctx, cancel := context.WithCancel(context.Background())
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ if _, ok := reflect.TypeOf(db).MethodByName("Conn"); !ok {
-+ t.Skipf("Conn method undefined on type %T, skipping test (requires at least go1.9)", db)
-+ }
-+
-+ if err := db.PingContext(ctx); err != nil {
-+ t.Fatal("expected Ping to succeed")
-+ }
-+ defer cancel()
-+
-+ // grab a connection
-+ conn, err := db.Conn(ctx)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ // start a transaction and read backend pid of our connection
-+ tx, err := conn.BeginTx(ctx, &sql.TxOptions{
-+ Isolation: sql.LevelDefault,
-+ ReadOnly: true,
-+ })
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ rows, err := tx.Query("SELECT pg_backend_pid()")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ defer rows.Close()
-+
-+ // read the pid from result
-+ var pid int
-+ for rows.Next() {
-+ if err := rows.Scan(&pid); err != nil {
-+ t.Fatal(err)
-+ }
-+ }
-+ if rows.Err() != nil {
-+ t.Fatal(err)
-+ }
-+ if err := tx.Rollback(); err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ // kill the process which handles our connection and test if the ping fails
-+ if _, err := db.Exec("SELECT pg_terminate_backend($1)", pid); err != nil {
-+ t.Fatal(err)
-+ }
-+ if err := conn.PingContext(ctx); err != driver.ErrBadConn {
-+ t.Fatalf("expected error %s, instead got %s", driver.ErrBadConn, err)
-+ }
-+}
-+
-+func TestCommitInFailedTransactionWithCancelContext(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ ctx, cancel := context.WithCancel(context.Background())
-+ defer cancel()
-+
-+ txn, err := db.BeginTx(ctx, nil)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ rows, err := txn.Query("SELECT error")
-+ if err == nil {
-+ rows.Close()
-+ t.Fatal("expected failure")
-+ }
-+ err = txn.Commit()
-+ if err != ErrInFailedTransaction {
-+ t.Fatalf("expected ErrInFailedTransaction; got %#v", err)
-+ }
-+}
-diff --git a/vendor/github.com/lib/pq/hstore/hstore.go b/vendor/github.com/lib/pq/hstore/hstore.go
-new file mode 100644
-index 00000000000..f1470db1406
---- /dev/null
-+++ b/vendor/github.com/lib/pq/hstore/hstore.go
-@@ -0,0 +1,118 @@
-+package hstore
-+
-+import (
-+ "database/sql"
-+ "database/sql/driver"
-+ "strings"
-+)
-+
-+// Hstore is a wrapper for transferring Hstore values back and forth easily.
-+type Hstore struct {
-+ Map map[string]sql.NullString
-+}
-+
-+// escapes and quotes hstore keys/values
-+// s should be a sql.NullString or string
-+func hQuote(s interface{}) string {
-+ var str string
-+ switch v := s.(type) {
-+ case sql.NullString:
-+ if !v.Valid {
-+ return "NULL"
-+ }
-+ str = v.String
-+ case string:
-+ str = v
-+ default:
-+ panic("not a string or sql.NullString")
-+ }
-+
-+ str = strings.Replace(str, "\\", "\\\\", -1)
-+ return `"` + strings.Replace(str, "\"", "\\\"", -1) + `"`
-+}
-+
-+// Scan implements the Scanner interface.
-+//
-+// Note h.Map is reallocated before the scan to clear existing values. If the
-+// hstore column's database value is NULL, then h.Map is set to nil instead.
-+func (h *Hstore) Scan(value interface{}) error {
-+ if value == nil {
-+ h.Map = nil
-+ return nil
-+ }
-+ h.Map = make(map[string]sql.NullString)
-+ var b byte
-+ pair := [][]byte{{}, {}}
-+ pi := 0
-+ inQuote := false
-+ didQuote := false
-+ sawSlash := false
-+ bindex := 0
-+ for bindex, b = range value.([]byte) {
-+ if sawSlash {
-+ pair[pi] = append(pair[pi], b)
-+ sawSlash = false
-+ continue
-+ }
-+
-+ switch b {
-+ case '\\':
-+ sawSlash = true
-+ continue
-+ case '"':
-+ inQuote = !inQuote
-+ if !didQuote {
-+ didQuote = true
-+ }
-+ continue
-+ default:
-+ if !inQuote {
-+ switch b {
-+ case ' ', '\t', '\n', '\r':
-+ continue
-+ case '=':
-+ continue
-+ case '>':
-+ pi = 1
-+ didQuote = false
-+ continue
-+ case ',':
-+ s := string(pair[1])
-+ if !didQuote && len(s) == 4 && strings.ToLower(s) == "null" {
-+ h.Map[string(pair[0])] = sql.NullString{String: "", Valid: false}
-+ } else {
-+ h.Map[string(pair[0])] = sql.NullString{String: string(pair[1]), Valid: true}
-+ }
-+ pair[0] = []byte{}
-+ pair[1] = []byte{}
-+ pi = 0
-+ continue
-+ }
-+ }
-+ }
-+ pair[pi] = append(pair[pi], b)
-+ }
-+ if bindex > 0 {
-+ s := string(pair[1])
-+ if !didQuote && len(s) == 4 && strings.ToLower(s) == "null" {
-+ h.Map[string(pair[0])] = sql.NullString{String: "", Valid: false}
-+ } else {
-+ h.Map[string(pair[0])] = sql.NullString{String: string(pair[1]), Valid: true}
-+ }
-+ }
-+ return nil
-+}
-+
-+// Value implements the driver Valuer interface. Note if h.Map is nil, the
-+// database column value will be set to NULL.
-+func (h Hstore) Value() (driver.Value, error) {
-+ if h.Map == nil {
-+ return nil, nil
-+ }
-+ parts := []string{}
-+ for key, val := range h.Map {
-+ thispart := hQuote(key) + "=>" + hQuote(val)
-+ parts = append(parts, thispart)
-+ }
-+ return []byte(strings.Join(parts, ",")), nil
-+}
-diff --git a/vendor/github.com/lib/pq/hstore/hstore_test.go b/vendor/github.com/lib/pq/hstore/hstore_test.go
-new file mode 100644
-index 00000000000..0d361ae9445
---- /dev/null
-+++ b/vendor/github.com/lib/pq/hstore/hstore_test.go
-@@ -0,0 +1,148 @@
-+package hstore
-+
-+import (
-+ "database/sql"
-+ "os"
-+ "testing"
-+
-+ _ "github.com/lib/pq"
-+)
-+
-+type Fatalistic interface {
-+ Fatal(args ...interface{})
-+}
-+
-+func openTestConn(t Fatalistic) *sql.DB {
-+ datname := os.Getenv("PGDATABASE")
-+ sslmode := os.Getenv("PGSSLMODE")
-+
-+ if datname == "" {
-+ os.Setenv("PGDATABASE", "pqgotest")
-+ }
-+
-+ if sslmode == "" {
-+ os.Setenv("PGSSLMODE", "disable")
-+ }
-+
-+ conn, err := sql.Open("postgres", "")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ return conn
-+}
-+
-+func TestHstore(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ // quietly create hstore if it doesn't exist
-+ _, err := db.Exec("CREATE EXTENSION IF NOT EXISTS hstore")
-+ if err != nil {
-+ t.Skipf("Skipping hstore tests - hstore extension create failed: %s", err.Error())
-+ }
-+
-+ hs := Hstore{}
-+
-+ // test for null-valued hstores
-+ err = db.QueryRow("SELECT NULL::hstore").Scan(&hs)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ if hs.Map != nil {
-+ t.Fatalf("expected null map")
-+ }
-+
-+ err = db.QueryRow("SELECT $1::hstore", hs).Scan(&hs)
-+ if err != nil {
-+ t.Fatalf("re-query null map failed: %s", err.Error())
-+ }
-+ if hs.Map != nil {
-+ t.Fatalf("expected null map")
-+ }
-+
-+ // test for empty hstores
-+ err = db.QueryRow("SELECT ''::hstore").Scan(&hs)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ if hs.Map == nil {
-+ t.Fatalf("expected empty map, got null map")
-+ }
-+ if len(hs.Map) != 0 {
-+ t.Fatalf("expected empty map, got len(map)=%d", len(hs.Map))
-+ }
-+
-+ err = db.QueryRow("SELECT $1::hstore", hs).Scan(&hs)
-+ if err != nil {
-+ t.Fatalf("re-query empty map failed: %s", err.Error())
-+ }
-+ if hs.Map == nil {
-+ t.Fatalf("expected empty map, got null map")
-+ }
-+ if len(hs.Map) != 0 {
-+ t.Fatalf("expected empty map, got len(map)=%d", len(hs.Map))
-+ }
-+
-+ // a few example maps to test out
-+ hsOnePair := Hstore{
-+ Map: map[string]sql.NullString{
-+ "key1": {String: "value1", Valid: true},
-+ },
-+ }
-+
-+ hsThreePairs := Hstore{
-+ Map: map[string]sql.NullString{
-+ "key1": {String: "value1", Valid: true},
-+ "key2": {String: "value2", Valid: true},
-+ "key3": {String: "value3", Valid: true},
-+ },
-+ }
-+
-+ hsSmorgasbord := Hstore{
-+ Map: map[string]sql.NullString{
-+ "nullstring": {String: "NULL", Valid: true},
-+ "actuallynull": {String: "", Valid: false},
-+ "NULL": {String: "NULL string key", Valid: true},
-+ "withbracket": {String: "value>42", Valid: true},
-+ "withequal": {String: "value=42", Valid: true},
-+ `"withquotes1"`: {String: `this "should" be fine`, Valid: true},
-+ `"withquotes"2"`: {String: `this "should\" also be fine`, Valid: true},
-+ "embedded1": {String: "value1=>x1", Valid: true},
-+ "embedded2": {String: `"value2"=>x2`, Valid: true},
-+ "withnewlines": {String: "\n\nvalue\t=>2", Valid: true},
-+ "<>": {String: `this, "should,\" also, => be fine`, Valid: true},
-+ },
-+ }
-+
-+ // test encoding in query params, then decoding during Scan
-+ testBidirectional := func(h Hstore) {
-+ err = db.QueryRow("SELECT $1::hstore", h).Scan(&hs)
-+ if err != nil {
-+ t.Fatalf("re-query %d-pair map failed: %s", len(h.Map), err.Error())
-+ }
-+ if hs.Map == nil {
-+ t.Fatalf("expected %d-pair map, got null map", len(h.Map))
-+ }
-+ if len(hs.Map) != len(h.Map) {
-+ t.Fatalf("expected %d-pair map, got len(map)=%d", len(h.Map), len(hs.Map))
-+ }
-+
-+ for key, val := range hs.Map {
-+ otherval, found := h.Map[key]
-+ if !found {
-+ t.Fatalf(" key '%v' not found in %d-pair map", key, len(h.Map))
-+ }
-+ if otherval.Valid != val.Valid {
-+ t.Fatalf(" value %v <> %v in %d-pair map", otherval, val, len(h.Map))
-+ }
-+ if otherval.String != val.String {
-+ t.Fatalf(" value '%v' <> '%v' in %d-pair map", otherval.String, val.String, len(h.Map))
-+ }
-+ }
-+ }
-+
-+ testBidirectional(hsOnePair)
-+ testBidirectional(hsThreePairs)
-+ testBidirectional(hsSmorgasbord)
-+}
-diff --git a/vendor/github.com/lib/pq/issues_test.go b/vendor/github.com/lib/pq/issues_test.go
-new file mode 100644
-index 00000000000..3a330a0a915
---- /dev/null
-+++ b/vendor/github.com/lib/pq/issues_test.go
-@@ -0,0 +1,26 @@
-+package pq
-+
-+import "testing"
-+
-+func TestIssue494(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ query := `CREATE TEMP TABLE t (i INT PRIMARY KEY)`
-+ if _, err := db.Exec(query); err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ txn, err := db.Begin()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ if _, err := txn.Prepare(CopyIn("t", "i")); err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ if _, err := txn.Query("SELECT 1"); err == nil {
-+ t.Fatal("expected error")
-+ }
-+}
-diff --git a/vendor/github.com/lib/pq/notify.go b/vendor/github.com/lib/pq/notify.go
-new file mode 100644
-index 00000000000..850bb9040c3
---- /dev/null
-+++ b/vendor/github.com/lib/pq/notify.go
-@@ -0,0 +1,797 @@
-+package pq
-+
-+// Package pq is a pure Go Postgres driver for the database/sql package.
-+// This module contains support for Postgres LISTEN/NOTIFY.
-+
-+import (
-+ "errors"
-+ "fmt"
-+ "sync"
-+ "sync/atomic"
-+ "time"
-+)
-+
-+// Notification represents a single notification from the database.
-+type Notification struct {
-+ // Process ID (PID) of the notifying postgres backend.
-+ BePid int
-+ // Name of the channel the notification was sent on.
-+ Channel string
-+ // Payload, or the empty string if unspecified.
-+ Extra string
-+}
-+
-+func recvNotification(r *readBuf) *Notification {
-+ bePid := r.int32()
-+ channel := r.string()
-+ extra := r.string()
-+
-+ return &Notification{bePid, channel, extra}
-+}
-+
-+const (
-+ connStateIdle int32 = iota
-+ connStateExpectResponse
-+ connStateExpectReadyForQuery
-+)
-+
-+type message struct {
-+ typ byte
-+ err error
-+}
-+
-+var errListenerConnClosed = errors.New("pq: ListenerConn has been closed")
-+
-+// ListenerConn is a low-level interface for waiting for notifications. You
-+// should use Listener instead.
-+type ListenerConn struct {
-+ // guards cn and err
-+ connectionLock sync.Mutex
-+ cn *conn
-+ err error
-+
-+ connState int32
-+
-+ // the sending goroutine will be holding this lock
-+ senderLock sync.Mutex
-+
-+ notificationChan chan<- *Notification
-+
-+ replyChan chan message
-+}
-+
-+// NewListenerConn creates a new ListenerConn. Use NewListener instead.
-+func NewListenerConn(name string, notificationChan chan<- *Notification) (*ListenerConn, error) {
-+ return newDialListenerConn(defaultDialer{}, name, notificationChan)
-+}
-+
-+func newDialListenerConn(d Dialer, name string, c chan<- *Notification) (*ListenerConn, error) {
-+ cn, err := DialOpen(d, name)
-+ if err != nil {
-+ return nil, err
-+ }
-+
-+ l := &ListenerConn{
-+ cn: cn.(*conn),
-+ notificationChan: c,
-+ connState: connStateIdle,
-+ replyChan: make(chan message, 2),
-+ }
-+
-+ go l.listenerConnMain()
-+
-+ return l, nil
-+}
-+
-+// We can only allow one goroutine at a time to be running a query on the
-+// connection for various reasons, so the goroutine sending on the connection
-+// must be holding senderLock.
-+//
-+// Returns an error if an unrecoverable error has occurred and the ListenerConn
-+// should be abandoned.
-+func (l *ListenerConn) acquireSenderLock() error {
-+ // we must acquire senderLock first to avoid deadlocks; see ExecSimpleQuery
-+ l.senderLock.Lock()
-+
-+ l.connectionLock.Lock()
-+ err := l.err
-+ l.connectionLock.Unlock()
-+ if err != nil {
-+ l.senderLock.Unlock()
-+ return err
-+ }
-+ return nil
-+}
-+
-+func (l *ListenerConn) releaseSenderLock() {
-+ l.senderLock.Unlock()
-+}
-+
-+// setState advances the protocol state to newState. Returns false if moving
-+// to that state from the current state is not allowed.
-+func (l *ListenerConn) setState(newState int32) bool {
-+ var expectedState int32
-+
-+ switch newState {
-+ case connStateIdle:
-+ expectedState = connStateExpectReadyForQuery
-+ case connStateExpectResponse:
-+ expectedState = connStateIdle
-+ case connStateExpectReadyForQuery:
-+ expectedState = connStateExpectResponse
-+ default:
-+ panic(fmt.Sprintf("unexpected listenerConnState %d", newState))
-+ }
-+
-+ return atomic.CompareAndSwapInt32(&l.connState, expectedState, newState)
-+}
-+
-+// Main logic is here: receive messages from the postgres backend, forward
-+// notifications and query replies and keep the internal state in sync with the
-+// protocol state. Returns when the connection has been lost, is about to go
-+// away or should be discarded because we couldn't agree on the state with the
-+// server backend.
-+func (l *ListenerConn) listenerConnLoop() (err error) {
-+ defer errRecoverNoErrBadConn(&err)
-+
-+ r := &readBuf{}
-+ for {
-+ t, err := l.cn.recvMessage(r)
-+ if err != nil {
-+ return err
-+ }
-+
-+ switch t {
-+ case 'A':
-+ // recvNotification copies all the data so we don't need to worry
-+ // about the scratch buffer being overwritten.
-+ l.notificationChan <- recvNotification(r)
-+
-+ case 'T', 'D':
-+ // only used by tests; ignore
-+
-+ case 'E':
-+ // We might receive an ErrorResponse even when not in a query; it
-+ // is expected that the server will close the connection after
-+ // that, but we should make sure that the error we display is the
-+ // one from the stray ErrorResponse, not io.ErrUnexpectedEOF.
-+ if !l.setState(connStateExpectReadyForQuery) {
-+ return parseError(r)
-+ }
-+ l.replyChan <- message{t, parseError(r)}
-+
-+ case 'C', 'I':
-+ if !l.setState(connStateExpectReadyForQuery) {
-+ // protocol out of sync
-+ return fmt.Errorf("unexpected CommandComplete")
-+ }
-+ // ExecSimpleQuery doesn't need to know about this message
-+
-+ case 'Z':
-+ if !l.setState(connStateIdle) {
-+ // protocol out of sync
-+ return fmt.Errorf("unexpected ReadyForQuery")
-+ }
-+ l.replyChan <- message{t, nil}
-+
-+ case 'N', 'S':
-+ // ignore
-+ default:
-+ return fmt.Errorf("unexpected message %q from server in listenerConnLoop", t)
-+ }
-+ }
-+}
-+
-+// This is the main routine for the goroutine receiving on the database
-+// connection. Most of the main logic is in listenerConnLoop.
-+func (l *ListenerConn) listenerConnMain() {
-+ err := l.listenerConnLoop()
-+
-+ // listenerConnLoop terminated; we're done, but we still have to clean up.
-+ // Make sure nobody tries to start any new queries by making sure the err
-+ // pointer is set. It is important that we do not overwrite its value; a
-+ // connection could be closed by either this goroutine or one sending on
-+ // the connection -- whoever closes the connection is assumed to have the
-+ // more meaningful error message (as the other one will probably get
-+ // net.errClosed), so that goroutine sets the error we expose while the
-+ // other error is discarded. If the connection is lost while two
-+ // goroutines are operating on the socket, it probably doesn't matter which
-+ // error we expose so we don't try to do anything more complex.
-+ l.connectionLock.Lock()
-+ if l.err == nil {
-+ l.err = err
-+ }
-+ l.cn.Close()
-+ l.connectionLock.Unlock()
-+
-+ // There might be a query in-flight; make sure nobody's waiting for a
-+ // response to it, since there's not going to be one.
-+ close(l.replyChan)
-+
-+ // let the listener know we're done
-+ close(l.notificationChan)
-+
-+ // this ListenerConn is done
-+}
-+
-+// Listen sends a LISTEN query to the server. See ExecSimpleQuery.
-+func (l *ListenerConn) Listen(channel string) (bool, error) {
-+ return l.ExecSimpleQuery("LISTEN " + QuoteIdentifier(channel))
-+}
-+
-+// Unlisten sends an UNLISTEN query to the server. See ExecSimpleQuery.
-+func (l *ListenerConn) Unlisten(channel string) (bool, error) {
-+ return l.ExecSimpleQuery("UNLISTEN " + QuoteIdentifier(channel))
-+}
-+
-+// UnlistenAll sends an `UNLISTEN *` query to the server. See ExecSimpleQuery.
-+func (l *ListenerConn) UnlistenAll() (bool, error) {
-+ return l.ExecSimpleQuery("UNLISTEN *")
-+}
-+
-+// Ping the remote server to make sure it's alive. Non-nil error means the
-+// connection has failed and should be abandoned.
-+func (l *ListenerConn) Ping() error {
-+ sent, err := l.ExecSimpleQuery("")
-+ if !sent {
-+ return err
-+ }
-+ if err != nil {
-+ // shouldn't happen
-+ panic(err)
-+ }
-+ return nil
-+}
-+
-+// Attempt to send a query on the connection. Returns an error if sending the
-+// query failed, and the caller should initiate closure of this connection.
-+// The caller must be holding senderLock (see acquireSenderLock and
-+// releaseSenderLock).
-+func (l *ListenerConn) sendSimpleQuery(q string) (err error) {
-+ defer errRecoverNoErrBadConn(&err)
-+
-+ // must set connection state before sending the query
-+ if !l.setState(connStateExpectResponse) {
-+ panic("two queries running at the same time")
-+ }
-+
-+ // Can't use l.cn.writeBuf here because it uses the scratch buffer which
-+ // might get overwritten by listenerConnLoop.
-+ b := &writeBuf{
-+ buf: []byte("Q\x00\x00\x00\x00"),
-+ pos: 1,
-+ }
-+ b.string(q)
-+ l.cn.send(b)
-+
-+ return nil
-+}
-+
-+// ExecSimpleQuery executes a "simple query" (i.e. one with no bindable
-+// parameters) on the connection. The possible return values are:
-+// 1) "executed" is true; the query was executed to completion on the
-+// database server. If the query failed, err will be set to the error
-+// returned by the database, otherwise err will be nil.
-+// 2) If "executed" is false, the query could not be executed on the remote
-+// server. err will be non-nil.
-+//
-+// After a call to ExecSimpleQuery has returned an executed=false value, the
-+// connection has either been closed or will be closed shortly thereafter, and
-+// all subsequently executed queries will return an error.
-+func (l *ListenerConn) ExecSimpleQuery(q string) (executed bool, err error) {
-+ if err = l.acquireSenderLock(); err != nil {
-+ return false, err
-+ }
-+ defer l.releaseSenderLock()
-+
-+ err = l.sendSimpleQuery(q)
-+ if err != nil {
-+ // We can't know what state the protocol is in, so we need to abandon
-+ // this connection.
-+ l.connectionLock.Lock()
-+ // Set the error pointer if it hasn't been set already; see
-+ // listenerConnMain.
-+ if l.err == nil {
-+ l.err = err
-+ }
-+ l.connectionLock.Unlock()
-+ l.cn.c.Close()
-+ return false, err
-+ }
-+
-+ // now we just wait for a reply..
-+ for {
-+ m, ok := <-l.replyChan
-+ if !ok {
-+ // We lost the connection to server, don't bother waiting for a
-+ // a response. err should have been set already.
-+ l.connectionLock.Lock()
-+ err := l.err
-+ l.connectionLock.Unlock()
-+ return false, err
-+ }
-+ switch m.typ {
-+ case 'Z':
-+ // sanity check
-+ if m.err != nil {
-+ panic("m.err != nil")
-+ }
-+ // done; err might or might not be set
-+ return true, err
-+
-+ case 'E':
-+ // sanity check
-+ if m.err == nil {
-+ panic("m.err == nil")
-+ }
-+ // server responded with an error; ReadyForQuery to follow
-+ err = m.err
-+
-+ default:
-+ return false, fmt.Errorf("unknown response for simple query: %q", m.typ)
-+ }
-+ }
-+}
-+
-+// Close closes the connection.
-+func (l *ListenerConn) Close() error {
-+ l.connectionLock.Lock()
-+ if l.err != nil {
-+ l.connectionLock.Unlock()
-+ return errListenerConnClosed
-+ }
-+ l.err = errListenerConnClosed
-+ l.connectionLock.Unlock()
-+ // We can't send anything on the connection without holding senderLock.
-+ // Simply close the net.Conn to wake up everyone operating on it.
-+ return l.cn.c.Close()
-+}
-+
-+// Err returns the reason the connection was closed. It is not safe to call
-+// this function until l.Notify has been closed.
-+func (l *ListenerConn) Err() error {
-+ return l.err
-+}
-+
-+var errListenerClosed = errors.New("pq: Listener has been closed")
-+
-+// ErrChannelAlreadyOpen is returned from Listen when a channel is already
-+// open.
-+var ErrChannelAlreadyOpen = errors.New("pq: channel is already open")
-+
-+// ErrChannelNotOpen is returned from Unlisten when a channel is not open.
-+var ErrChannelNotOpen = errors.New("pq: channel is not open")
-+
-+// ListenerEventType is an enumeration of listener event types.
-+type ListenerEventType int
-+
-+const (
-+ // ListenerEventConnected is emitted only when the database connection
-+ // has been initially initialized. The err argument of the callback
-+ // will always be nil.
-+ ListenerEventConnected ListenerEventType = iota
-+
-+ // ListenerEventDisconnected is emitted after a database connection has
-+ // been lost, either because of an error or because Close has been
-+ // called. The err argument will be set to the reason the database
-+ // connection was lost.
-+ ListenerEventDisconnected
-+
-+ // ListenerEventReconnected is emitted after a database connection has
-+ // been re-established after connection loss. The err argument of the
-+ // callback will always be nil. After this event has been emitted, a
-+ // nil pq.Notification is sent on the Listener.Notify channel.
-+ ListenerEventReconnected
-+
-+ // ListenerEventConnectionAttemptFailed is emitted after a connection
-+ // to the database was attempted, but failed. The err argument will be
-+ // set to an error describing why the connection attempt did not
-+ // succeed.
-+ ListenerEventConnectionAttemptFailed
-+)
-+
-+// EventCallbackType is the event callback type. See also ListenerEventType
-+// constants' documentation.
-+type EventCallbackType func(event ListenerEventType, err error)
-+
-+// Listener provides an interface for listening to notifications from a
-+// PostgreSQL database. For general usage information, see section
-+// "Notifications".
-+//
-+// Listener can safely be used from concurrently running goroutines.
-+type Listener struct {
-+ // Channel for receiving notifications from the database. In some cases a
-+ // nil value will be sent. See section "Notifications" above.
-+ Notify chan *Notification
-+
-+ name string
-+ minReconnectInterval time.Duration
-+ maxReconnectInterval time.Duration
-+ dialer Dialer
-+ eventCallback EventCallbackType
-+
-+ lock sync.Mutex
-+ isClosed bool
-+ reconnectCond *sync.Cond
-+ cn *ListenerConn
-+ connNotificationChan <-chan *Notification
-+ channels map[string]struct{}
-+}
-+
-+// NewListener creates a new database connection dedicated to LISTEN / NOTIFY.
-+//
-+// name should be set to a connection string to be used to establish the
-+// database connection (see section "Connection String Parameters" above).
-+//
-+// minReconnectInterval controls the duration to wait before trying to
-+// re-establish the database connection after connection loss. After each
-+// consecutive failure this interval is doubled, until maxReconnectInterval is
-+// reached. Successfully completing the connection establishment procedure
-+// resets the interval back to minReconnectInterval.
-+//
-+// The last parameter eventCallback can be set to a function which will be
-+// called by the Listener when the state of the underlying database connection
-+// changes. This callback will be called by the goroutine which dispatches the
-+// notifications over the Notify channel, so you should try to avoid doing
-+// potentially time-consuming operations from the callback.
-+func NewListener(name string,
-+ minReconnectInterval time.Duration,
-+ maxReconnectInterval time.Duration,
-+ eventCallback EventCallbackType) *Listener {
-+ return NewDialListener(defaultDialer{}, name, minReconnectInterval, maxReconnectInterval, eventCallback)
-+}
-+
-+// NewDialListener is like NewListener but it takes a Dialer.
-+func NewDialListener(d Dialer,
-+ name string,
-+ minReconnectInterval time.Duration,
-+ maxReconnectInterval time.Duration,
-+ eventCallback EventCallbackType) *Listener {
-+
-+ l := &Listener{
-+ name: name,
-+ minReconnectInterval: minReconnectInterval,
-+ maxReconnectInterval: maxReconnectInterval,
-+ dialer: d,
-+ eventCallback: eventCallback,
-+
-+ channels: make(map[string]struct{}),
-+
-+ Notify: make(chan *Notification, 32),
-+ }
-+ l.reconnectCond = sync.NewCond(&l.lock)
-+
-+ go l.listenerMain()
-+
-+ return l
-+}
-+
-+// NotificationChannel returns the notification channel for this listener.
-+// This is the same channel as Notify, and will not be recreated during the
-+// life time of the Listener.
-+func (l *Listener) NotificationChannel() <-chan *Notification {
-+ return l.Notify
-+}
-+
-+// Listen starts listening for notifications on a channel. Calls to this
-+// function will block until an acknowledgement has been received from the
-+// server. Note that Listener automatically re-establishes the connection
-+// after connection loss, so this function may block indefinitely if the
-+// connection can not be re-established.
-+//
-+// Listen will only fail in three conditions:
-+// 1) The channel is already open. The returned error will be
-+// ErrChannelAlreadyOpen.
-+// 2) The query was executed on the remote server, but PostgreSQL returned an
-+// error message in response to the query. The returned error will be a
-+// pq.Error containing the information the server supplied.
-+// 3) Close is called on the Listener before the request could be completed.
-+//
-+// The channel name is case-sensitive.
-+func (l *Listener) Listen(channel string) error {
-+ l.lock.Lock()
-+ defer l.lock.Unlock()
-+
-+ if l.isClosed {
-+ return errListenerClosed
-+ }
-+
-+ // The server allows you to issue a LISTEN on a channel which is already
-+ // open, but it seems useful to be able to detect this case to spot for
-+ // mistakes in application logic. If the application genuinely does't
-+ // care, it can check the exported error and ignore it.
-+ _, exists := l.channels[channel]
-+ if exists {
-+ return ErrChannelAlreadyOpen
-+ }
-+
-+ if l.cn != nil {
-+ // If gotResponse is true but error is set, the query was executed on
-+ // the remote server, but resulted in an error. This should be
-+ // relatively rare, so it's fine if we just pass the error to our
-+ // caller. However, if gotResponse is false, we could not complete the
-+ // query on the remote server and our underlying connection is about
-+ // to go away, so we only add relname to l.channels, and wait for
-+ // resync() to take care of the rest.
-+ gotResponse, err := l.cn.Listen(channel)
-+ if gotResponse && err != nil {
-+ return err
-+ }
-+ }
-+
-+ l.channels[channel] = struct{}{}
-+ for l.cn == nil {
-+ l.reconnectCond.Wait()
-+ // we let go of the mutex for a while
-+ if l.isClosed {
-+ return errListenerClosed
-+ }
-+ }
-+
-+ return nil
-+}
-+
-+// Unlisten removes a channel from the Listener's channel list. Returns
-+// ErrChannelNotOpen if the Listener is not listening on the specified channel.
-+// Returns immediately with no error if there is no connection. Note that you
-+// might still get notifications for this channel even after Unlisten has
-+// returned.
-+//
-+// The channel name is case-sensitive.
-+func (l *Listener) Unlisten(channel string) error {
-+ l.lock.Lock()
-+ defer l.lock.Unlock()
-+
-+ if l.isClosed {
-+ return errListenerClosed
-+ }
-+
-+ // Similarly to LISTEN, this is not an error in Postgres, but it seems
-+ // useful to distinguish from the normal conditions.
-+ _, exists := l.channels[channel]
-+ if !exists {
-+ return ErrChannelNotOpen
-+ }
-+
-+ if l.cn != nil {
-+ // Similarly to Listen (see comment in that function), the caller
-+ // should only be bothered with an error if it came from the backend as
-+ // a response to our query.
-+ gotResponse, err := l.cn.Unlisten(channel)
-+ if gotResponse && err != nil {
-+ return err
-+ }
-+ }
-+
-+ // Don't bother waiting for resync if there's no connection.
-+ delete(l.channels, channel)
-+ return nil
-+}
-+
-+// UnlistenAll removes all channels from the Listener's channel list. Returns
-+// immediately with no error if there is no connection. Note that you might
-+// still get notifications for any of the deleted channels even after
-+// UnlistenAll has returned.
-+func (l *Listener) UnlistenAll() error {
-+ l.lock.Lock()
-+ defer l.lock.Unlock()
-+
-+ if l.isClosed {
-+ return errListenerClosed
-+ }
-+
-+ if l.cn != nil {
-+ // Similarly to Listen (see comment in that function), the caller
-+ // should only be bothered with an error if it came from the backend as
-+ // a response to our query.
-+ gotResponse, err := l.cn.UnlistenAll()
-+ if gotResponse && err != nil {
-+ return err
-+ }
-+ }
-+
-+ // Don't bother waiting for resync if there's no connection.
-+ l.channels = make(map[string]struct{})
-+ return nil
-+}
-+
-+// Ping the remote server to make sure it's alive. Non-nil return value means
-+// that there is no active connection.
-+func (l *Listener) Ping() error {
-+ l.lock.Lock()
-+ defer l.lock.Unlock()
-+
-+ if l.isClosed {
-+ return errListenerClosed
-+ }
-+ if l.cn == nil {
-+ return errors.New("no connection")
-+ }
-+
-+ return l.cn.Ping()
-+}
-+
-+// Clean up after losing the server connection. Returns l.cn.Err(), which
-+// should have the reason the connection was lost.
-+func (l *Listener) disconnectCleanup() error {
-+ l.lock.Lock()
-+ defer l.lock.Unlock()
-+
-+ // sanity check; can't look at Err() until the channel has been closed
-+ select {
-+ case _, ok := <-l.connNotificationChan:
-+ if ok {
-+ panic("connNotificationChan not closed")
-+ }
-+ default:
-+ panic("connNotificationChan not closed")
-+ }
-+
-+ err := l.cn.Err()
-+ l.cn.Close()
-+ l.cn = nil
-+ return err
-+}
-+
-+// Synchronize the list of channels we want to be listening on with the server
-+// after the connection has been established.
-+func (l *Listener) resync(cn *ListenerConn, notificationChan <-chan *Notification) error {
-+ doneChan := make(chan error)
-+ go func(notificationChan <-chan *Notification) {
-+ for channel := range l.channels {
-+ // If we got a response, return that error to our caller as it's
-+ // going to be more descriptive than cn.Err().
-+ gotResponse, err := cn.Listen(channel)
-+ if gotResponse && err != nil {
-+ doneChan <- err
-+ return
-+ }
-+
-+ // If we couldn't reach the server, wait for notificationChan to
-+ // close and then return the error message from the connection, as
-+ // per ListenerConn's interface.
-+ if err != nil {
-+ for range notificationChan {
-+ }
-+ doneChan <- cn.Err()
-+ return
-+ }
-+ }
-+ doneChan <- nil
-+ }(notificationChan)
-+
-+ // Ignore notifications while synchronization is going on to avoid
-+ // deadlocks. We have to send a nil notification over Notify anyway as
-+ // we can't possibly know which notifications (if any) were lost while
-+ // the connection was down, so there's no reason to try and process
-+ // these messages at all.
-+ for {
-+ select {
-+ case _, ok := <-notificationChan:
-+ if !ok {
-+ notificationChan = nil
-+ }
-+
-+ case err := <-doneChan:
-+ return err
-+ }
-+ }
-+}
-+
-+// caller should NOT be holding l.lock
-+func (l *Listener) closed() bool {
-+ l.lock.Lock()
-+ defer l.lock.Unlock()
-+
-+ return l.isClosed
-+}
-+
-+func (l *Listener) connect() error {
-+ notificationChan := make(chan *Notification, 32)
-+ cn, err := newDialListenerConn(l.dialer, l.name, notificationChan)
-+ if err != nil {
-+ return err
-+ }
-+
-+ l.lock.Lock()
-+ defer l.lock.Unlock()
-+
-+ err = l.resync(cn, notificationChan)
-+ if err != nil {
-+ cn.Close()
-+ return err
-+ }
-+
-+ l.cn = cn
-+ l.connNotificationChan = notificationChan
-+ l.reconnectCond.Broadcast()
-+
-+ return nil
-+}
-+
-+// Close disconnects the Listener from the database and shuts it down.
-+// Subsequent calls to its methods will return an error. Close returns an
-+// error if the connection has already been closed.
-+func (l *Listener) Close() error {
-+ l.lock.Lock()
-+ defer l.lock.Unlock()
-+
-+ if l.isClosed {
-+ return errListenerClosed
-+ }
-+
-+ if l.cn != nil {
-+ l.cn.Close()
-+ }
-+ l.isClosed = true
-+
-+ // Unblock calls to Listen()
-+ l.reconnectCond.Broadcast()
-+
-+ return nil
-+}
-+
-+func (l *Listener) emitEvent(event ListenerEventType, err error) {
-+ if l.eventCallback != nil {
-+ l.eventCallback(event, err)
-+ }
-+}
-+
-+// Main logic here: maintain a connection to the server when possible, wait
-+// for notifications and emit events.
-+func (l *Listener) listenerConnLoop() {
-+ var nextReconnect time.Time
-+
-+ reconnectInterval := l.minReconnectInterval
-+ for {
-+ for {
-+ err := l.connect()
-+ if err == nil {
-+ break
-+ }
-+
-+ if l.closed() {
-+ return
-+ }
-+ l.emitEvent(ListenerEventConnectionAttemptFailed, err)
-+
-+ time.Sleep(reconnectInterval)
-+ reconnectInterval *= 2
-+ if reconnectInterval > l.maxReconnectInterval {
-+ reconnectInterval = l.maxReconnectInterval
-+ }
-+ }
-+
-+ if nextReconnect.IsZero() {
-+ l.emitEvent(ListenerEventConnected, nil)
-+ } else {
-+ l.emitEvent(ListenerEventReconnected, nil)
-+ l.Notify <- nil
-+ }
-+
-+ reconnectInterval = l.minReconnectInterval
-+ nextReconnect = time.Now().Add(reconnectInterval)
-+
-+ for {
-+ notification, ok := <-l.connNotificationChan
-+ if !ok {
-+ // lost connection, loop again
-+ break
-+ }
-+ l.Notify <- notification
-+ }
-+
-+ err := l.disconnectCleanup()
-+ if l.closed() {
-+ return
-+ }
-+ l.emitEvent(ListenerEventDisconnected, err)
-+
-+ time.Sleep(time.Until(nextReconnect))
-+ }
-+}
-+
-+func (l *Listener) listenerMain() {
-+ l.listenerConnLoop()
-+ close(l.Notify)
-+}
-diff --git a/vendor/github.com/lib/pq/notify_test.go b/vendor/github.com/lib/pq/notify_test.go
-new file mode 100644
-index 00000000000..075666ddb70
---- /dev/null
-+++ b/vendor/github.com/lib/pq/notify_test.go
-@@ -0,0 +1,570 @@
-+package pq
-+
-+import (
-+ "errors"
-+ "fmt"
-+ "io"
-+ "os"
-+ "runtime"
-+ "sync"
-+ "testing"
-+ "time"
-+)
-+
-+var errNilNotification = errors.New("nil notification")
-+
-+func expectNotification(t *testing.T, ch <-chan *Notification, relname string, extra string) error {
-+ select {
-+ case n := <-ch:
-+ if n == nil {
-+ return errNilNotification
-+ }
-+ if n.Channel != relname || n.Extra != extra {
-+ return fmt.Errorf("unexpected notification %v", n)
-+ }
-+ return nil
-+ case <-time.After(1500 * time.Millisecond):
-+ return fmt.Errorf("timeout")
-+ }
-+}
-+
-+func expectNoNotification(t *testing.T, ch <-chan *Notification) error {
-+ select {
-+ case n := <-ch:
-+ return fmt.Errorf("unexpected notification %v", n)
-+ case <-time.After(100 * time.Millisecond):
-+ return nil
-+ }
-+}
-+
-+func expectEvent(t *testing.T, eventch <-chan ListenerEventType, et ListenerEventType) error {
-+ select {
-+ case e := <-eventch:
-+ if e != et {
-+ return fmt.Errorf("unexpected event %v", e)
-+ }
-+ return nil
-+ case <-time.After(1500 * time.Millisecond):
-+ panic("expectEvent timeout")
-+ }
-+}
-+
-+func expectNoEvent(t *testing.T, eventch <-chan ListenerEventType) error {
-+ select {
-+ case e := <-eventch:
-+ return fmt.Errorf("unexpected event %v", e)
-+ case <-time.After(100 * time.Millisecond):
-+ return nil
-+ }
-+}
-+
-+func newTestListenerConn(t *testing.T) (*ListenerConn, <-chan *Notification) {
-+ datname := os.Getenv("PGDATABASE")
-+ sslmode := os.Getenv("PGSSLMODE")
-+
-+ if datname == "" {
-+ os.Setenv("PGDATABASE", "pqgotest")
-+ }
-+
-+ if sslmode == "" {
-+ os.Setenv("PGSSLMODE", "disable")
-+ }
-+
-+ notificationChan := make(chan *Notification)
-+ l, err := NewListenerConn("", notificationChan)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ return l, notificationChan
-+}
-+
-+func TestNewListenerConn(t *testing.T) {
-+ l, _ := newTestListenerConn(t)
-+
-+ defer l.Close()
-+}
-+
-+func TestConnListen(t *testing.T) {
-+ l, channel := newTestListenerConn(t)
-+
-+ defer l.Close()
-+
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ ok, err := l.Listen("notify_test")
-+ if !ok || err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ _, err = db.Exec("NOTIFY notify_test")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ err = expectNotification(t, channel, "notify_test", "")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+}
-+
-+func TestConnUnlisten(t *testing.T) {
-+ l, channel := newTestListenerConn(t)
-+
-+ defer l.Close()
-+
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ ok, err := l.Listen("notify_test")
-+ if !ok || err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ _, err = db.Exec("NOTIFY notify_test")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ err = expectNotification(t, channel, "notify_test", "")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ ok, err = l.Unlisten("notify_test")
-+ if !ok || err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ _, err = db.Exec("NOTIFY notify_test")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ err = expectNoNotification(t, channel)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+}
-+
-+func TestConnUnlistenAll(t *testing.T) {
-+ l, channel := newTestListenerConn(t)
-+
-+ defer l.Close()
-+
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ ok, err := l.Listen("notify_test")
-+ if !ok || err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ _, err = db.Exec("NOTIFY notify_test")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ err = expectNotification(t, channel, "notify_test", "")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ ok, err = l.UnlistenAll()
-+ if !ok || err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ _, err = db.Exec("NOTIFY notify_test")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ err = expectNoNotification(t, channel)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+}
-+
-+func TestConnClose(t *testing.T) {
-+ l, _ := newTestListenerConn(t)
-+ defer l.Close()
-+
-+ err := l.Close()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ err = l.Close()
-+ if err != errListenerConnClosed {
-+ t.Fatalf("expected errListenerConnClosed; got %v", err)
-+ }
-+}
-+
-+func TestConnPing(t *testing.T) {
-+ l, _ := newTestListenerConn(t)
-+ defer l.Close()
-+ err := l.Ping()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ err = l.Close()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ err = l.Ping()
-+ if err != errListenerConnClosed {
-+ t.Fatalf("expected errListenerConnClosed; got %v", err)
-+ }
-+}
-+
-+// Test for deadlock where a query fails while another one is queued
-+func TestConnExecDeadlock(t *testing.T) {
-+ l, _ := newTestListenerConn(t)
-+ defer l.Close()
-+
-+ var wg sync.WaitGroup
-+ wg.Add(2)
-+
-+ go func() {
-+ l.ExecSimpleQuery("SELECT pg_sleep(60)")
-+ wg.Done()
-+ }()
-+ runtime.Gosched()
-+ go func() {
-+ l.ExecSimpleQuery("SELECT 1")
-+ wg.Done()
-+ }()
-+ // give the two goroutines some time to get into position
-+ runtime.Gosched()
-+ // calls Close on the net.Conn; equivalent to a network failure
-+ l.Close()
-+
-+ defer time.AfterFunc(10*time.Second, func() {
-+ panic("timed out")
-+ }).Stop()
-+ wg.Wait()
-+}
-+
-+// Test for ListenerConn being closed while a slow query is executing
-+func TestListenerConnCloseWhileQueryIsExecuting(t *testing.T) {
-+ l, _ := newTestListenerConn(t)
-+ defer l.Close()
-+
-+ var wg sync.WaitGroup
-+ wg.Add(1)
-+
-+ go func() {
-+ sent, err := l.ExecSimpleQuery("SELECT pg_sleep(60)")
-+ if sent {
-+ panic("expected sent=false")
-+ }
-+ // could be any of a number of errors
-+ if err == nil {
-+ panic("expected error")
-+ }
-+ wg.Done()
-+ }()
-+ // give the above goroutine some time to get into position
-+ runtime.Gosched()
-+ err := l.Close()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ defer time.AfterFunc(10*time.Second, func() {
-+ panic("timed out")
-+ }).Stop()
-+ wg.Wait()
-+}
-+
-+func TestNotifyExtra(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ if getServerVersion(t, db) < 90000 {
-+ t.Skip("skipping NOTIFY payload test since the server does not appear to support it")
-+ }
-+
-+ l, channel := newTestListenerConn(t)
-+ defer l.Close()
-+
-+ ok, err := l.Listen("notify_test")
-+ if !ok || err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ _, err = db.Exec("NOTIFY notify_test, 'something'")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ err = expectNotification(t, channel, "notify_test", "something")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+}
-+
-+// create a new test listener and also set the timeouts
-+func newTestListenerTimeout(t *testing.T, min time.Duration, max time.Duration) (*Listener, <-chan ListenerEventType) {
-+ datname := os.Getenv("PGDATABASE")
-+ sslmode := os.Getenv("PGSSLMODE")
-+
-+ if datname == "" {
-+ os.Setenv("PGDATABASE", "pqgotest")
-+ }
-+
-+ if sslmode == "" {
-+ os.Setenv("PGSSLMODE", "disable")
-+ }
-+
-+ eventch := make(chan ListenerEventType, 16)
-+ l := NewListener("", min, max, func(t ListenerEventType, err error) { eventch <- t })
-+ err := expectEvent(t, eventch, ListenerEventConnected)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ return l, eventch
-+}
-+
-+func newTestListener(t *testing.T) (*Listener, <-chan ListenerEventType) {
-+ return newTestListenerTimeout(t, time.Hour, time.Hour)
-+}
-+
-+func TestListenerListen(t *testing.T) {
-+ l, _ := newTestListener(t)
-+ defer l.Close()
-+
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ err := l.Listen("notify_listen_test")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ _, err = db.Exec("NOTIFY notify_listen_test")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ err = expectNotification(t, l.Notify, "notify_listen_test", "")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+}
-+
-+func TestListenerUnlisten(t *testing.T) {
-+ l, _ := newTestListener(t)
-+ defer l.Close()
-+
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ err := l.Listen("notify_listen_test")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ _, err = db.Exec("NOTIFY notify_listen_test")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ err = l.Unlisten("notify_listen_test")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ err = expectNotification(t, l.Notify, "notify_listen_test", "")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ _, err = db.Exec("NOTIFY notify_listen_test")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ err = expectNoNotification(t, l.Notify)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+}
-+
-+func TestListenerUnlistenAll(t *testing.T) {
-+ l, _ := newTestListener(t)
-+ defer l.Close()
-+
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ err := l.Listen("notify_listen_test")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ _, err = db.Exec("NOTIFY notify_listen_test")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ err = l.UnlistenAll()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ err = expectNotification(t, l.Notify, "notify_listen_test", "")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ _, err = db.Exec("NOTIFY notify_listen_test")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ err = expectNoNotification(t, l.Notify)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+}
-+
-+func TestListenerFailedQuery(t *testing.T) {
-+ l, eventch := newTestListener(t)
-+ defer l.Close()
-+
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ err := l.Listen("notify_listen_test")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ _, err = db.Exec("NOTIFY notify_listen_test")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ err = expectNotification(t, l.Notify, "notify_listen_test", "")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ // shouldn't cause a disconnect
-+ ok, err := l.cn.ExecSimpleQuery("SELECT error")
-+ if !ok {
-+ t.Fatalf("could not send query to server: %v", err)
-+ }
-+ _, ok = err.(PGError)
-+ if !ok {
-+ t.Fatalf("unexpected error %v", err)
-+ }
-+ err = expectNoEvent(t, eventch)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ // should still work
-+ _, err = db.Exec("NOTIFY notify_listen_test")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ err = expectNotification(t, l.Notify, "notify_listen_test", "")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+}
-+
-+func TestListenerReconnect(t *testing.T) {
-+ l, eventch := newTestListenerTimeout(t, 20*time.Millisecond, time.Hour)
-+ defer l.Close()
-+
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ err := l.Listen("notify_listen_test")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ _, err = db.Exec("NOTIFY notify_listen_test")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ err = expectNotification(t, l.Notify, "notify_listen_test", "")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ // kill the connection and make sure it comes back up
-+ ok, err := l.cn.ExecSimpleQuery("SELECT pg_terminate_backend(pg_backend_pid())")
-+ if ok {
-+ t.Fatalf("could not kill the connection: %v", err)
-+ }
-+ if err != io.EOF {
-+ t.Fatalf("unexpected error %v", err)
-+ }
-+ err = expectEvent(t, eventch, ListenerEventDisconnected)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ err = expectEvent(t, eventch, ListenerEventReconnected)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ // should still work
-+ _, err = db.Exec("NOTIFY notify_listen_test")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ // should get nil after Reconnected
-+ err = expectNotification(t, l.Notify, "", "")
-+ if err != errNilNotification {
-+ t.Fatal(err)
-+ }
-+
-+ err = expectNotification(t, l.Notify, "notify_listen_test", "")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+}
-+
-+func TestListenerClose(t *testing.T) {
-+ l, _ := newTestListenerTimeout(t, 20*time.Millisecond, time.Hour)
-+ defer l.Close()
-+
-+ err := l.Close()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ err = l.Close()
-+ if err != errListenerClosed {
-+ t.Fatalf("expected errListenerClosed; got %v", err)
-+ }
-+}
-+
-+func TestListenerPing(t *testing.T) {
-+ l, _ := newTestListenerTimeout(t, 20*time.Millisecond, time.Hour)
-+ defer l.Close()
-+
-+ err := l.Ping()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ err = l.Close()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ err = l.Ping()
-+ if err != errListenerClosed {
-+ t.Fatalf("expected errListenerClosed; got %v", err)
-+ }
-+}
-diff --git a/vendor/github.com/lib/pq/oid/doc.go b/vendor/github.com/lib/pq/oid/doc.go
-new file mode 100644
-index 00000000000..caaede2489d
---- /dev/null
-+++ b/vendor/github.com/lib/pq/oid/doc.go
-@@ -0,0 +1,6 @@
-+// Package oid contains OID constants
-+// as defined by the Postgres server.
-+package oid
-+
-+// Oid is a Postgres Object ID.
-+type Oid uint32
-diff --git a/vendor/github.com/lib/pq/oid/gen.go b/vendor/github.com/lib/pq/oid/gen.go
-new file mode 100644
-index 00000000000..7c634cdc5cd
---- /dev/null
-+++ b/vendor/github.com/lib/pq/oid/gen.go
-@@ -0,0 +1,93 @@
-+// +build ignore
-+
-+// Generate the table of OID values
-+// Run with 'go run gen.go'.
-+package main
-+
-+import (
-+ "database/sql"
-+ "fmt"
-+ "log"
-+ "os"
-+ "os/exec"
-+ "strings"
-+
-+ _ "github.com/lib/pq"
-+)
-+
-+// OID represent a postgres Object Identifier Type.
-+type OID struct {
-+ ID int
-+ Type string
-+}
-+
-+// Name returns an upper case version of the oid type.
-+func (o OID) Name() string {
-+ return strings.ToUpper(o.Type)
-+}
-+
-+func main() {
-+ datname := os.Getenv("PGDATABASE")
-+ sslmode := os.Getenv("PGSSLMODE")
-+
-+ if datname == "" {
-+ os.Setenv("PGDATABASE", "pqgotest")
-+ }
-+
-+ if sslmode == "" {
-+ os.Setenv("PGSSLMODE", "disable")
-+ }
-+
-+ db, err := sql.Open("postgres", "")
-+ if err != nil {
-+ log.Fatal(err)
-+ }
-+ rows, err := db.Query(`
-+ SELECT typname, oid
-+ FROM pg_type WHERE oid < 10000
-+ ORDER BY oid;
-+ `)
-+ if err != nil {
-+ log.Fatal(err)
-+ }
-+ oids := make([]*OID, 0)
-+ for rows.Next() {
-+ var oid OID
-+ if err = rows.Scan(&oid.Type, &oid.ID); err != nil {
-+ log.Fatal(err)
-+ }
-+ oids = append(oids, &oid)
-+ }
-+ if err = rows.Err(); err != nil {
-+ log.Fatal(err)
-+ }
-+ cmd := exec.Command("gofmt")
-+ cmd.Stderr = os.Stderr
-+ w, err := cmd.StdinPipe()
-+ if err != nil {
-+ log.Fatal(err)
-+ }
-+ f, err := os.Create("types.go")
-+ if err != nil {
-+ log.Fatal(err)
-+ }
-+ cmd.Stdout = f
-+ err = cmd.Start()
-+ if err != nil {
-+ log.Fatal(err)
-+ }
-+ fmt.Fprintln(w, "// Code generated by gen.go. DO NOT EDIT.")
-+ fmt.Fprintln(w, "\npackage oid")
-+ fmt.Fprintln(w, "const (")
-+ for _, oid := range oids {
-+ fmt.Fprintf(w, "T_%s Oid = %d\n", oid.Type, oid.ID)
-+ }
-+ fmt.Fprintln(w, ")")
-+ fmt.Fprintln(w, "var TypeName = map[Oid]string{")
-+ for _, oid := range oids {
-+ fmt.Fprintf(w, "T_%s: \"%s\",\n", oid.Type, oid.Name())
-+ }
-+ fmt.Fprintln(w, "}")
-+ w.Close()
-+ cmd.Wait()
-+}
-diff --git a/vendor/github.com/lib/pq/oid/types.go b/vendor/github.com/lib/pq/oid/types.go
-new file mode 100644
-index 00000000000..ecc84c2c862
---- /dev/null
-+++ b/vendor/github.com/lib/pq/oid/types.go
-@@ -0,0 +1,343 @@
-+// Code generated by gen.go. DO NOT EDIT.
-+
-+package oid
-+
-+const (
-+ T_bool Oid = 16
-+ T_bytea Oid = 17
-+ T_char Oid = 18
-+ T_name Oid = 19
-+ T_int8 Oid = 20
-+ T_int2 Oid = 21
-+ T_int2vector Oid = 22
-+ T_int4 Oid = 23
-+ T_regproc Oid = 24
-+ T_text Oid = 25
-+ T_oid Oid = 26
-+ T_tid Oid = 27
-+ T_xid Oid = 28
-+ T_cid Oid = 29
-+ T_oidvector Oid = 30
-+ T_pg_ddl_command Oid = 32
-+ T_pg_type Oid = 71
-+ T_pg_attribute Oid = 75
-+ T_pg_proc Oid = 81
-+ T_pg_class Oid = 83
-+ T_json Oid = 114
-+ T_xml Oid = 142
-+ T__xml Oid = 143
-+ T_pg_node_tree Oid = 194
-+ T__json Oid = 199
-+ T_smgr Oid = 210
-+ T_index_am_handler Oid = 325
-+ T_point Oid = 600
-+ T_lseg Oid = 601
-+ T_path Oid = 602
-+ T_box Oid = 603
-+ T_polygon Oid = 604
-+ T_line Oid = 628
-+ T__line Oid = 629
-+ T_cidr Oid = 650
-+ T__cidr Oid = 651
-+ T_float4 Oid = 700
-+ T_float8 Oid = 701
-+ T_abstime Oid = 702
-+ T_reltime Oid = 703
-+ T_tinterval Oid = 704
-+ T_unknown Oid = 705
-+ T_circle Oid = 718
-+ T__circle Oid = 719
-+ T_money Oid = 790
-+ T__money Oid = 791
-+ T_macaddr Oid = 829
-+ T_inet Oid = 869
-+ T__bool Oid = 1000
-+ T__bytea Oid = 1001
-+ T__char Oid = 1002
-+ T__name Oid = 1003
-+ T__int2 Oid = 1005
-+ T__int2vector Oid = 1006
-+ T__int4 Oid = 1007
-+ T__regproc Oid = 1008
-+ T__text Oid = 1009
-+ T__tid Oid = 1010
-+ T__xid Oid = 1011
-+ T__cid Oid = 1012
-+ T__oidvector Oid = 1013
-+ T__bpchar Oid = 1014
-+ T__varchar Oid = 1015
-+ T__int8 Oid = 1016
-+ T__point Oid = 1017
-+ T__lseg Oid = 1018
-+ T__path Oid = 1019
-+ T__box Oid = 1020
-+ T__float4 Oid = 1021
-+ T__float8 Oid = 1022
-+ T__abstime Oid = 1023
-+ T__reltime Oid = 1024
-+ T__tinterval Oid = 1025
-+ T__polygon Oid = 1027
-+ T__oid Oid = 1028
-+ T_aclitem Oid = 1033
-+ T__aclitem Oid = 1034
-+ T__macaddr Oid = 1040
-+ T__inet Oid = 1041
-+ T_bpchar Oid = 1042
-+ T_varchar Oid = 1043
-+ T_date Oid = 1082
-+ T_time Oid = 1083
-+ T_timestamp Oid = 1114
-+ T__timestamp Oid = 1115
-+ T__date Oid = 1182
-+ T__time Oid = 1183
-+ T_timestamptz Oid = 1184
-+ T__timestamptz Oid = 1185
-+ T_interval Oid = 1186
-+ T__interval Oid = 1187
-+ T__numeric Oid = 1231
-+ T_pg_database Oid = 1248
-+ T__cstring Oid = 1263
-+ T_timetz Oid = 1266
-+ T__timetz Oid = 1270
-+ T_bit Oid = 1560
-+ T__bit Oid = 1561
-+ T_varbit Oid = 1562
-+ T__varbit Oid = 1563
-+ T_numeric Oid = 1700
-+ T_refcursor Oid = 1790
-+ T__refcursor Oid = 2201
-+ T_regprocedure Oid = 2202
-+ T_regoper Oid = 2203
-+ T_regoperator Oid = 2204
-+ T_regclass Oid = 2205
-+ T_regtype Oid = 2206
-+ T__regprocedure Oid = 2207
-+ T__regoper Oid = 2208
-+ T__regoperator Oid = 2209
-+ T__regclass Oid = 2210
-+ T__regtype Oid = 2211
-+ T_record Oid = 2249
-+ T_cstring Oid = 2275
-+ T_any Oid = 2276
-+ T_anyarray Oid = 2277
-+ T_void Oid = 2278
-+ T_trigger Oid = 2279
-+ T_language_handler Oid = 2280
-+ T_internal Oid = 2281
-+ T_opaque Oid = 2282
-+ T_anyelement Oid = 2283
-+ T__record Oid = 2287
-+ T_anynonarray Oid = 2776
-+ T_pg_authid Oid = 2842
-+ T_pg_auth_members Oid = 2843
-+ T__txid_snapshot Oid = 2949
-+ T_uuid Oid = 2950
-+ T__uuid Oid = 2951
-+ T_txid_snapshot Oid = 2970
-+ T_fdw_handler Oid = 3115
-+ T_pg_lsn Oid = 3220
-+ T__pg_lsn Oid = 3221
-+ T_tsm_handler Oid = 3310
-+ T_anyenum Oid = 3500
-+ T_tsvector Oid = 3614
-+ T_tsquery Oid = 3615
-+ T_gtsvector Oid = 3642
-+ T__tsvector Oid = 3643
-+ T__gtsvector Oid = 3644
-+ T__tsquery Oid = 3645
-+ T_regconfig Oid = 3734
-+ T__regconfig Oid = 3735
-+ T_regdictionary Oid = 3769
-+ T__regdictionary Oid = 3770
-+ T_jsonb Oid = 3802
-+ T__jsonb Oid = 3807
-+ T_anyrange Oid = 3831
-+ T_event_trigger Oid = 3838
-+ T_int4range Oid = 3904
-+ T__int4range Oid = 3905
-+ T_numrange Oid = 3906
-+ T__numrange Oid = 3907
-+ T_tsrange Oid = 3908
-+ T__tsrange Oid = 3909
-+ T_tstzrange Oid = 3910
-+ T__tstzrange Oid = 3911
-+ T_daterange Oid = 3912
-+ T__daterange Oid = 3913
-+ T_int8range Oid = 3926
-+ T__int8range Oid = 3927
-+ T_pg_shseclabel Oid = 4066
-+ T_regnamespace Oid = 4089
-+ T__regnamespace Oid = 4090
-+ T_regrole Oid = 4096
-+ T__regrole Oid = 4097
-+)
-+
-+var TypeName = map[Oid]string{
-+ T_bool: "BOOL",
-+ T_bytea: "BYTEA",
-+ T_char: "CHAR",
-+ T_name: "NAME",
-+ T_int8: "INT8",
-+ T_int2: "INT2",
-+ T_int2vector: "INT2VECTOR",
-+ T_int4: "INT4",
-+ T_regproc: "REGPROC",
-+ T_text: "TEXT",
-+ T_oid: "OID",
-+ T_tid: "TID",
-+ T_xid: "XID",
-+ T_cid: "CID",
-+ T_oidvector: "OIDVECTOR",
-+ T_pg_ddl_command: "PG_DDL_COMMAND",
-+ T_pg_type: "PG_TYPE",
-+ T_pg_attribute: "PG_ATTRIBUTE",
-+ T_pg_proc: "PG_PROC",
-+ T_pg_class: "PG_CLASS",
-+ T_json: "JSON",
-+ T_xml: "XML",
-+ T__xml: "_XML",
-+ T_pg_node_tree: "PG_NODE_TREE",
-+ T__json: "_JSON",
-+ T_smgr: "SMGR",
-+ T_index_am_handler: "INDEX_AM_HANDLER",
-+ T_point: "POINT",
-+ T_lseg: "LSEG",
-+ T_path: "PATH",
-+ T_box: "BOX",
-+ T_polygon: "POLYGON",
-+ T_line: "LINE",
-+ T__line: "_LINE",
-+ T_cidr: "CIDR",
-+ T__cidr: "_CIDR",
-+ T_float4: "FLOAT4",
-+ T_float8: "FLOAT8",
-+ T_abstime: "ABSTIME",
-+ T_reltime: "RELTIME",
-+ T_tinterval: "TINTERVAL",
-+ T_unknown: "UNKNOWN",
-+ T_circle: "CIRCLE",
-+ T__circle: "_CIRCLE",
-+ T_money: "MONEY",
-+ T__money: "_MONEY",
-+ T_macaddr: "MACADDR",
-+ T_inet: "INET",
-+ T__bool: "_BOOL",
-+ T__bytea: "_BYTEA",
-+ T__char: "_CHAR",
-+ T__name: "_NAME",
-+ T__int2: "_INT2",
-+ T__int2vector: "_INT2VECTOR",
-+ T__int4: "_INT4",
-+ T__regproc: "_REGPROC",
-+ T__text: "_TEXT",
-+ T__tid: "_TID",
-+ T__xid: "_XID",
-+ T__cid: "_CID",
-+ T__oidvector: "_OIDVECTOR",
-+ T__bpchar: "_BPCHAR",
-+ T__varchar: "_VARCHAR",
-+ T__int8: "_INT8",
-+ T__point: "_POINT",
-+ T__lseg: "_LSEG",
-+ T__path: "_PATH",
-+ T__box: "_BOX",
-+ T__float4: "_FLOAT4",
-+ T__float8: "_FLOAT8",
-+ T__abstime: "_ABSTIME",
-+ T__reltime: "_RELTIME",
-+ T__tinterval: "_TINTERVAL",
-+ T__polygon: "_POLYGON",
-+ T__oid: "_OID",
-+ T_aclitem: "ACLITEM",
-+ T__aclitem: "_ACLITEM",
-+ T__macaddr: "_MACADDR",
-+ T__inet: "_INET",
-+ T_bpchar: "BPCHAR",
-+ T_varchar: "VARCHAR",
-+ T_date: "DATE",
-+ T_time: "TIME",
-+ T_timestamp: "TIMESTAMP",
-+ T__timestamp: "_TIMESTAMP",
-+ T__date: "_DATE",
-+ T__time: "_TIME",
-+ T_timestamptz: "TIMESTAMPTZ",
-+ T__timestamptz: "_TIMESTAMPTZ",
-+ T_interval: "INTERVAL",
-+ T__interval: "_INTERVAL",
-+ T__numeric: "_NUMERIC",
-+ T_pg_database: "PG_DATABASE",
-+ T__cstring: "_CSTRING",
-+ T_timetz: "TIMETZ",
-+ T__timetz: "_TIMETZ",
-+ T_bit: "BIT",
-+ T__bit: "_BIT",
-+ T_varbit: "VARBIT",
-+ T__varbit: "_VARBIT",
-+ T_numeric: "NUMERIC",
-+ T_refcursor: "REFCURSOR",
-+ T__refcursor: "_REFCURSOR",
-+ T_regprocedure: "REGPROCEDURE",
-+ T_regoper: "REGOPER",
-+ T_regoperator: "REGOPERATOR",
-+ T_regclass: "REGCLASS",
-+ T_regtype: "REGTYPE",
-+ T__regprocedure: "_REGPROCEDURE",
-+ T__regoper: "_REGOPER",
-+ T__regoperator: "_REGOPERATOR",
-+ T__regclass: "_REGCLASS",
-+ T__regtype: "_REGTYPE",
-+ T_record: "RECORD",
-+ T_cstring: "CSTRING",
-+ T_any: "ANY",
-+ T_anyarray: "ANYARRAY",
-+ T_void: "VOID",
-+ T_trigger: "TRIGGER",
-+ T_language_handler: "LANGUAGE_HANDLER",
-+ T_internal: "INTERNAL",
-+ T_opaque: "OPAQUE",
-+ T_anyelement: "ANYELEMENT",
-+ T__record: "_RECORD",
-+ T_anynonarray: "ANYNONARRAY",
-+ T_pg_authid: "PG_AUTHID",
-+ T_pg_auth_members: "PG_AUTH_MEMBERS",
-+ T__txid_snapshot: "_TXID_SNAPSHOT",
-+ T_uuid: "UUID",
-+ T__uuid: "_UUID",
-+ T_txid_snapshot: "TXID_SNAPSHOT",
-+ T_fdw_handler: "FDW_HANDLER",
-+ T_pg_lsn: "PG_LSN",
-+ T__pg_lsn: "_PG_LSN",
-+ T_tsm_handler: "TSM_HANDLER",
-+ T_anyenum: "ANYENUM",
-+ T_tsvector: "TSVECTOR",
-+ T_tsquery: "TSQUERY",
-+ T_gtsvector: "GTSVECTOR",
-+ T__tsvector: "_TSVECTOR",
-+ T__gtsvector: "_GTSVECTOR",
-+ T__tsquery: "_TSQUERY",
-+ T_regconfig: "REGCONFIG",
-+ T__regconfig: "_REGCONFIG",
-+ T_regdictionary: "REGDICTIONARY",
-+ T__regdictionary: "_REGDICTIONARY",
-+ T_jsonb: "JSONB",
-+ T__jsonb: "_JSONB",
-+ T_anyrange: "ANYRANGE",
-+ T_event_trigger: "EVENT_TRIGGER",
-+ T_int4range: "INT4RANGE",
-+ T__int4range: "_INT4RANGE",
-+ T_numrange: "NUMRANGE",
-+ T__numrange: "_NUMRANGE",
-+ T_tsrange: "TSRANGE",
-+ T__tsrange: "_TSRANGE",
-+ T_tstzrange: "TSTZRANGE",
-+ T__tstzrange: "_TSTZRANGE",
-+ T_daterange: "DATERANGE",
-+ T__daterange: "_DATERANGE",
-+ T_int8range: "INT8RANGE",
-+ T__int8range: "_INT8RANGE",
-+ T_pg_shseclabel: "PG_SHSECLABEL",
-+ T_regnamespace: "REGNAMESPACE",
-+ T__regnamespace: "_REGNAMESPACE",
-+ T_regrole: "REGROLE",
-+ T__regrole: "_REGROLE",
-+}
-diff --git a/vendor/github.com/lib/pq/rows.go b/vendor/github.com/lib/pq/rows.go
-new file mode 100644
-index 00000000000..c6aa5b9a36a
---- /dev/null
-+++ b/vendor/github.com/lib/pq/rows.go
-@@ -0,0 +1,93 @@
-+package pq
-+
-+import (
-+ "math"
-+ "reflect"
-+ "time"
-+
-+ "github.com/lib/pq/oid"
-+)
-+
-+const headerSize = 4
-+
-+type fieldDesc struct {
-+ // The object ID of the data type.
-+ OID oid.Oid
-+ // The data type size (see pg_type.typlen).
-+ // Note that negative values denote variable-width types.
-+ Len int
-+ // The type modifier (see pg_attribute.atttypmod).
-+ // The meaning of the modifier is type-specific.
-+ Mod int
-+}
-+
-+func (fd fieldDesc) Type() reflect.Type {
-+ switch fd.OID {
-+ case oid.T_int8:
-+ return reflect.TypeOf(int64(0))
-+ case oid.T_int4:
-+ return reflect.TypeOf(int32(0))
-+ case oid.T_int2:
-+ return reflect.TypeOf(int16(0))
-+ case oid.T_varchar, oid.T_text:
-+ return reflect.TypeOf("")
-+ case oid.T_bool:
-+ return reflect.TypeOf(false)
-+ case oid.T_date, oid.T_time, oid.T_timetz, oid.T_timestamp, oid.T_timestamptz:
-+ return reflect.TypeOf(time.Time{})
-+ case oid.T_bytea:
-+ return reflect.TypeOf([]byte(nil))
-+ default:
-+ return reflect.TypeOf(new(interface{})).Elem()
-+ }
-+}
-+
-+func (fd fieldDesc) Name() string {
-+ return oid.TypeName[fd.OID]
-+}
-+
-+func (fd fieldDesc) Length() (length int64, ok bool) {
-+ switch fd.OID {
-+ case oid.T_text, oid.T_bytea:
-+ return math.MaxInt64, true
-+ case oid.T_varchar, oid.T_bpchar:
-+ return int64(fd.Mod - headerSize), true
-+ default:
-+ return 0, false
-+ }
-+}
-+
-+func (fd fieldDesc) PrecisionScale() (precision, scale int64, ok bool) {
-+ switch fd.OID {
-+ case oid.T_numeric, oid.T__numeric:
-+ mod := fd.Mod - headerSize
-+ precision = int64((mod >> 16) & 0xffff)
-+ scale = int64(mod & 0xffff)
-+ return precision, scale, true
-+ default:
-+ return 0, 0, false
-+ }
-+}
-+
-+// ColumnTypeScanType returns the value type that can be used to scan types into.
-+func (rs *rows) ColumnTypeScanType(index int) reflect.Type {
-+ return rs.colTyps[index].Type()
-+}
-+
-+// ColumnTypeDatabaseTypeName return the database system type name.
-+func (rs *rows) ColumnTypeDatabaseTypeName(index int) string {
-+ return rs.colTyps[index].Name()
-+}
-+
-+// ColumnTypeLength returns the length of the column type if the column is a
-+// variable length type. If the column is not a variable length type ok
-+// should return false.
-+func (rs *rows) ColumnTypeLength(index int) (length int64, ok bool) {
-+ return rs.colTyps[index].Length()
-+}
-+
-+// ColumnTypePrecisionScale should return the precision and scale for decimal
-+// types. If not applicable, ok should be false.
-+func (rs *rows) ColumnTypePrecisionScale(index int) (precision, scale int64, ok bool) {
-+ return rs.colTyps[index].PrecisionScale()
-+}
-diff --git a/vendor/github.com/lib/pq/rows_test.go b/vendor/github.com/lib/pq/rows_test.go
-new file mode 100644
-index 00000000000..b3420a29930
---- /dev/null
-+++ b/vendor/github.com/lib/pq/rows_test.go
-@@ -0,0 +1,218 @@
-+package pq
-+
-+import (
-+ "math"
-+ "reflect"
-+ "testing"
-+
-+ "github.com/lib/pq/oid"
-+)
-+
-+func TestDataTypeName(t *testing.T) {
-+ tts := []struct {
-+ typ oid.Oid
-+ name string
-+ }{
-+ {oid.T_int8, "INT8"},
-+ {oid.T_int4, "INT4"},
-+ {oid.T_int2, "INT2"},
-+ {oid.T_varchar, "VARCHAR"},
-+ {oid.T_text, "TEXT"},
-+ {oid.T_bool, "BOOL"},
-+ {oid.T_numeric, "NUMERIC"},
-+ {oid.T_date, "DATE"},
-+ {oid.T_time, "TIME"},
-+ {oid.T_timetz, "TIMETZ"},
-+ {oid.T_timestamp, "TIMESTAMP"},
-+ {oid.T_timestamptz, "TIMESTAMPTZ"},
-+ {oid.T_bytea, "BYTEA"},
-+ }
-+
-+ for i, tt := range tts {
-+ dt := fieldDesc{OID: tt.typ}
-+ if name := dt.Name(); name != tt.name {
-+ t.Errorf("(%d) got: %s want: %s", i, name, tt.name)
-+ }
-+ }
-+}
-+
-+func TestDataType(t *testing.T) {
-+ tts := []struct {
-+ typ oid.Oid
-+ kind reflect.Kind
-+ }{
-+ {oid.T_int8, reflect.Int64},
-+ {oid.T_int4, reflect.Int32},
-+ {oid.T_int2, reflect.Int16},
-+ {oid.T_varchar, reflect.String},
-+ {oid.T_text, reflect.String},
-+ {oid.T_bool, reflect.Bool},
-+ {oid.T_date, reflect.Struct},
-+ {oid.T_time, reflect.Struct},
-+ {oid.T_timetz, reflect.Struct},
-+ {oid.T_timestamp, reflect.Struct},
-+ {oid.T_timestamptz, reflect.Struct},
-+ {oid.T_bytea, reflect.Slice},
-+ }
-+
-+ for i, tt := range tts {
-+ dt := fieldDesc{OID: tt.typ}
-+ if kind := dt.Type().Kind(); kind != tt.kind {
-+ t.Errorf("(%d) got: %s want: %s", i, kind, tt.kind)
-+ }
-+ }
-+}
-+
-+func TestDataTypeLength(t *testing.T) {
-+ tts := []struct {
-+ typ oid.Oid
-+ len int
-+ mod int
-+ length int64
-+ ok bool
-+ }{
-+ {oid.T_int4, 0, -1, 0, false},
-+ {oid.T_varchar, 65535, 9, 5, true},
-+ {oid.T_text, 65535, -1, math.MaxInt64, true},
-+ {oid.T_bytea, 65535, -1, math.MaxInt64, true},
-+ }
-+
-+ for i, tt := range tts {
-+ dt := fieldDesc{OID: tt.typ, Len: tt.len, Mod: tt.mod}
-+ if l, k := dt.Length(); k != tt.ok || l != tt.length {
-+ t.Errorf("(%d) got: %d, %t want: %d, %t", i, l, k, tt.length, tt.ok)
-+ }
-+ }
-+}
-+
-+func TestDataTypePrecisionScale(t *testing.T) {
-+ tts := []struct {
-+ typ oid.Oid
-+ mod int
-+ precision, scale int64
-+ ok bool
-+ }{
-+ {oid.T_int4, -1, 0, 0, false},
-+ {oid.T_numeric, 589830, 9, 2, true},
-+ {oid.T_text, -1, 0, 0, false},
-+ }
-+
-+ for i, tt := range tts {
-+ dt := fieldDesc{OID: tt.typ, Mod: tt.mod}
-+ p, s, k := dt.PrecisionScale()
-+ if k != tt.ok {
-+ t.Errorf("(%d) got: %t want: %t", i, k, tt.ok)
-+ }
-+ if p != tt.precision {
-+ t.Errorf("(%d) wrong precision got: %d want: %d", i, p, tt.precision)
-+ }
-+ if s != tt.scale {
-+ t.Errorf("(%d) wrong scale got: %d want: %d", i, s, tt.scale)
-+ }
-+ }
-+}
-+
-+func TestRowsColumnTypes(t *testing.T) {
-+ columnTypesTests := []struct {
-+ Name string
-+ TypeName string
-+ Length struct {
-+ Len int64
-+ OK bool
-+ }
-+ DecimalSize struct {
-+ Precision int64
-+ Scale int64
-+ OK bool
-+ }
-+ ScanType reflect.Type
-+ }{
-+ {
-+ Name: "a",
-+ TypeName: "INT4",
-+ Length: struct {
-+ Len int64
-+ OK bool
-+ }{
-+ Len: 0,
-+ OK: false,
-+ },
-+ DecimalSize: struct {
-+ Precision int64
-+ Scale int64
-+ OK bool
-+ }{
-+ Precision: 0,
-+ Scale: 0,
-+ OK: false,
-+ },
-+ ScanType: reflect.TypeOf(int32(0)),
-+ }, {
-+ Name: "bar",
-+ TypeName: "TEXT",
-+ Length: struct {
-+ Len int64
-+ OK bool
-+ }{
-+ Len: math.MaxInt64,
-+ OK: true,
-+ },
-+ DecimalSize: struct {
-+ Precision int64
-+ Scale int64
-+ OK bool
-+ }{
-+ Precision: 0,
-+ Scale: 0,
-+ OK: false,
-+ },
-+ ScanType: reflect.TypeOf(""),
-+ },
-+ }
-+
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ rows, err := db.Query("SELECT 1 AS a, text 'bar' AS bar, 1.28::numeric(9, 2) AS dec")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ columns, err := rows.ColumnTypes()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ if len(columns) != 3 {
-+ t.Errorf("expected 3 columns found %d", len(columns))
-+ }
-+
-+ for i, tt := range columnTypesTests {
-+ c := columns[i]
-+ if c.Name() != tt.Name {
-+ t.Errorf("(%d) got: %s, want: %s", i, c.Name(), tt.Name)
-+ }
-+ if c.DatabaseTypeName() != tt.TypeName {
-+ t.Errorf("(%d) got: %s, want: %s", i, c.DatabaseTypeName(), tt.TypeName)
-+ }
-+ l, ok := c.Length()
-+ if l != tt.Length.Len {
-+ t.Errorf("(%d) got: %d, want: %d", i, l, tt.Length.Len)
-+ }
-+ if ok != tt.Length.OK {
-+ t.Errorf("(%d) got: %t, want: %t", i, ok, tt.Length.OK)
-+ }
-+ p, s, ok := c.DecimalSize()
-+ if p != tt.DecimalSize.Precision {
-+ t.Errorf("(%d) got: %d, want: %d", i, p, tt.DecimalSize.Precision)
-+ }
-+ if s != tt.DecimalSize.Scale {
-+ t.Errorf("(%d) got: %d, want: %d", i, s, tt.DecimalSize.Scale)
-+ }
-+ if ok != tt.DecimalSize.OK {
-+ t.Errorf("(%d) got: %t, want: %t", i, ok, tt.DecimalSize.OK)
-+ }
-+ if c.ScanType() != tt.ScanType {
-+ t.Errorf("(%d) got: %v, want: %v", i, c.ScanType(), tt.ScanType)
-+ }
-+ }
-+}
-diff --git a/vendor/github.com/lib/pq/scram/scram.go b/vendor/github.com/lib/pq/scram/scram.go
-new file mode 100644
-index 00000000000..477216b6008
---- /dev/null
-+++ b/vendor/github.com/lib/pq/scram/scram.go
-@@ -0,0 +1,264 @@
-+// Copyright (c) 2014 - Gustavo Niemeyer
-+//
-+// All rights reserved.
-+//
-+// Redistribution and use in source and binary forms, with or without
-+// modification, are permitted provided that the following conditions are met:
-+//
-+// 1. Redistributions of source code must retain the above copyright notice, this
-+// list of conditions and the following disclaimer.
-+// 2. Redistributions in binary form must reproduce the above copyright notice,
-+// this list of conditions and the following disclaimer in the documentation
-+// and/or other materials provided with the distribution.
-+//
-+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
-+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+
-+// Package scram implements a SCRAM-{SHA-1,etc} client per RFC5802.
-+//
-+// http://tools.ietf.org/html/rfc5802
-+//
-+package scram
-+
-+import (
-+ "bytes"
-+ "crypto/hmac"
-+ "crypto/rand"
-+ "encoding/base64"
-+ "fmt"
-+ "hash"
-+ "strconv"
-+ "strings"
-+)
-+
-+// Client implements a SCRAM-* client (SCRAM-SHA-1, SCRAM-SHA-256, etc).
-+//
-+// A Client may be used within a SASL conversation with logic resembling:
-+//
-+// var in []byte
-+// var client = scram.NewClient(sha1.New, user, pass)
-+// for client.Step(in) {
-+// out := client.Out()
-+// // send out to server
-+// in := serverOut
-+// }
-+// if client.Err() != nil {
-+// // auth failed
-+// }
-+//
-+type Client struct {
-+ newHash func() hash.Hash
-+
-+ user string
-+ pass string
-+ step int
-+ out bytes.Buffer
-+ err error
-+
-+ clientNonce []byte
-+ serverNonce []byte
-+ saltedPass []byte
-+ authMsg bytes.Buffer
-+}
-+
-+// NewClient returns a new SCRAM-* client with the provided hash algorithm.
-+//
-+// For SCRAM-SHA-256, for example, use:
-+//
-+// client := scram.NewClient(sha256.New, user, pass)
-+//
-+func NewClient(newHash func() hash.Hash, user, pass string) *Client {
-+ c := &Client{
-+ newHash: newHash,
-+ user: user,
-+ pass: pass,
-+ }
-+ c.out.Grow(256)
-+ c.authMsg.Grow(256)
-+ return c
-+}
-+
-+// Out returns the data to be sent to the server in the current step.
-+func (c *Client) Out() []byte {
-+ if c.out.Len() == 0 {
-+ return nil
-+ }
-+ return c.out.Bytes()
-+}
-+
-+// Err returns the error that occurred, or nil if there were no errors.
-+func (c *Client) Err() error {
-+ return c.err
-+}
-+
-+// SetNonce sets the client nonce to the provided value.
-+// If not set, the nonce is generated automatically out of crypto/rand on the first step.
-+func (c *Client) SetNonce(nonce []byte) {
-+ c.clientNonce = nonce
-+}
-+
-+var escaper = strings.NewReplacer("=", "=3D", ",", "=2C")
-+
-+// Step processes the incoming data from the server and makes the
-+// next round of data for the server available via Client.Out.
-+// Step returns false if there are no errors and more data is
-+// still expected.
-+func (c *Client) Step(in []byte) bool {
-+ c.out.Reset()
-+ if c.step > 2 || c.err != nil {
-+ return false
-+ }
-+ c.step++
-+ switch c.step {
-+ case 1:
-+ c.err = c.step1(in)
-+ case 2:
-+ c.err = c.step2(in)
-+ case 3:
-+ c.err = c.step3(in)
-+ }
-+ return c.step > 2 || c.err != nil
-+}
-+
-+func (c *Client) step1(in []byte) error {
-+ if len(c.clientNonce) == 0 {
-+ const nonceLen = 16
-+ buf := make([]byte, nonceLen+b64.EncodedLen(nonceLen))
-+ if _, err := rand.Read(buf[:nonceLen]); err != nil {
-+ return fmt.Errorf("cannot read random SCRAM-SHA-256 nonce from operating system: %v", err)
-+ }
-+ c.clientNonce = buf[nonceLen:]
-+ b64.Encode(c.clientNonce, buf[:nonceLen])
-+ }
-+ c.authMsg.WriteString("n=")
-+ escaper.WriteString(&c.authMsg, c.user)
-+ c.authMsg.WriteString(",r=")
-+ c.authMsg.Write(c.clientNonce)
-+
-+ c.out.WriteString("n,,")
-+ c.out.Write(c.authMsg.Bytes())
-+ return nil
-+}
-+
-+var b64 = base64.StdEncoding
-+
-+func (c *Client) step2(in []byte) error {
-+ c.authMsg.WriteByte(',')
-+ c.authMsg.Write(in)
-+
-+ fields := bytes.Split(in, []byte(","))
-+ if len(fields) != 3 {
-+ return fmt.Errorf("expected 3 fields in first SCRAM-SHA-256 server message, got %d: %q", len(fields), in)
-+ }
-+ if !bytes.HasPrefix(fields[0], []byte("r=")) || len(fields[0]) < 2 {
-+ return fmt.Errorf("server sent an invalid SCRAM-SHA-256 nonce: %q", fields[0])
-+ }
-+ if !bytes.HasPrefix(fields[1], []byte("s=")) || len(fields[1]) < 6 {
-+ return fmt.Errorf("server sent an invalid SCRAM-SHA-256 salt: %q", fields[1])
-+ }
-+ if !bytes.HasPrefix(fields[2], []byte("i=")) || len(fields[2]) < 6 {
-+ return fmt.Errorf("server sent an invalid SCRAM-SHA-256 iteration count: %q", fields[2])
-+ }
-+
-+ c.serverNonce = fields[0][2:]
-+ if !bytes.HasPrefix(c.serverNonce, c.clientNonce) {
-+ return fmt.Errorf("server SCRAM-SHA-256 nonce is not prefixed by client nonce: got %q, want %q+\"...\"", c.serverNonce, c.clientNonce)
-+ }
-+
-+ salt := make([]byte, b64.DecodedLen(len(fields[1][2:])))
-+ n, err := b64.Decode(salt, fields[1][2:])
-+ if err != nil {
-+ return fmt.Errorf("cannot decode SCRAM-SHA-256 salt sent by server: %q", fields[1])
-+ }
-+ salt = salt[:n]
-+ iterCount, err := strconv.Atoi(string(fields[2][2:]))
-+ if err != nil {
-+ return fmt.Errorf("server sent an invalid SCRAM-SHA-256 iteration count: %q", fields[2])
-+ }
-+ c.saltPassword(salt, iterCount)
-+
-+ c.authMsg.WriteString(",c=biws,r=")
-+ c.authMsg.Write(c.serverNonce)
-+
-+ c.out.WriteString("c=biws,r=")
-+ c.out.Write(c.serverNonce)
-+ c.out.WriteString(",p=")
-+ c.out.Write(c.clientProof())
-+ return nil
-+}
-+
-+func (c *Client) step3(in []byte) error {
-+ var isv, ise bool
-+ var fields = bytes.Split(in, []byte(","))
-+ if len(fields) == 1 {
-+ isv = bytes.HasPrefix(fields[0], []byte("v="))
-+ ise = bytes.HasPrefix(fields[0], []byte("e="))
-+ }
-+ if ise {
-+ return fmt.Errorf("SCRAM-SHA-256 authentication error: %s", fields[0][2:])
-+ } else if !isv {
-+ return fmt.Errorf("unsupported SCRAM-SHA-256 final message from server: %q", in)
-+ }
-+ if !bytes.Equal(c.serverSignature(), fields[0][2:]) {
-+ return fmt.Errorf("cannot authenticate SCRAM-SHA-256 server signature: %q", fields[0][2:])
-+ }
-+ return nil
-+}
-+
-+func (c *Client) saltPassword(salt []byte, iterCount int) {
-+ mac := hmac.New(c.newHash, []byte(c.pass))
-+ mac.Write(salt)
-+ mac.Write([]byte{0, 0, 0, 1})
-+ ui := mac.Sum(nil)
-+ hi := make([]byte, len(ui))
-+ copy(hi, ui)
-+ for i := 1; i < iterCount; i++ {
-+ mac.Reset()
-+ mac.Write(ui)
-+ mac.Sum(ui[:0])
-+ for j, b := range ui {
-+ hi[j] ^= b
-+ }
-+ }
-+ c.saltedPass = hi
-+}
-+
-+func (c *Client) clientProof() []byte {
-+ mac := hmac.New(c.newHash, c.saltedPass)
-+ mac.Write([]byte("Client Key"))
-+ clientKey := mac.Sum(nil)
-+ hash := c.newHash()
-+ hash.Write(clientKey)
-+ storedKey := hash.Sum(nil)
-+ mac = hmac.New(c.newHash, storedKey)
-+ mac.Write(c.authMsg.Bytes())
-+ clientProof := mac.Sum(nil)
-+ for i, b := range clientKey {
-+ clientProof[i] ^= b
-+ }
-+ clientProof64 := make([]byte, b64.EncodedLen(len(clientProof)))
-+ b64.Encode(clientProof64, clientProof)
-+ return clientProof64
-+}
-+
-+func (c *Client) serverSignature() []byte {
-+ mac := hmac.New(c.newHash, c.saltedPass)
-+ mac.Write([]byte("Server Key"))
-+ serverKey := mac.Sum(nil)
-+
-+ mac = hmac.New(c.newHash, serverKey)
-+ mac.Write(c.authMsg.Bytes())
-+ serverSignature := mac.Sum(nil)
-+
-+ encoded := make([]byte, b64.EncodedLen(len(serverSignature)))
-+ b64.Encode(encoded, serverSignature)
-+ return encoded
-+}
-diff --git a/vendor/github.com/lib/pq/ssl.go b/vendor/github.com/lib/pq/ssl.go
-new file mode 100644
-index 00000000000..d9020845585
---- /dev/null
-+++ b/vendor/github.com/lib/pq/ssl.go
-@@ -0,0 +1,175 @@
-+package pq
-+
-+import (
-+ "crypto/tls"
-+ "crypto/x509"
-+ "io/ioutil"
-+ "net"
-+ "os"
-+ "os/user"
-+ "path/filepath"
-+)
-+
-+// ssl generates a function to upgrade a net.Conn based on the "sslmode" and
-+// related settings. The function is nil when no upgrade should take place.
-+func ssl(o values) (func(net.Conn) (net.Conn, error), error) {
-+ verifyCaOnly := false
-+ tlsConf := tls.Config{}
-+ switch mode := o["sslmode"]; mode {
-+ // "require" is the default.
-+ case "", "require":
-+ // We must skip TLS's own verification since it requires full
-+ // verification since Go 1.3.
-+ tlsConf.InsecureSkipVerify = true
-+
-+ // From http://www.postgresql.org/docs/current/static/libpq-ssl.html:
-+ //
-+ // Note: For backwards compatibility with earlier versions of
-+ // PostgreSQL, if a root CA file exists, the behavior of
-+ // sslmode=require will be the same as that of verify-ca, meaning the
-+ // server certificate is validated against the CA. Relying on this
-+ // behavior is discouraged, and applications that need certificate
-+ // validation should always use verify-ca or verify-full.
-+ if sslrootcert, ok := o["sslrootcert"]; ok {
-+ if _, err := os.Stat(sslrootcert); err == nil {
-+ verifyCaOnly = true
-+ } else {
-+ delete(o, "sslrootcert")
-+ }
-+ }
-+ case "verify-ca":
-+ // We must skip TLS's own verification since it requires full
-+ // verification since Go 1.3.
-+ tlsConf.InsecureSkipVerify = true
-+ verifyCaOnly = true
-+ case "verify-full":
-+ tlsConf.ServerName = o["host"]
-+ case "disable":
-+ return nil, nil
-+ default:
-+ return nil, fmterrorf(`unsupported sslmode %q; only "require" (default), "verify-full", "verify-ca", and "disable" supported`, mode)
-+ }
-+
-+ err := sslClientCertificates(&tlsConf, o)
-+ if err != nil {
-+ return nil, err
-+ }
-+ err = sslCertificateAuthority(&tlsConf, o)
-+ if err != nil {
-+ return nil, err
-+ }
-+
-+ // Accept renegotiation requests initiated by the backend.
-+ //
-+ // Renegotiation was deprecated then removed from PostgreSQL 9.5, but
-+ // the default configuration of older versions has it enabled. Redshift
-+ // also initiates renegotiations and cannot be reconfigured.
-+ tlsConf.Renegotiation = tls.RenegotiateFreelyAsClient
-+
-+ return func(conn net.Conn) (net.Conn, error) {
-+ client := tls.Client(conn, &tlsConf)
-+ if verifyCaOnly {
-+ err := sslVerifyCertificateAuthority(client, &tlsConf)
-+ if err != nil {
-+ return nil, err
-+ }
-+ }
-+ return client, nil
-+ }, nil
-+}
-+
-+// sslClientCertificates adds the certificate specified in the "sslcert" and
-+// "sslkey" settings, or if they aren't set, from the .postgresql directory
-+// in the user's home directory. The configured files must exist and have
-+// the correct permissions.
-+func sslClientCertificates(tlsConf *tls.Config, o values) error {
-+ // user.Current() might fail when cross-compiling. We have to ignore the
-+ // error and continue without home directory defaults, since we wouldn't
-+ // know from where to load them.
-+ user, _ := user.Current()
-+
-+ // In libpq, the client certificate is only loaded if the setting is not blank.
-+ //
-+ // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1036-L1037
-+ sslcert := o["sslcert"]
-+ if len(sslcert) == 0 && user != nil {
-+ sslcert = filepath.Join(user.HomeDir, ".postgresql", "postgresql.crt")
-+ }
-+ // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1045
-+ if len(sslcert) == 0 {
-+ return nil
-+ }
-+ // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1050:L1054
-+ if _, err := os.Stat(sslcert); os.IsNotExist(err) {
-+ return nil
-+ } else if err != nil {
-+ return err
-+ }
-+
-+ // In libpq, the ssl key is only loaded if the setting is not blank.
-+ //
-+ // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1123-L1222
-+ sslkey := o["sslkey"]
-+ if len(sslkey) == 0 && user != nil {
-+ sslkey = filepath.Join(user.HomeDir, ".postgresql", "postgresql.key")
-+ }
-+
-+ if len(sslkey) > 0 {
-+ if err := sslKeyPermissions(sslkey); err != nil {
-+ return err
-+ }
-+ }
-+
-+ cert, err := tls.LoadX509KeyPair(sslcert, sslkey)
-+ if err != nil {
-+ return err
-+ }
-+
-+ tlsConf.Certificates = []tls.Certificate{cert}
-+ return nil
-+}
-+
-+// sslCertificateAuthority adds the RootCA specified in the "sslrootcert" setting.
-+func sslCertificateAuthority(tlsConf *tls.Config, o values) error {
-+ // In libpq, the root certificate is only loaded if the setting is not blank.
-+ //
-+ // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L950-L951
-+ if sslrootcert := o["sslrootcert"]; len(sslrootcert) > 0 {
-+ tlsConf.RootCAs = x509.NewCertPool()
-+
-+ cert, err := ioutil.ReadFile(sslrootcert)
-+ if err != nil {
-+ return err
-+ }
-+
-+ if !tlsConf.RootCAs.AppendCertsFromPEM(cert) {
-+ return fmterrorf("couldn't parse pem in sslrootcert")
-+ }
-+ }
-+
-+ return nil
-+}
-+
-+// sslVerifyCertificateAuthority carries out a TLS handshake to the server and
-+// verifies the presented certificate against the CA, i.e. the one specified in
-+// sslrootcert or the system CA if sslrootcert was not specified.
-+func sslVerifyCertificateAuthority(client *tls.Conn, tlsConf *tls.Config) error {
-+ err := client.Handshake()
-+ if err != nil {
-+ return err
-+ }
-+ certs := client.ConnectionState().PeerCertificates
-+ opts := x509.VerifyOptions{
-+ DNSName: client.ConnectionState().ServerName,
-+ Intermediates: x509.NewCertPool(),
-+ Roots: tlsConf.RootCAs,
-+ }
-+ for i, cert := range certs {
-+ if i == 0 {
-+ continue
-+ }
-+ opts.Intermediates.AddCert(cert)
-+ }
-+ _, err = certs[0].Verify(opts)
-+ return err
-+}
-diff --git a/vendor/github.com/lib/pq/ssl_permissions.go b/vendor/github.com/lib/pq/ssl_permissions.go
-new file mode 100644
-index 00000000000..3b7c3a2a319
---- /dev/null
-+++ b/vendor/github.com/lib/pq/ssl_permissions.go
-@@ -0,0 +1,20 @@
-+// +build !windows
-+
-+package pq
-+
-+import "os"
-+
-+// sslKeyPermissions checks the permissions on user-supplied ssl key files.
-+// The key file should have very little access.
-+//
-+// libpq does not check key file permissions on Windows.
-+func sslKeyPermissions(sslkey string) error {
-+ info, err := os.Stat(sslkey)
-+ if err != nil {
-+ return err
-+ }
-+ if info.Mode().Perm()&0077 != 0 {
-+ return ErrSSLKeyHasWorldPermissions
-+ }
-+ return nil
-+}
-diff --git a/vendor/github.com/lib/pq/ssl_test.go b/vendor/github.com/lib/pq/ssl_test.go
-new file mode 100644
-index 00000000000..3eafbfd20ff
---- /dev/null
-+++ b/vendor/github.com/lib/pq/ssl_test.go
-@@ -0,0 +1,279 @@
-+package pq
-+
-+// This file contains SSL tests
-+
-+import (
-+ _ "crypto/sha256"
-+ "crypto/x509"
-+ "database/sql"
-+ "os"
-+ "path/filepath"
-+ "testing"
-+)
-+
-+func maybeSkipSSLTests(t *testing.T) {
-+ // Require some special variables for testing certificates
-+ if os.Getenv("PQSSLCERTTEST_PATH") == "" {
-+ t.Skip("PQSSLCERTTEST_PATH not set, skipping SSL tests")
-+ }
-+
-+ value := os.Getenv("PQGOSSLTESTS")
-+ if value == "" || value == "0" {
-+ t.Skip("PQGOSSLTESTS not enabled, skipping SSL tests")
-+ } else if value != "1" {
-+ t.Fatalf("unexpected value %q for PQGOSSLTESTS", value)
-+ }
-+}
-+
-+func openSSLConn(t *testing.T, conninfo string) (*sql.DB, error) {
-+ db, err := openTestConnConninfo(conninfo)
-+ if err != nil {
-+ // should never fail
-+ t.Fatal(err)
-+ }
-+ // Do something with the connection to see whether it's working or not.
-+ tx, err := db.Begin()
-+ if err == nil {
-+ return db, tx.Rollback()
-+ }
-+ _ = db.Close()
-+ return nil, err
-+}
-+
-+func checkSSLSetup(t *testing.T, conninfo string) {
-+ _, err := openSSLConn(t, conninfo)
-+ if pge, ok := err.(*Error); ok {
-+ if pge.Code.Name() != "invalid_authorization_specification" {
-+ t.Fatalf("unexpected error code '%s'", pge.Code.Name())
-+ }
-+ } else {
-+ t.Fatalf("expected %T, got %v", (*Error)(nil), err)
-+ }
-+}
-+
-+// Connect over SSL and run a simple query to test the basics
-+func TestSSLConnection(t *testing.T) {
-+ maybeSkipSSLTests(t)
-+ // Environment sanity check: should fail without SSL
-+ checkSSLSetup(t, "sslmode=disable user=pqgossltest")
-+
-+ db, err := openSSLConn(t, "sslmode=require user=pqgossltest")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ rows, err := db.Query("SELECT 1")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ rows.Close()
-+}
-+
-+// Test sslmode=verify-full
-+func TestSSLVerifyFull(t *testing.T) {
-+ maybeSkipSSLTests(t)
-+ // Environment sanity check: should fail without SSL
-+ checkSSLSetup(t, "sslmode=disable user=pqgossltest")
-+
-+ // Not OK according to the system CA
-+ _, err := openSSLConn(t, "host=postgres sslmode=verify-full user=pqgossltest")
-+ if err == nil {
-+ t.Fatal("expected error")
-+ }
-+ _, ok := err.(x509.UnknownAuthorityError)
-+ if !ok {
-+ t.Fatalf("expected x509.UnknownAuthorityError, got %#+v", err)
-+ }
-+
-+ rootCertPath := filepath.Join(os.Getenv("PQSSLCERTTEST_PATH"), "root.crt")
-+ rootCert := "sslrootcert=" + rootCertPath + " "
-+ // No match on Common Name
-+ _, err = openSSLConn(t, rootCert+"host=127.0.0.1 sslmode=verify-full user=pqgossltest")
-+ if err == nil {
-+ t.Fatal("expected error")
-+ }
-+ _, ok = err.(x509.HostnameError)
-+ if !ok {
-+ t.Fatalf("expected x509.HostnameError, got %#+v", err)
-+ }
-+ // OK
-+ _, err = openSSLConn(t, rootCert+"host=postgres sslmode=verify-full user=pqgossltest")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+}
-+
-+// Test sslmode=require sslrootcert=rootCertPath
-+func TestSSLRequireWithRootCert(t *testing.T) {
-+ maybeSkipSSLTests(t)
-+ // Environment sanity check: should fail without SSL
-+ checkSSLSetup(t, "sslmode=disable user=pqgossltest")
-+
-+ bogusRootCertPath := filepath.Join(os.Getenv("PQSSLCERTTEST_PATH"), "bogus_root.crt")
-+ bogusRootCert := "sslrootcert=" + bogusRootCertPath + " "
-+
-+ // Not OK according to the bogus CA
-+ _, err := openSSLConn(t, bogusRootCert+"host=postgres sslmode=require user=pqgossltest")
-+ if err == nil {
-+ t.Fatal("expected error")
-+ }
-+ _, ok := err.(x509.UnknownAuthorityError)
-+ if !ok {
-+ t.Fatalf("expected x509.UnknownAuthorityError, got %s, %#+v", err, err)
-+ }
-+
-+ nonExistentCertPath := filepath.Join(os.Getenv("PQSSLCERTTEST_PATH"), "non_existent.crt")
-+ nonExistentCert := "sslrootcert=" + nonExistentCertPath + " "
-+
-+ // No match on Common Name, but that's OK because we're not validating anything.
-+ _, err = openSSLConn(t, nonExistentCert+"host=127.0.0.1 sslmode=require user=pqgossltest")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ rootCertPath := filepath.Join(os.Getenv("PQSSLCERTTEST_PATH"), "root.crt")
-+ rootCert := "sslrootcert=" + rootCertPath + " "
-+
-+ // No match on Common Name, but that's OK because we're not validating the CN.
-+ _, err = openSSLConn(t, rootCert+"host=127.0.0.1 sslmode=require user=pqgossltest")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ // Everything OK
-+ _, err = openSSLConn(t, rootCert+"host=postgres sslmode=require user=pqgossltest")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+}
-+
-+// Test sslmode=verify-ca
-+func TestSSLVerifyCA(t *testing.T) {
-+ maybeSkipSSLTests(t)
-+ // Environment sanity check: should fail without SSL
-+ checkSSLSetup(t, "sslmode=disable user=pqgossltest")
-+
-+ // Not OK according to the system CA
-+ {
-+ _, err := openSSLConn(t, "host=postgres sslmode=verify-ca user=pqgossltest")
-+ if _, ok := err.(x509.UnknownAuthorityError); !ok {
-+ t.Fatalf("expected %T, got %#+v", x509.UnknownAuthorityError{}, err)
-+ }
-+ }
-+
-+ // Still not OK according to the system CA; empty sslrootcert is treated as unspecified.
-+ {
-+ _, err := openSSLConn(t, "host=postgres sslmode=verify-ca user=pqgossltest sslrootcert=''")
-+ if _, ok := err.(x509.UnknownAuthorityError); !ok {
-+ t.Fatalf("expected %T, got %#+v", x509.UnknownAuthorityError{}, err)
-+ }
-+ }
-+
-+ rootCertPath := filepath.Join(os.Getenv("PQSSLCERTTEST_PATH"), "root.crt")
-+ rootCert := "sslrootcert=" + rootCertPath + " "
-+ // No match on Common Name, but that's OK
-+ if _, err := openSSLConn(t, rootCert+"host=127.0.0.1 sslmode=verify-ca user=pqgossltest"); err != nil {
-+ t.Fatal(err)
-+ }
-+ // Everything OK
-+ if _, err := openSSLConn(t, rootCert+"host=postgres sslmode=verify-ca user=pqgossltest"); err != nil {
-+ t.Fatal(err)
-+ }
-+}
-+
-+// Authenticate over SSL using client certificates
-+func TestSSLClientCertificates(t *testing.T) {
-+ maybeSkipSSLTests(t)
-+ // Environment sanity check: should fail without SSL
-+ checkSSLSetup(t, "sslmode=disable user=pqgossltest")
-+
-+ const baseinfo = "sslmode=require user=pqgosslcert"
-+
-+ // Certificate not specified, should fail
-+ {
-+ _, err := openSSLConn(t, baseinfo)
-+ if pge, ok := err.(*Error); ok {
-+ if pge.Code.Name() != "invalid_authorization_specification" {
-+ t.Fatalf("unexpected error code '%s'", pge.Code.Name())
-+ }
-+ } else {
-+ t.Fatalf("expected %T, got %v", (*Error)(nil), err)
-+ }
-+ }
-+
-+ // Empty certificate specified, should fail
-+ {
-+ _, err := openSSLConn(t, baseinfo+" sslcert=''")
-+ if pge, ok := err.(*Error); ok {
-+ if pge.Code.Name() != "invalid_authorization_specification" {
-+ t.Fatalf("unexpected error code '%s'", pge.Code.Name())
-+ }
-+ } else {
-+ t.Fatalf("expected %T, got %v", (*Error)(nil), err)
-+ }
-+ }
-+
-+ // Non-existent certificate specified, should fail
-+ {
-+ _, err := openSSLConn(t, baseinfo+" sslcert=/tmp/filedoesnotexist")
-+ if pge, ok := err.(*Error); ok {
-+ if pge.Code.Name() != "invalid_authorization_specification" {
-+ t.Fatalf("unexpected error code '%s'", pge.Code.Name())
-+ }
-+ } else {
-+ t.Fatalf("expected %T, got %v", (*Error)(nil), err)
-+ }
-+ }
-+
-+ certpath, ok := os.LookupEnv("PQSSLCERTTEST_PATH")
-+ if !ok {
-+ t.Fatalf("PQSSLCERTTEST_PATH not present in environment")
-+ }
-+
-+ sslcert := filepath.Join(certpath, "postgresql.crt")
-+
-+ // Cert present, key not specified, should fail
-+ {
-+ _, err := openSSLConn(t, baseinfo+" sslcert="+sslcert)
-+ if _, ok := err.(*os.PathError); !ok {
-+ t.Fatalf("expected %T, got %#+v", (*os.PathError)(nil), err)
-+ }
-+ }
-+
-+ // Cert present, empty key specified, should fail
-+ {
-+ _, err := openSSLConn(t, baseinfo+" sslcert="+sslcert+" sslkey=''")
-+ if _, ok := err.(*os.PathError); !ok {
-+ t.Fatalf("expected %T, got %#+v", (*os.PathError)(nil), err)
-+ }
-+ }
-+
-+ // Cert present, non-existent key, should fail
-+ {
-+ _, err := openSSLConn(t, baseinfo+" sslcert="+sslcert+" sslkey=/tmp/filedoesnotexist")
-+ if _, ok := err.(*os.PathError); !ok {
-+ t.Fatalf("expected %T, got %#+v", (*os.PathError)(nil), err)
-+ }
-+ }
-+
-+ // Key has wrong permissions (passing the cert as the key), should fail
-+ if _, err := openSSLConn(t, baseinfo+" sslcert="+sslcert+" sslkey="+sslcert); err != ErrSSLKeyHasWorldPermissions {
-+ t.Fatalf("expected %s, got %#+v", ErrSSLKeyHasWorldPermissions, err)
-+ }
-+
-+ sslkey := filepath.Join(certpath, "postgresql.key")
-+
-+ // Should work
-+ if db, err := openSSLConn(t, baseinfo+" sslcert="+sslcert+" sslkey="+sslkey); err != nil {
-+ t.Fatal(err)
-+ } else {
-+ rows, err := db.Query("SELECT 1")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ if err := rows.Close(); err != nil {
-+ t.Fatal(err)
-+ }
-+ if err := db.Close(); err != nil {
-+ t.Fatal(err)
-+ }
-+ }
-+}
-diff --git a/vendor/github.com/lib/pq/ssl_windows.go b/vendor/github.com/lib/pq/ssl_windows.go
-new file mode 100644
-index 00000000000..5d2c763cebc
---- /dev/null
-+++ b/vendor/github.com/lib/pq/ssl_windows.go
-@@ -0,0 +1,9 @@
-+// +build windows
-+
-+package pq
-+
-+// sslKeyPermissions checks the permissions on user-supplied ssl key files.
-+// The key file should have very little access.
-+//
-+// libpq does not check key file permissions on Windows.
-+func sslKeyPermissions(string) error { return nil }
-diff --git a/vendor/github.com/lib/pq/url.go b/vendor/github.com/lib/pq/url.go
-new file mode 100644
-index 00000000000..f4d8a7c2062
---- /dev/null
-+++ b/vendor/github.com/lib/pq/url.go
-@@ -0,0 +1,76 @@
-+package pq
-+
-+import (
-+ "fmt"
-+ "net"
-+ nurl "net/url"
-+ "sort"
-+ "strings"
-+)
-+
-+// ParseURL no longer needs to be used by clients of this library since supplying a URL as a
-+// connection string to sql.Open() is now supported:
-+//
-+// sql.Open("postgres", "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full")
-+//
-+// It remains exported here for backwards-compatibility.
-+//
-+// ParseURL converts a url to a connection string for driver.Open.
-+// Example:
-+//
-+// "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full"
-+//
-+// converts to:
-+//
-+// "user=bob password=secret host=1.2.3.4 port=5432 dbname=mydb sslmode=verify-full"
-+//
-+// A minimal example:
-+//
-+// "postgres://"
-+//
-+// This will be blank, causing driver.Open to use all of the defaults
-+func ParseURL(url string) (string, error) {
-+ u, err := nurl.Parse(url)
-+ if err != nil {
-+ return "", err
-+ }
-+
-+ if u.Scheme != "postgres" && u.Scheme != "postgresql" {
-+ return "", fmt.Errorf("invalid connection protocol: %s", u.Scheme)
-+ }
-+
-+ var kvs []string
-+ escaper := strings.NewReplacer(` `, `\ `, `'`, `\'`, `\`, `\\`)
-+ accrue := func(k, v string) {
-+ if v != "" {
-+ kvs = append(kvs, k+"="+escaper.Replace(v))
-+ }
-+ }
-+
-+ if u.User != nil {
-+ v := u.User.Username()
-+ accrue("user", v)
-+
-+ v, _ = u.User.Password()
-+ accrue("password", v)
-+ }
-+
-+ if host, port, err := net.SplitHostPort(u.Host); err != nil {
-+ accrue("host", u.Host)
-+ } else {
-+ accrue("host", host)
-+ accrue("port", port)
-+ }
-+
-+ if u.Path != "" {
-+ accrue("dbname", u.Path[1:])
-+ }
-+
-+ q := u.Query()
-+ for k := range q {
-+ accrue(k, q.Get(k))
-+ }
-+
-+ sort.Strings(kvs) // Makes testing easier (not a performance concern)
-+ return strings.Join(kvs, " "), nil
-+}
-diff --git a/vendor/github.com/lib/pq/url_test.go b/vendor/github.com/lib/pq/url_test.go
-new file mode 100644
-index 00000000000..4ff0ce034dc
---- /dev/null
-+++ b/vendor/github.com/lib/pq/url_test.go
-@@ -0,0 +1,66 @@
-+package pq
-+
-+import (
-+ "testing"
-+)
-+
-+func TestSimpleParseURL(t *testing.T) {
-+ expected := "host=hostname.remote"
-+ str, err := ParseURL("postgres://hostname.remote")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ if str != expected {
-+ t.Fatalf("unexpected result from ParseURL:\n+ %v\n- %v", str, expected)
-+ }
-+}
-+
-+func TestIPv6LoopbackParseURL(t *testing.T) {
-+ expected := "host=::1 port=1234"
-+ str, err := ParseURL("postgres://[::1]:1234")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ if str != expected {
-+ t.Fatalf("unexpected result from ParseURL:\n+ %v\n- %v", str, expected)
-+ }
-+}
-+
-+func TestFullParseURL(t *testing.T) {
-+ expected := `dbname=database host=hostname.remote password=top\ secret port=1234 user=username`
-+ str, err := ParseURL("postgres://username:top%20secret@hostname.remote:1234/database")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ if str != expected {
-+ t.Fatalf("unexpected result from ParseURL:\n+ %s\n- %s", str, expected)
-+ }
-+}
-+
-+func TestInvalidProtocolParseURL(t *testing.T) {
-+ _, err := ParseURL("http://hostname.remote")
-+ switch err {
-+ case nil:
-+ t.Fatal("Expected an error from parsing invalid protocol")
-+ default:
-+ msg := "invalid connection protocol: http"
-+ if err.Error() != msg {
-+ t.Fatalf("Unexpected error message:\n+ %s\n- %s",
-+ err.Error(), msg)
-+ }
-+ }
-+}
-+
-+func TestMinimalURL(t *testing.T) {
-+ cs, err := ParseURL("postgres://")
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ if cs != "" {
-+ t.Fatalf("expected blank connection string, got: %q", cs)
-+ }
-+}
-diff --git a/vendor/github.com/lib/pq/user_posix.go b/vendor/github.com/lib/pq/user_posix.go
-new file mode 100644
-index 00000000000..bf982524f93
---- /dev/null
-+++ b/vendor/github.com/lib/pq/user_posix.go
-@@ -0,0 +1,24 @@
-+// Package pq is a pure Go Postgres driver for the database/sql package.
-+
-+// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris rumprun
-+
-+package pq
-+
-+import (
-+ "os"
-+ "os/user"
-+)
-+
-+func userCurrent() (string, error) {
-+ u, err := user.Current()
-+ if err == nil {
-+ return u.Username, nil
-+ }
-+
-+ name := os.Getenv("USER")
-+ if name != "" {
-+ return name, nil
-+ }
-+
-+ return "", ErrCouldNotDetectUsername
-+}
-diff --git a/vendor/github.com/lib/pq/user_windows.go b/vendor/github.com/lib/pq/user_windows.go
-new file mode 100644
-index 00000000000..2b691267b97
---- /dev/null
-+++ b/vendor/github.com/lib/pq/user_windows.go
-@@ -0,0 +1,27 @@
-+// Package pq is a pure Go Postgres driver for the database/sql package.
-+package pq
-+
-+import (
-+ "path/filepath"
-+ "syscall"
-+)
-+
-+// Perform Windows user name lookup identically to libpq.
-+//
-+// The PostgreSQL code makes use of the legacy Win32 function
-+// GetUserName, and that function has not been imported into stock Go.
-+// GetUserNameEx is available though, the difference being that a
-+// wider range of names are available. To get the output to be the
-+// same as GetUserName, only the base (or last) component of the
-+// result is returned.
-+func userCurrent() (string, error) {
-+ pw_name := make([]uint16, 128)
-+ pwname_size := uint32(len(pw_name)) - 1
-+ err := syscall.GetUserNameEx(syscall.NameSamCompatible, &pw_name[0], &pwname_size)
-+ if err != nil {
-+ return "", ErrCouldNotDetectUsername
-+ }
-+ s := syscall.UTF16ToString(pw_name)
-+ u := filepath.Base(s)
-+ return u, nil
-+}
-diff --git a/vendor/github.com/lib/pq/uuid.go b/vendor/github.com/lib/pq/uuid.go
-new file mode 100644
-index 00000000000..9a1b9e0748e
---- /dev/null
-+++ b/vendor/github.com/lib/pq/uuid.go
-@@ -0,0 +1,23 @@
-+package pq
-+
-+import (
-+ "encoding/hex"
-+ "fmt"
-+)
-+
-+// decodeUUIDBinary interprets the binary format of a uuid, returning it in text format.
-+func decodeUUIDBinary(src []byte) ([]byte, error) {
-+ if len(src) != 16 {
-+ return nil, fmt.Errorf("pq: unable to decode uuid; bad length: %d", len(src))
-+ }
-+
-+ dst := make([]byte, 36)
-+ dst[8], dst[13], dst[18], dst[23] = '-', '-', '-', '-'
-+ hex.Encode(dst[0:], src[0:4])
-+ hex.Encode(dst[9:], src[4:6])
-+ hex.Encode(dst[14:], src[6:8])
-+ hex.Encode(dst[19:], src[8:10])
-+ hex.Encode(dst[24:], src[10:16])
-+
-+ return dst, nil
-+}
-diff --git a/vendor/github.com/lib/pq/uuid_test.go b/vendor/github.com/lib/pq/uuid_test.go
-new file mode 100644
-index 00000000000..8ecee2fdef4
---- /dev/null
-+++ b/vendor/github.com/lib/pq/uuid_test.go
-@@ -0,0 +1,46 @@
-+package pq
-+
-+import (
-+ "reflect"
-+ "strings"
-+ "testing"
-+)
-+
-+func TestDecodeUUIDBinaryError(t *testing.T) {
-+ t.Parallel()
-+ _, err := decodeUUIDBinary([]byte{0x12, 0x34})
-+
-+ if err == nil {
-+ t.Fatal("Expected error, got none")
-+ }
-+ if !strings.HasPrefix(err.Error(), "pq:") {
-+ t.Errorf("Expected error to start with %q, got %q", "pq:", err.Error())
-+ }
-+ if !strings.Contains(err.Error(), "bad length: 2") {
-+ t.Errorf("Expected error to contain length, got %q", err.Error())
-+ }
-+}
-+
-+func BenchmarkDecodeUUIDBinary(b *testing.B) {
-+ x := []byte{0x03, 0xa3, 0x52, 0x2f, 0x89, 0x28, 0x49, 0x87, 0x84, 0xd6, 0x93, 0x7b, 0x36, 0xec, 0x27, 0x6f}
-+
-+ for i := 0; i < b.N; i++ {
-+ decodeUUIDBinary(x)
-+ }
-+}
-+
-+func TestDecodeUUIDBackend(t *testing.T) {
-+ db := openTestConn(t)
-+ defer db.Close()
-+
-+ var s = "a0ecc91d-a13f-4fe4-9fce-7e09777cc70a"
-+ var scanned interface{}
-+
-+ err := db.QueryRow(`SELECT $1::uuid`, s).Scan(&scanned)
-+ if err != nil {
-+ t.Fatalf("Expected no error, got %v", err)
-+ }
-+ if !reflect.DeepEqual(scanned, []byte(s)) {
-+ t.Errorf("Expected []byte(%q), got %T(%q)", s, scanned, scanned)
-+ }
-+}
-diff --git a/vendor/github.com/mattn/go-sqlite3/.github/FUNDING.yml b/vendor/github.com/mattn/go-sqlite3/.github/FUNDING.yml
-new file mode 100644
-index 00000000000..ee449664904
---- /dev/null
-+++ b/vendor/github.com/mattn/go-sqlite3/.github/FUNDING.yml
-@@ -0,0 +1,8 @@
-+# These are supported funding model platforms
-+
-+github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
-+patreon: mattn # Replace with a single Patreon username
-+open_collective: # Replace with a single Open Collective username
-+ko_fi: # Replace with a single Ko-fi username
-+tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
-+custom: # Replace with a single custom sponsorship URL
-diff --git a/vendor/github.com/mattn/go-sqlite3/.gitignore b/vendor/github.com/mattn/go-sqlite3/.gitignore
-new file mode 100644
-index 00000000000..fa0e6b58188
---- /dev/null
-+++ b/vendor/github.com/mattn/go-sqlite3/.gitignore
-@@ -0,0 +1,14 @@
-+*.db
-+*.exe
-+*.dll
-+*.o
-+
-+# VSCode
-+.vscode
-+
-+# Exclude from upgrade
-+upgrade/*.c
-+upgrade/*.h
-+
-+# Exclude upgrade binary
-+upgrade/upgrade
-diff --git a/vendor/github.com/mattn/go-sqlite3/.travis.yml b/vendor/github.com/mattn/go-sqlite3/.travis.yml
-new file mode 100644
-index 00000000000..eb0abbbbea9
---- /dev/null
-+++ b/vendor/github.com/mattn/go-sqlite3/.travis.yml
-@@ -0,0 +1,31 @@
-+language: go
-+
-+os:
-+ - linux
-+ - osx
-+
-+addons:
-+ apt:
-+ update: true
-+
-+go:
-+ - 1.9.x
-+ - 1.10.x
-+ - 1.11.x
-+ - master
-+
-+before_install:
-+ - |
-+ if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
-+ brew update
-+ fi
-+ - go get github.com/smartystreets/goconvey
-+ - go get github.com/mattn/goveralls
-+ - go get golang.org/x/tools/cmd/cover
-+
-+script:
-+ - $HOME/gopath/bin/goveralls -repotoken 3qJVUE0iQwqnCbmNcDsjYu1nh4J4KIFXx
-+ - go test -race -v . -tags ""
-+ - go test -race -v . -tags "libsqlite3"
-+ - go test -race -v . -tags "sqlite_allow_uri_authority sqlite_app_armor sqlite_foreign_keys sqlite_fts5 sqlite_icu sqlite_introspect sqlite_json sqlite_secure_delete sqlite_see sqlite_stat4 sqlite_trace sqlite_userauth sqlite_vacuum_incr sqlite_vtable sqlite_unlock_notify"
-+ - go test -race -v . -tags "sqlite_vacuum_full"
-\ No newline at end of file
-diff --git a/vendor/github.com/mattn/go-sqlite3/LICENSE b/vendor/github.com/mattn/go-sqlite3/LICENSE
-new file mode 100644
-index 00000000000..ca458bb39ff
---- /dev/null
-+++ b/vendor/github.com/mattn/go-sqlite3/LICENSE
-@@ -0,0 +1,21 @@
-+The MIT License (MIT)
-+
-+Copyright (c) 2014 Yasuhiro Matsumoto
-+
-+Permission is hereby granted, free of charge, to any person obtaining a copy
-+of this software and associated documentation files (the "Software"), to deal
-+in the Software without restriction, including without limitation the rights
-+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-+copies of the Software, and to permit persons to whom the Software is
-+furnished to do so, subject to the following conditions:
-+
-+The above copyright notice and this permission notice shall be included in all
-+copies or substantial portions of the Software.
-+
-+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-+SOFTWARE.
-diff --git a/vendor/github.com/mattn/go-sqlite3/README.md b/vendor/github.com/mattn/go-sqlite3/README.md
-new file mode 100644
-index 00000000000..e6654bbf882
---- /dev/null
-+++ b/vendor/github.com/mattn/go-sqlite3/README.md
-@@ -0,0 +1,521 @@
-+go-sqlite3
-+==========
-+
-+[![GoDoc Reference](https://godoc.org/github.com/mattn/go-sqlite3?status.svg)](http://godoc.org/github.com/mattn/go-sqlite3)
-+[![Build Status](https://travis-ci.org/mattn/go-sqlite3.svg?branch=master)](https://travis-ci.org/mattn/go-sqlite3)
-+[![Coverage Status](https://coveralls.io/repos/mattn/go-sqlite3/badge.svg?branch=master)](https://coveralls.io/r/mattn/go-sqlite3?branch=master)
-+[![Go Report Card](https://goreportcard.com/badge/github.com/mattn/go-sqlite3)](https://goreportcard.com/report/github.com/mattn/go-sqlite3)
-+
-+# Description
-+
-+sqlite3 driver conforming to the built-in database/sql interface
-+
-+Supported Golang version: See .travis.yml
-+
-+[This package follows the official Golang Release Policy.](https://golang.org/doc/devel/release.html#policy)
-+
-+### Overview
-+
-+- [Installation](#installation)
-+- [API Reference](#api-reference)
-+- [Connection String](#connection-string)
-+- [Features](#features)
-+- [Compilation](#compilation)
-+ - [Android](#android)
-+ - [ARM](#arm)
-+ - [Cross Compile](#cross-compile)
-+ - [Google Cloud Platform](#google-cloud-platform)
-+ - [Linux](#linux)
-+ - [Alpine](#alpine)
-+ - [Fedora](#fedora)
-+ - [Ubuntu](#ubuntu)
-+ - [Mac OSX](#mac-osx)
-+ - [Windows](#windows)
-+ - [Errors](#errors)
-+- [User Authentication](#user-authentication)
-+ - [Compile](#compile)
-+ - [Usage](#usage)
-+- [Extensions](#extensions)
-+ - [Spatialite](#spatialite)
-+- [FAQ](#faq)
-+- [License](#license)
-+
-+# Installation
-+
-+This package can be installed with the go get command:
-+
-+ go get github.com/mattn/go-sqlite3
-+
-+_go-sqlite3_ is *cgo* package.
-+If you want to build your app using go-sqlite3, you need gcc.
-+However, after you have built and installed _go-sqlite3_ with `go install github.com/mattn/go-sqlite3` (which requires gcc), you can build your app without relying on gcc in future.
-+
-+***Important: because this is a `CGO` enabled package you are required to set the environment variable `CGO_ENABLED=1` and have a `gcc` compile present within your path.***
-+
-+# API Reference
-+
-+API documentation can be found here: http://godoc.org/github.com/mattn/go-sqlite3
-+
-+Examples can be found under the [examples](./_example) directory
-+
-+# Connection String
-+
-+When creating a new SQLite database or connection to an existing one, with the file name additional options can be given.
-+This is also known as a DSN string. (Data Source Name).
-+
-+Options are append after the filename of the SQLite database.
-+The database filename and options are seperated by an `?` (Question Mark).
-+Options should be URL-encoded (see [url.QueryEscape](https://golang.org/pkg/net/url/#QueryEscape)).
-+
-+This also applies when using an in-memory database instead of a file.
-+
-+Options can be given using the following format: `KEYWORD=VALUE` and multiple options can be combined with the `&` ampersand.
-+
-+This library supports dsn options of SQLite itself and provides additional options.
-+
-+Boolean values can be one of:
-+* `0` `no` `false` `off`
-+* `1` `yes` `true` `on`
-+
-+| Name | Key | Value(s) | Description |
-+|------|-----|----------|-------------|
-+| UA - Create | `_auth` | - | Create User Authentication, for more information see [User Authentication](#user-authentication) |
-+| UA - Username | `_auth_user` | `string` | Username for User Authentication, for more information see [User Authentication](#user-authentication) |
-+| UA - Password | `_auth_pass` | `string` | Password for User Authentication, for more information see [User Authentication](#user-authentication) |
-+| UA - Crypt | `_auth_crypt` | SHA1 SSHA1 SHA256 SSHA256 SHA384 SSHA384 SHA512 SSHA512 | Password encoder to use for User Authentication, for more information see [User Authentication](#user-authentication) |
-+| UA - Salt | `_auth_salt` | `string` | Salt to use if the configure password encoder requires a salt, for User Authentication, for more information see [User Authentication](#user-authentication) |
-+| Auto Vacuum | `_auto_vacuum` \| `_vacuum` | `0` \| `none` `1` \| `full` `2` \| `incremental` | For more information see [PRAGMA auto_vacuum](https://www.sqlite.org/pragma.html#pragma_auto_vacuum) |
-+| Busy Timeout | `_busy_timeout` \| `_timeout` | `int` | Specify value for sqlite3_busy_timeout. For more information see [PRAGMA busy_timeout](https://www.sqlite.org/pragma.html#pragma_busy_timeout) |
-+| Case Sensitive LIKE | `_case_sensitive_like` \| `_cslike` | `boolean` | For more information see [PRAGMA case_sensitive_like](https://www.sqlite.org/pragma.html#pragma_case_sensitive_like) |
-+| Defer Foreign Keys | `_defer_foreign_keys` \| `_defer_fk` | `boolean` | For more information see [PRAGMA defer_foreign_keys](https://www.sqlite.org/pragma.html#pragma_defer_foreign_keys) |
-+| Foreign Keys | `_foreign_keys` \| `_fk` | `boolean` | For more information see [PRAGMA foreign_keys](https://www.sqlite.org/pragma.html#pragma_foreign_keys) |
-+| Ignore CHECK Constraints | `_ignore_check_constraints` | `boolean` | For more information see [PRAGMA ignore_check_constraints](https://www.sqlite.org/pragma.html#pragma_ignore_check_constraints) |
-+| Immutable | `immutable` | `boolean` | For more information see [Immutable](https://www.sqlite.org/c3ref/open.html) |
-+| Journal Mode | `_journal_mode` \| `_journal` | DELETE TRUNCATE PERSIST MEMORY WAL OFF | For more information see [PRAGMA journal_mode](https://www.sqlite.org/pragma.html#pragma_journal_mode) |
-+| Locking Mode | `_locking_mode` \| `_locking` | | For more information see [PRAGMA locking_mode](https://www.sqlite.org/pragma.html#pragma_locking_mode) |
-+| Mode | `mode` | | Access Mode of the database. For more information see [SQLite Open](https://www.sqlite.org/c3ref/open.html) |
-+| Mutex Locking | `_mutex` | | Specify mutex mode. |
-+| Query Only | `_query_only` | `boolean` | For more information see [PRAGMA query_only](https://www.sqlite.org/pragma.html#pragma_query_only) |
-+| Recursive Triggers | `_recursive_triggers` \| `_rt` | `boolean` | For more information see [PRAGMA recursive_triggers](https://www.sqlite.org/pragma.html#pragma_recursive_triggers) |
-+| Secure Delete | `_secure_delete` | `boolean` \| `FAST` | For more information see [PRAGMA secure_delete](https://www.sqlite.org/pragma.html#pragma_secure_delete) |
-+| Shared-Cache Mode | `cache` | | Set cache mode for more information see [sqlite.org](https://www.sqlite.org/sharedcache.html) |
-+| Synchronous | `_synchronous` \| `_sync` | 0 \| OFF 1 \| NORMAL 2 \| FULL 3 \| EXTRA | For more information see [PRAGMA synchronous](https://www.sqlite.org/pragma.html#pragma_synchronous) |
-+| Time Zone Location | `_loc` | auto | Specify location of time format. |
-+| Transaction Lock | `_txlock` | immediate deferred exclusive | Specify locking behavior for transactions. |
-+| Writable Schema | `_writable_schema` | `Boolean` | When this pragma is on, the SQLITE_MASTER tables in which database can be changed using ordinary UPDATE, INSERT, and DELETE statements. Warning: misuse of this pragma can easily result in a corrupt database file. |
-+
-+## DSN Examples
-+
-+```
-+file:test.db?cache=shared&mode=memory
-+```
-+
-+# Features
-+
-+This package allows additional configuration of features available within SQLite3 to be enabled or disabled by golang build constraints also known as build `tags`.
-+
-+[Click here for more information about build tags / constraints.](https://golang.org/pkg/go/build/#hdr-Build_Constraints)
-+
-+### Usage
-+
-+If you wish to build this library with additional extensions / features.
-+Use the following command.
-+
-+```bash
-+go build --tags ""
-+```
-+
-+For available features see the extension list.
-+When using multiple build tags, all the different tags should be space delimted.
-+
-+Example:
-+
-+```bash
-+go build --tags "icu json1 fts5 secure_delete"
-+```
-+
-+### Feature / Extension List
-+
-+| Extension | Build Tag | Description |
-+|-----------|-----------|-------------|
-+| Additional Statistics | sqlite_stat4 | This option adds additional logic to the ANALYZE command and to the query planner that can help SQLite to chose a better query plan under certain situations. The ANALYZE command is enhanced to collect histogram data from all columns of every index and store that data in the sqlite_stat4 table. The query planner will then use the histogram data to help it make better index choices. The downside of this compile-time option is that it violates the query planner stability guarantee making it more difficult to ensure consistent performance in mass-produced applications. SQLITE_ENABLE_STAT4 is an enhancement of SQLITE_ENABLE_STAT3. STAT3 only recorded histogram data for the left-most column of each index whereas the STAT4 enhancement records histogram data from all columns of each index. The SQLITE_ENABLE_STAT3 compile-time option is a no-op and is ignored if the SQLITE_ENABLE_STAT4 compile-time option is used |
-+| Allow URI Authority | sqlite_allow_uri_authority | URI filenames normally throws an error if the authority section is not either empty or "localhost". However, if SQLite is compiled with the SQLITE_ALLOW_URI_AUTHORITY compile-time option, then the URI is converted into a Uniform Naming Convention (UNC) filename and passed down to the underlying operating system that way |
-+| App Armor | sqlite_app_armor | When defined, this C-preprocessor macro activates extra code that attempts to detect misuse of the SQLite API, such as passing in NULL pointers to required parameters or using objects after they have been destroyed. App Armor is not available under `Windows`. |
-+| Disable Load Extensions | sqlite_omit_load_extension | Loading of external extensions is enabled by default. To disable extension loading add the build tag `sqlite_omit_load_extension`. |
-+| Foreign Keys | sqlite_foreign_keys | This macro determines whether enforcement of foreign key constraints is enabled or disabled by default for new database connections. Each database connection can always turn enforcement of foreign key constraints on and off and run-time using the foreign_keys pragma. Enforcement of foreign key constraints is normally off by default, but if this compile-time parameter is set to 1, enforcement of foreign key constraints will be on by default |
-+| Full Auto Vacuum | sqlite_vacuum_full | Set the default auto vacuum to full |
-+| Incremental Auto Vacuum | sqlite_vacuum_incr | Set the default auto vacuum to incremental |
-+| Full Text Search Engine | sqlite_fts5 | When this option is defined in the amalgamation, versions 5 of the full-text search engine (fts5) is added to the build automatically |
-+| International Components for Unicode | sqlite_icu | This option causes the International Components for Unicode or "ICU" extension to SQLite to be added to the build |
-+| Introspect PRAGMAS | sqlite_introspect | This option adds some extra PRAGMA statements. PRAGMA function_list PRAGMA module_list PRAGMA pragma_list |
-+| JSON SQL Functions | sqlite_json | When this option is defined in the amalgamation, the JSON SQL functions are added to the build automatically |
-+| Secure Delete | sqlite_secure_delete | This compile-time option changes the default setting of the secure_delete pragma. When this option is not used, secure_delete defaults to off. When this option is present, secure_delete defaults to on. The secure_delete setting causes deleted content to be overwritten with zeros. There is a small performance penalty since additional I/O must occur. On the other hand, secure_delete can prevent fragments of sensitive information from lingering in unused parts of the database file after it has been deleted. See the documentation on the secure_delete pragma for additional information |
-+| Secure Delete (FAST) | sqlite_secure_delete_fast | For more information see [PRAGMA secure_delete](https://www.sqlite.org/pragma.html#pragma_secure_delete) |
-+| Tracing / Debug | sqlite_trace | Activate trace functions |
-+| User Authentication | sqlite_userauth | SQLite User Authentication see [User Authentication](#user-authentication) for more information. |
-+
-+# Compilation
-+
-+This package requires `CGO_ENABLED=1` ennvironment variable if not set by default, and the presence of the `gcc` compiler.
-+
-+If you need to add additional CFLAGS or LDFLAGS to the build command, and do not want to modify this package. Then this can be achieved by using the `CGO_CFLAGS` and `CGO_LDFLAGS` environment variables.
-+
-+## Android
-+
-+This package can be compiled for android.
-+Compile with:
-+
-+```bash
-+go build --tags "android"
-+```
-+
-+For more information see [#201](https://github.com/mattn/go-sqlite3/issues/201)
-+
-+# ARM
-+
-+To compile for `ARM` use the following environment.
-+
-+```bash
-+env CC=arm-linux-gnueabihf-gcc CXX=arm-linux-gnueabihf-g++ \
-+ CGO_ENABLED=1 GOOS=linux GOARCH=arm GOARM=7 \
-+ go build -v
-+```
-+
-+Additional information:
-+- [#242](https://github.com/mattn/go-sqlite3/issues/242)
-+- [#504](https://github.com/mattn/go-sqlite3/issues/504)
-+
-+# Cross Compile
-+
-+This library can be cross-compiled.
-+
-+In some cases you are required to the `CC` environment variable with the cross compiler.
-+
-+Additional information:
-+- [#491](https://github.com/mattn/go-sqlite3/issues/491)
-+- [#560](https://github.com/mattn/go-sqlite3/issues/560)
-+
-+# Google Cloud Platform
-+
-+Building on GCP is not possible because Google Cloud Platform does not allow `gcc` to be executed.
-+
-+Please work only with compiled final binaries.
-+
-+## Linux
-+
-+To compile this package on Linux you must install the development tools for your linux distribution.
-+
-+To compile under linux use the build tag `linux`.
-+
-+```bash
-+go build --tags "linux"
-+```
-+
-+If you wish to link directly to libsqlite3 then you can use the `libsqlite3` build tag.
-+
-+```
-+go build --tags "libsqlite3 linux"
-+```
-+
-+### Alpine
-+
-+When building in an `alpine` container run the following command before building.
-+
-+```
-+apk add --update gcc musl-dev
-+```
-+
-+### Fedora
-+
-+```bash
-+sudo yum groupinstall "Development Tools" "Development Libraries"
-+```
-+
-+### Ubuntu
-+
-+```bash
-+sudo apt-get install build-essential
-+```
-+
-+## Mac OSX
-+
-+OSX should have all the tools present to compile this package, if not install XCode this will add all the developers tools.
-+
-+Required dependency
-+
-+```bash
-+brew install sqlite3
-+```
-+
-+For OSX there is an additional package install which is required if you wish to build the `icu` extension.
-+
-+This additional package can be installed with `homebrew`.
-+
-+```bash
-+brew upgrade icu4c
-+```
-+
-+To compile for Mac OSX.
-+
-+```bash
-+go build --tags "darwin"
-+```
-+
-+If you wish to link directly to libsqlite3 then you can use the `libsqlite3` build tag.
-+
-+```
-+go build --tags "libsqlite3 darwin"
-+```
-+
-+Additional information:
-+- [#206](https://github.com/mattn/go-sqlite3/issues/206)
-+- [#404](https://github.com/mattn/go-sqlite3/issues/404)
-+
-+## Windows
-+
-+To compile this package on Windows OS you must have the `gcc` compiler installed.
-+
-+1) Install a Windows `gcc` toolchain.
-+2) Add the `bin` folders to the Windows path if the installer did not do this by default.
-+3) Open a terminal for the TDM-GCC toolchain, can be found in the Windows Start menu.
-+4) Navigate to your project folder and run the `go build ...` command for this package.
-+
-+For example the TDM-GCC Toolchain can be found [here](https://sourceforge.net/projects/tdm-gcc/).
-+
-+## Errors
-+
-+- Compile error: `can not be used when making a shared object; recompile with -fPIC`
-+
-+ When receiving a compile time error referencing recompile with `-FPIC` then you
-+ are probably using a hardend system.
-+
-+ You can compile the library on a hardend system with the following command.
-+
-+ ```bash
-+ go build -ldflags '-extldflags=-fno-PIC'
-+ ```
-+
-+ More details see [#120](https://github.com/mattn/go-sqlite3/issues/120)
-+
-+- Can't build go-sqlite3 on windows 64bit.
-+
-+ > Probably, you are using go 1.0, go1.0 has a problem when it comes to compiling/linking on windows 64bit.
-+ > See: [#27](https://github.com/mattn/go-sqlite3/issues/27)
-+
-+- `go get github.com/mattn/go-sqlite3` throws compilation error.
-+
-+ `gcc` throws: `internal compiler error`
-+
-+ Remove the download repository from your disk and try re-install with:
-+
-+ ```bash
-+ go install github.com/mattn/go-sqlite3
-+ ```
-+
-+# User Authentication
-+
-+This package supports the SQLite User Authentication module.
-+
-+## Compile
-+
-+To use the User authentication module the package has to be compiled with the tag `sqlite_userauth`. See [Features](#features).
-+
-+## Usage
-+
-+### Create protected database
-+
-+To create a database protected by user authentication provide the following argument to the connection string `_auth`.
-+This will enable user authentication within the database. This option however requires two additional arguments:
-+
-+- `_auth_user`
-+- `_auth_pass`
-+
-+When `_auth` is present on the connection string user authentication will be enabled and the provided user will be created
-+as an `admin` user. After initial creation, the parameter `_auth` has no effect anymore and can be omitted from the connection string.
-+
-+Example connection string:
-+
-+Create an user authentication database with user `admin` and password `admin`.
-+
-+`file:test.s3db?_auth&_auth_user=admin&_auth_pass=admin`
-+
-+Create an user authentication database with user `admin` and password `admin` and use `SHA1` for the password encoding.
-+
-+`file:test.s3db?_auth&_auth_user=admin&_auth_pass=admin&_auth_crypt=sha1`
-+
-+### Password Encoding
-+
-+The passwords within the user authentication module of SQLite are encoded with the SQLite function `sqlite_cryp`.
-+This function uses a ceasar-cypher which is quite insecure.
-+This library provides several additional password encoders which can be configured through the connection string.
-+
-+The password cypher can be configured with the key `_auth_crypt`. And if the configured password encoder also requires an
-+salt this can be configured with `_auth_salt`.
-+
-+#### Available Encoders
-+
-+- SHA1
-+- SSHA1 (Salted SHA1)
-+- SHA256
-+- SSHA256 (salted SHA256)
-+- SHA384
-+- SSHA384 (salted SHA384)
-+- SHA512
-+- SSHA512 (salted SHA512)
-+
-+### Restrictions
-+
-+Operations on the database regarding to user management can only be preformed by an administrator user.
-+
-+### Support
-+
-+The user authentication supports two kinds of users
-+
-+- administrators
-+- regular users
-+
-+### User Management
-+
-+User management can be done by directly using the `*SQLiteConn` or by SQL.
-+
-+#### SQL
-+
-+The following sql functions are available for user management.
-+
-+| Function | Arguments | Description |
-+|----------|-----------|-------------|
-+| `authenticate` | username `string`, password `string` | Will authenticate an user, this is done by the connection; and should not be used manually. |
-+| `auth_user_add` | username `string`, password `string`, admin `int` | This function will add an user to the database. if the database is not protected by user authentication it will enable it. Argument `admin` is an integer identifying if the added user should be an administrator. Only Administrators can add administrators. |
-+| `auth_user_change` | username `string`, password `string`, admin `int` | Function to modify an user. Users can change their own password, but only an administrator can change the administrator flag. |
-+| `authUserDelete` | username `string` | Delete an user from the database. Can only be used by an administrator. The current logged in administrator cannot be deleted. This is to make sure their is always an administrator remaining. |
-+
-+These functions will return an integer.
-+
-+- 0 (SQLITE_OK)
-+- 23 (SQLITE_AUTH) Failed to perform due to authentication or insufficient privileges
-+
-+##### Examples
-+
-+```sql
-+// Autheticate user
-+// Create Admin User
-+SELECT auth_user_add('admin2', 'admin2', 1);
-+
-+// Change password for user
-+SELECT auth_user_change('user', 'userpassword', 0);
-+
-+// Delete user
-+SELECT user_delete('user');
-+```
-+
-+#### *SQLiteConn
-+
-+The following functions are available for User authentication from the `*SQLiteConn`.
-+
-+| Function | Description |
-+|----------|-------------|
-+| `Authenticate(username, password string) error` | Authenticate user |
-+| `AuthUserAdd(username, password string, admin bool) error` | Add user |
-+| `AuthUserChange(username, password string, admin bool) error` | Modify user |
-+| `AuthUserDelete(username string) error` | Delete user |
-+
-+### Attached database
-+
-+When using attached databases. SQLite will use the authentication from the `main` database for the attached database(s).
-+
-+# Extensions
-+
-+If you want your own extension to be listed here or you want to add a reference to an extension; please submit an Issue for this.
-+
-+## Spatialite
-+
-+Spatialite is available as an extension to SQLite, and can be used in combination with this repository.
-+For an example see [shaxbee/go-spatialite](https://github.com/shaxbee/go-spatialite).
-+
-+# FAQ
-+
-+- Getting insert error while query is opened.
-+
-+ > You can pass some arguments into the connection string, for example, a URI.
-+ > See: [#39](https://github.com/mattn/go-sqlite3/issues/39)
-+
-+- Do you want to cross compile? mingw on Linux or Mac?
-+
-+ > See: [#106](https://github.com/mattn/go-sqlite3/issues/106)
-+ > See also: http://www.limitlessfx.com/cross-compile-golang-app-for-windows-from-linux.html
-+
-+- Want to get time.Time with current locale
-+
-+ Use `_loc=auto` in SQLite3 filename schema like `file:foo.db?_loc=auto`.
-+
-+- Can I use this in multiple routines concurrently?
-+
-+ Yes for readonly. But, No for writable. See [#50](https://github.com/mattn/go-sqlite3/issues/50), [#51](https://github.com/mattn/go-sqlite3/issues/51), [#209](https://github.com/mattn/go-sqlite3/issues/209), [#274](https://github.com/mattn/go-sqlite3/issues/274).
-+
-+- Why I'm getting `no such table` error?
-+
-+ Why is it racy if I use a `sql.Open("sqlite3", ":memory:")` database?
-+
-+ Each connection to `":memory:"` opens a brand new in-memory sql database, so if
-+ the stdlib's sql engine happens to open another connection and you've only
-+ specified `":memory:"`, that connection will see a brand new database. A
-+ workaround is to use `"file::memory:?cache=shared"` (or `"file:foobar?mode=memory&cache=shared"`). Every
-+ connection to this string will point to the same in-memory database.
-+
-+ Note that if the last database connection in the pool closes, the in-memory database is deleted. Make sure the [max idle connection limit](https://golang.org/pkg/database/sql/#DB.SetMaxIdleConns) is > 0, and the [connection lifetime](https://golang.org/pkg/database/sql/#DB.SetConnMaxLifetime) is infinite.
-+
-+ For more information see
-+ * [#204](https://github.com/mattn/go-sqlite3/issues/204)
-+ * [#511](https://github.com/mattn/go-sqlite3/issues/511)
-+ * https://www.sqlite.org/sharedcache.html#shared_cache_and_in_memory_databases
-+ * https://www.sqlite.org/inmemorydb.html#sharedmemdb
-+
-+- Reading from database with large amount of goroutines fails on OSX.
-+
-+ OS X limits OS-wide to not have more than 1000 files open simultaneously by default.
-+
-+ For more information see [#289](https://github.com/mattn/go-sqlite3/issues/289)
-+
-+- Trying to execute a `.` (dot) command throws an error.
-+
-+ Error: `Error: near ".": syntax error`
-+ Dot command are part of SQLite3 CLI not of this library.
-+
-+ You need to implement the feature or call the sqlite3 cli.
-+
-+ More information see [#305](https://github.com/mattn/go-sqlite3/issues/305)
-+
-+- Error: `database is locked`
-+
-+ When you get a database is locked. Please use the following options.
-+
-+ Add to DSN: `cache=shared`
-+
-+ Example:
-+ ```go
-+ db, err := sql.Open("sqlite3", "file:locked.sqlite?cache=shared")
-+ ```
-+
-+ Second please set the database connections of the SQL package to 1.
-+
-+ ```go
-+ db.SetMaxOpenConns(1)
-+ ```
-+
-+ More information see [#209](https://github.com/mattn/go-sqlite3/issues/209)
-+
-+# License
-+
-+MIT: http://mattn.mit-license.org/2018
-+
-+sqlite3-binding.c, sqlite3-binding.h, sqlite3ext.h
-+
-+The -binding suffix was added to avoid build failures under gccgo.
-+
-+In this repository, those files are an amalgamation of code that was copied from SQLite3. The license of that code is the same as the license of SQLite3.
-+
-+# Author
-+
-+Yasuhiro Matsumoto (a.k.a mattn)
-+
-+G.J.R. Timmer
-diff --git a/vendor/github.com/mattn/go-sqlite3/_example/custom_func/main.go b/vendor/github.com/mattn/go-sqlite3/_example/custom_func/main.go
-new file mode 100644
-index 00000000000..85657e62f16
---- /dev/null
-+++ b/vendor/github.com/mattn/go-sqlite3/_example/custom_func/main.go
-@@ -0,0 +1,133 @@
-+package main
-+
-+import (
-+ "database/sql"
-+ "fmt"
-+ "log"
-+ "math"
-+ "math/rand"
-+
-+ sqlite "github.com/mattn/go-sqlite3"
-+)
-+
-+// Computes x^y
-+func pow(x, y int64) int64 {
-+ return int64(math.Pow(float64(x), float64(y)))
-+}
-+
-+// Computes the bitwise exclusive-or of all its arguments
-+func xor(xs ...int64) int64 {
-+ var ret int64
-+ for _, x := range xs {
-+ ret ^= x
-+ }
-+ return ret
-+}
-+
-+// Returns a random number. It's actually deterministic here because
-+// we don't seed the RNG, but it's an example of a non-pure function
-+// from SQLite's POV.
-+func getrand() int64 {
-+ return rand.Int63()
-+}
-+
-+// Computes the standard deviation of a GROUPed BY set of values
-+type stddev struct {
-+ xs []int64
-+ // Running average calculation
-+ sum int64
-+ n int64
-+}
-+
-+func newStddev() *stddev { return &stddev{} }
-+
-+func (s *stddev) Step(x int64) {
-+ s.xs = append(s.xs, x)
-+ s.sum += x
-+ s.n++
-+}
-+
-+func (s *stddev) Done() float64 {
-+ mean := float64(s.sum) / float64(s.n)
-+ var sqDiff []float64
-+ for _, x := range s.xs {
-+ sqDiff = append(sqDiff, math.Pow(float64(x)-mean, 2))
-+ }
-+ var dev float64
-+ for _, x := range sqDiff {
-+ dev += x
-+ }
-+ dev /= float64(len(sqDiff))
-+ return math.Sqrt(dev)
-+}
-+
-+func main() {
-+ sql.Register("sqlite3_custom", &sqlite.SQLiteDriver{
-+ ConnectHook: func(conn *sqlite.SQLiteConn) error {
-+ if err := conn.RegisterFunc("pow", pow, true); err != nil {
-+ return err
-+ }
-+ if err := conn.RegisterFunc("xor", xor, true); err != nil {
-+ return err
-+ }
-+ if err := conn.RegisterFunc("rand", getrand, false); err != nil {
-+ return err
-+ }
-+ if err := conn.RegisterAggregator("stddev", newStddev, true); err != nil {
-+ return err
-+ }
-+ return nil
-+ },
-+ })
-+
-+ db, err := sql.Open("sqlite3_custom", ":memory:")
-+ if err != nil {
-+ log.Fatal("Failed to open database:", err)
-+ }
-+ defer db.Close()
-+
-+ var i int64
-+ err = db.QueryRow("SELECT pow(2,3)").Scan(&i)
-+ if err != nil {
-+ log.Fatal("POW query error:", err)
-+ }
-+ fmt.Println("pow(2,3) =", i) // 8
-+
-+ err = db.QueryRow("SELECT xor(1,2,3,4,5,6)").Scan(&i)
-+ if err != nil {
-+ log.Fatal("XOR query error:", err)
-+ }
-+ fmt.Println("xor(1,2,3,4,5) =", i) // 7
-+
-+ err = db.QueryRow("SELECT rand()").Scan(&i)
-+ if err != nil {
-+ log.Fatal("RAND query error:", err)
-+ }
-+ fmt.Println("rand() =", i) // pseudorandom
-+
-+ _, err = db.Exec("create table foo (department integer, profits integer)")
-+ if err != nil {
-+ log.Fatal("Failed to create table:", err)
-+ }
-+ _, err = db.Exec("insert into foo values (1, 10), (1, 20), (1, 45), (2, 42), (2, 115)")
-+ if err != nil {
-+ log.Fatal("Failed to insert records:", err)
-+ }
-+
-+ rows, err := db.Query("select department, stddev(profits) from foo group by department")
-+ if err != nil {
-+ log.Fatal("STDDEV query error:", err)
-+ }
-+ defer rows.Close()
-+ for rows.Next() {
-+ var dept int64
-+ var dev float64
-+ if err := rows.Scan(&dept, &dev); err != nil {
-+ log.Fatal(err)
-+ }
-+ fmt.Printf("dept=%d stddev=%f\n", dept, dev)
-+ }
-+ if err := rows.Err(); err != nil {
-+ log.Fatal(err)
-+ }
-+}
-diff --git a/vendor/github.com/mattn/go-sqlite3/_example/hook/hook.go b/vendor/github.com/mattn/go-sqlite3/_example/hook/hook.go
-new file mode 100644
-index 00000000000..602318157ba
---- /dev/null
-+++ b/vendor/github.com/mattn/go-sqlite3/_example/hook/hook.go
-@@ -0,0 +1,78 @@
-+package main
-+
-+import (
-+ "database/sql"
-+ "log"
-+ "os"
-+
-+ "github.com/mattn/go-sqlite3"
-+)
-+
-+func main() {
-+ sqlite3conn := []*sqlite3.SQLiteConn{}
-+ sql.Register("sqlite3_with_hook_example",
-+ &sqlite3.SQLiteDriver{
-+ ConnectHook: func(conn *sqlite3.SQLiteConn) error {
-+ sqlite3conn = append(sqlite3conn, conn)
-+ conn.RegisterUpdateHook(func(op int, db string, table string, rowid int64) {
-+ switch op {
-+ case sqlite3.SQLITE_INSERT:
-+ log.Println("Notified of insert on db", db, "table", table, "rowid", rowid)
-+ }
-+ })
-+ return nil
-+ },
-+ })
-+ os.Remove("./foo.db")
-+ os.Remove("./bar.db")
-+
-+ srcDb, err := sql.Open("sqlite3_with_hook_example", "./foo.db")
-+ if err != nil {
-+ log.Fatal(err)
-+ }
-+ defer srcDb.Close()
-+ srcDb.Ping()
-+
-+ _, err = srcDb.Exec("create table foo(id int, value text)")
-+ if err != nil {
-+ log.Fatal(err)
-+ }
-+ _, err = srcDb.Exec("insert into foo values(1, 'foo')")
-+ if err != nil {
-+ log.Fatal(err)
-+ }
-+ _, err = srcDb.Exec("insert into foo values(2, 'bar')")
-+ if err != nil {
-+ log.Fatal(err)
-+ }
-+ _, err = srcDb.Query("select * from foo")
-+ if err != nil {
-+ log.Fatal(err)
-+ }
-+ destDb, err := sql.Open("sqlite3_with_hook_example", "./bar.db")
-+ if err != nil {
-+ log.Fatal(err)
-+ }
-+ defer destDb.Close()
-+ destDb.Ping()
-+
-+ bk, err := sqlite3conn[1].Backup("main", sqlite3conn[0], "main")
-+ if err != nil {
-+ log.Fatal(err)
-+ }
-+
-+ _, err = bk.Step(-1)
-+ if err != nil {
-+ log.Fatal(err)
-+ }
-+ _, err = destDb.Query("select * from foo")
-+ if err != nil {
-+ log.Fatal(err)
-+ }
-+ _, err = destDb.Exec("insert into foo values(3, 'bar')")
-+ if err != nil {
-+ log.Fatal(err)
-+ }
-+
-+ bk.Finish()
-+}
-diff --git a/vendor/github.com/mattn/go-sqlite3/_example/limit/limit.go b/vendor/github.com/mattn/go-sqlite3/_example/limit/limit.go
-new file mode 100644
-index 00000000000..4e4b8977100
---- /dev/null
-+++ b/vendor/github.com/mattn/go-sqlite3/_example/limit/limit.go
-@@ -0,0 +1,113 @@
-+package main
-+
-+import (
-+ "database/sql"
-+ "fmt"
-+ "log"
-+ "os"
-+ "strings"
-+
-+ "github.com/mattn/go-sqlite3"
-+)
-+
-+func createBulkInsertQuery(n int, start int) (query string, args []interface{}) {
-+ values := make([]string, n)
-+ args = make([]interface{}, n*2)
-+ pos := 0
-+ for i := 0; i < n; i++ {
-+ values[i] = "(?, ?)"
-+ args[pos] = start + i
-+ args[pos+1] = fmt.Sprintf("こんにちわ世界%03d", i)
-+ pos += 2
-+ }
-+ query = fmt.Sprintf(
-+ "insert into foo(id, name) values %s",
-+ strings.Join(values, ", "),
-+ )
-+ return
-+}
-+
-+func bukInsert(db *sql.DB, query string, args []interface{}) (err error) {
-+ stmt, err := db.Prepare(query)
-+ if err != nil {
-+ return
-+ }
-+
-+ _, err = stmt.Exec(args...)
-+ if err != nil {
-+ return
-+ }
-+
-+ return
-+}
-+
-+func main() {
-+ var sqlite3conn *sqlite3.SQLiteConn
-+ sql.Register("sqlite3_with_limit", &sqlite3.SQLiteDriver{
-+ ConnectHook: func(conn *sqlite3.SQLiteConn) error {
-+ sqlite3conn = conn
-+ return nil
-+ },
-+ })
-+
-+ os.Remove("./foo.db")
-+ db, err := sql.Open("sqlite3_with_limit", "./foo.db")
-+ if err != nil {
-+ log.Fatal(err)
-+ }
-+ defer db.Close()
-+
-+ sqlStmt := `
-+ create table foo (id integer not null primary key, name text);
-+ delete from foo;
-+ `
-+ _, err = db.Exec(sqlStmt)
-+ if err != nil {
-+ log.Printf("%q: %s\n", err, sqlStmt)
-+ return
-+ }
-+
-+ if sqlite3conn == nil {
-+ log.Fatal("not set sqlite3 connection")
-+ }
-+
-+ limitVariableNumber := sqlite3conn.GetLimit(sqlite3.SQLITE_LIMIT_VARIABLE_NUMBER)
-+ log.Printf("default SQLITE_LIMIT_VARIABLE_NUMBER: %d", limitVariableNumber)
-+
-+ num := 400
-+ query, args := createBulkInsertQuery(num, 0)
-+ err = bukInsert(db, query, args)
-+ if err != nil {
-+ log.Fatal(err)
-+ }
-+
-+ smallLimitVariableNumber := 100
-+ sqlite3conn.SetLimit(sqlite3.SQLITE_LIMIT_VARIABLE_NUMBER, smallLimitVariableNumber)
-+
-+ limitVariableNumber = sqlite3conn.GetLimit(sqlite3.SQLITE_LIMIT_VARIABLE_NUMBER)
-+ log.Printf("updated SQLITE_LIMIT_VARIABLE_NUMBER: %d", limitVariableNumber)
-+
-+ query, args = createBulkInsertQuery(num, num)
-+ err = bukInsert(db, query, args)
-+ if err != nil {
-+ if err != nil {
-+ log.Printf("expect failed since SQLITE_LIMIT_VARIABLE_NUMBER is too small: %v", err)
-+ }
-+ }
-+
-+ bigLimitVariableNumber := 999999
-+ sqlite3conn.SetLimit(sqlite3.SQLITE_LIMIT_VARIABLE_NUMBER, bigLimitVariableNumber)
-+ limitVariableNumber = sqlite3conn.GetLimit(sqlite3.SQLITE_LIMIT_VARIABLE_NUMBER)
-+ log.Printf("set SQLITE_LIMIT_VARIABLE_NUMBER: %d", bigLimitVariableNumber)
-+ log.Printf("updated SQLITE_LIMIT_VARIABLE_NUMBER: %d", limitVariableNumber)
-+
-+ query, args = createBulkInsertQuery(500, num+num)
-+ err = bukInsert(db, query, args)
-+ if err != nil {
-+ if err != nil {
-+ log.Fatal(err)
-+ }
-+ }
-+
-+ log.Println("no error if SQLITE_LIMIT_VARIABLE_NUMBER > 999")
-+}
-diff --git a/vendor/github.com/mattn/go-sqlite3/_example/mod_regexp/Makefile b/vendor/github.com/mattn/go-sqlite3/_example/mod_regexp/Makefile
-new file mode 100644
-index 00000000000..97b1e0f365f
---- /dev/null
-+++ b/vendor/github.com/mattn/go-sqlite3/_example/mod_regexp/Makefile
-@@ -0,0 +1,22 @@
-+ifeq ($(OS),Windows_NT)
-+EXE=extension.exe
-+EXT=sqlite3_mod_regexp.dll
-+RM=cmd /c del
-+LDFLAG=
-+else
-+EXE=extension
-+EXT=sqlite3_mod_regexp.so
-+RM=rm
-+LDFLAG=-fPIC
-+endif
-+
-+all : $(EXE) $(EXT)
-+
-+$(EXE) : extension.go
-+ go build $<
-+
-+$(EXT) : sqlite3_mod_regexp.c
-+ gcc $(LDFLAG) -shared -o $@ $< -lsqlite3 -lpcre
-+
-+clean :
-+ @-$(RM) $(EXE) $(EXT)
-diff --git a/vendor/github.com/mattn/go-sqlite3/_example/mod_regexp/extension.go b/vendor/github.com/mattn/go-sqlite3/_example/mod_regexp/extension.go
-new file mode 100644
-index 00000000000..61ceb55e6b9
---- /dev/null
-+++ b/vendor/github.com/mattn/go-sqlite3/_example/mod_regexp/extension.go
-@@ -0,0 +1,43 @@
-+package main
-+
-+import (
-+ "database/sql"
-+ "fmt"
-+ "github.com/mattn/go-sqlite3"
-+ "log"
-+)
-+
-+func main() {
-+ sql.Register("sqlite3_with_extensions",
-+ &sqlite3.SQLiteDriver{
-+ Extensions: []string{
-+ "sqlite3_mod_regexp",
-+ },
-+ })
-+
-+ db, err := sql.Open("sqlite3_with_extensions", ":memory:")
-+ if err != nil {
-+ log.Fatal(err)
-+ }
-+ defer db.Close()
-+
-+ // Force db to make a new connection in pool
-+ // by putting the original in a transaction
-+ tx, err := db.Begin()
-+ if err != nil {
-+ log.Fatal(err)
-+ }
-+ defer tx.Commit()
-+
-+ // New connection works (hopefully!)
-+ rows, err := db.Query("select 'hello world' where 'hello world' regexp '^hello.*d$'")
-+ if err != nil {
-+ log.Fatal(err)
-+ }
-+ defer rows.Close()
-+ for rows.Next() {
-+ var helloworld string
-+ rows.Scan(&helloworld)
-+ fmt.Println(helloworld)
-+ }
-+}
-diff --git a/vendor/github.com/mattn/go-sqlite3/_example/mod_regexp/sqlite3_mod_regexp.c b/vendor/github.com/mattn/go-sqlite3/_example/mod_regexp/sqlite3_mod_regexp.c
-new file mode 100644
-index 00000000000..cdf674fd9b9
---- /dev/null
-+++ b/vendor/github.com/mattn/go-sqlite3/_example/mod_regexp/sqlite3_mod_regexp.c
-@@ -0,0 +1,31 @@
-+#include
-+#include
-+#include
-+#include
-+
-+SQLITE_EXTENSION_INIT1
-+static void regexp_func(sqlite3_context *context, int argc, sqlite3_value **argv) {
-+ if (argc >= 2) {
-+ const char *target = (const char *)sqlite3_value_text(argv[1]);
-+ const char *pattern = (const char *)sqlite3_value_text(argv[0]);
-+ const char* errstr = NULL;
-+ int erroff = 0;
-+ int vec[500];
-+ int n, rc;
-+ pcre* re = pcre_compile(pattern, 0, &errstr, &erroff, NULL);
-+ rc = pcre_exec(re, NULL, target, strlen(target), 0, 0, vec, 500);
-+ if (rc <= 0) {
-+ sqlite3_result_error(context, errstr, 0);
-+ return;
-+ }
-+ sqlite3_result_int(context, 1);
-+ }
-+}
-+
-+#ifdef _WIN32
-+__declspec(dllexport)
-+#endif
-+int sqlite3_extension_init(sqlite3 *db, char **errmsg, const sqlite3_api_routines *api) {
-+ SQLITE_EXTENSION_INIT2(api);
-+ return sqlite3_create_function(db, "regexp", 2, SQLITE_UTF8, (void*)db, regexp_func, NULL, NULL);
-+}
-diff --git a/vendor/github.com/mattn/go-sqlite3/_example/mod_vtable/Makefile b/vendor/github.com/mattn/go-sqlite3/_example/mod_vtable/Makefile
-new file mode 100644
-index 00000000000..cdd4853d6e4
---- /dev/null
-+++ b/vendor/github.com/mattn/go-sqlite3/_example/mod_vtable/Makefile
-@@ -0,0 +1,24 @@
-+ifeq ($(OS),Windows_NT)
-+EXE=extension.exe
-+EXT=sqlite3_mod_vtable.dll
-+RM=cmd /c del
-+LIBCURL=-lcurldll
-+LDFLAG=
-+else
-+EXE=extension
-+EXT=sqlite3_mod_vtable.so
-+RM=rm
-+LDFLAG=-fPIC
-+LIBCURL=-lcurl
-+endif
-+
-+all : $(EXE) $(EXT)
-+
-+$(EXE) : extension.go
-+ go build $<
-+
-+$(EXT) : sqlite3_mod_vtable.cc
-+ g++ $(LDFLAG) -shared -o $@ $< -lsqlite3 $(LIBCURL)
-+
-+clean :
-+ @-$(RM) $(EXE) $(EXT)
-diff --git a/vendor/github.com/mattn/go-sqlite3/_example/mod_vtable/extension.go b/vendor/github.com/mattn/go-sqlite3/_example/mod_vtable/extension.go
-new file mode 100644
-index 00000000000..f738af6aeaa
---- /dev/null
-+++ b/vendor/github.com/mattn/go-sqlite3/_example/mod_vtable/extension.go
-@@ -0,0 +1,37 @@
-+package main
-+
-+import (
-+ "database/sql"
-+ "fmt"
-+ "log"
-+
-+ "github.com/mattn/go-sqlite3"
-+)
-+
-+func main() {
-+ sql.Register("sqlite3_with_extensions",
-+ &sqlite3.SQLiteDriver{
-+ Extensions: []string{
-+ "sqlite3_mod_vtable",
-+ },
-+ })
-+
-+ db, err := sql.Open("sqlite3_with_extensions", ":memory:")
-+ if err != nil {
-+ log.Fatal(err)
-+ }
-+ defer db.Close()
-+
-+ db.Exec("create virtual table repo using github(id, full_name, description, html_url)")
-+
-+ rows, err := db.Query("select id, full_name, description, html_url from repo")
-+ if err != nil {
-+ log.Fatal(err)
-+ }
-+ defer rows.Close()
-+ for rows.Next() {
-+ var id, fullName, description, htmlURL string
-+ rows.Scan(&id, &fullName, &description, &htmlURL)
-+ fmt.Printf("%s: %s\n\t%s\n\t%s\n\n", id, fullName, description, htmlURL)
-+ }
-+}
-diff --git a/vendor/github.com/mattn/go-sqlite3/_example/mod_vtable/picojson.h b/vendor/github.com/mattn/go-sqlite3/_example/mod_vtable/picojson.h
-new file mode 100644
-index 00000000000..516b2d16295
---- /dev/null
-+++ b/vendor/github.com/mattn/go-sqlite3/_example/mod_vtable/picojson.h
-@@ -0,0 +1,1040 @@
-+/*
-+ * Copyright 2009-2010 Cybozu Labs, Inc.
-+ * Copyright 2011 Kazuho Oku
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ *
-+ * 1. Redistributions of source code must retain the above copyright notice,
-+ * this list of conditions and the following disclaimer.
-+ * 2. Redistributions in binary form must reproduce the above copyright notice,
-+ * this list of conditions and the following disclaimer in the documentation
-+ * and/or other materials provided with the distribution.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY CYBOZU LABS, INC. ``AS IS'' AND ANY EXPRESS OR
-+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
-+ * EVENT SHALL CYBOZU LABS, INC. OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
-+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ *
-+ * The views and conclusions contained in the software and documentation are
-+ * those of the authors and should not be interpreted as representing official
-+ * policies, either expressed or implied, of Cybozu Labs, Inc.
-+ *
-+ */
-+#ifndef picojson_h
-+#define picojson_h
-+
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+
-+#ifdef _MSC_VER
-+ #define SNPRINTF _snprintf_s
-+ #pragma warning(push)
-+ #pragma warning(disable : 4244) // conversion from int to char
-+#else
-+ #define SNPRINTF snprintf
-+#endif
-+
-+namespace picojson {
-+
-+ enum {
-+ null_type,
-+ boolean_type,
-+ number_type,
-+ string_type,
-+ array_type,
-+ object_type
-+ };
-+
-+ struct null {};
-+
-+ class value {
-+ public:
-+ typedef std::vector array;
-+ typedef std::map object;
-+ union _storage {
-+ bool boolean_;
-+ double number_;
-+ std::string* string_;
-+ array* array_;
-+ object* object_;
-+ };
-+ protected:
-+ int type_;
-+ _storage u_;
-+ public:
-+ value();
-+ value(int type, bool);
-+ explicit value(bool b);
-+ explicit value(double n);
-+ explicit value(const std::string& s);
-+ explicit value(const array& a);
-+ explicit value(const object& o);
-+ explicit value(const char* s);
-+ value(const char* s, size_t len);
-+ ~value();
-+ value(const value& x);
-+ value& operator=(const value& x);
-+ void swap(value& x);
-+ template bool is() const;
-+ template const T& get() const;
-+ template T& get();
-+ bool evaluate_as_boolean() const;
-+ const value& get(size_t idx) const;
-+ const value& get(const std::string& key) const;
-+ bool contains(size_t idx) const;
-+ bool contains(const std::string& key) const;
-+ std::string to_str() const;
-+ template void serialize(Iter os) const;
-+ std::string serialize() const;
-+ private:
-+ template value(const T*); // intentionally defined to block implicit conversion of pointer to bool
-+ };
-+
-+ typedef value::array array;
-+ typedef value::object object;
-+
-+ inline value::value() : type_(null_type) {}
-+
-+ inline value::value(int type, bool) : type_(type) {
-+ switch (type) {
-+#define INIT(p, v) case p##type: u_.p = v; break
-+ INIT(boolean_, false);
-+ INIT(number_, 0.0);
-+ INIT(string_, new std::string());
-+ INIT(array_, new array());
-+ INIT(object_, new object());
-+#undef INIT
-+ default: break;
-+ }
-+ }
-+
-+ inline value::value(bool b) : type_(boolean_type) {
-+ u_.boolean_ = b;
-+ }
-+
-+ inline value::value(double n) : type_(number_type) {
-+ u_.number_ = n;
-+ }
-+
-+ inline value::value(const std::string& s) : type_(string_type) {
-+ u_.string_ = new std::string(s);
-+ }
-+
-+ inline value::value(const array& a) : type_(array_type) {
-+ u_.array_ = new array(a);
-+ }
-+
-+ inline value::value(const object& o) : type_(object_type) {
-+ u_.object_ = new object(o);
-+ }
-+
-+ inline value::value(const char* s) : type_(string_type) {
-+ u_.string_ = new std::string(s);
-+ }
-+
-+ inline value::value(const char* s, size_t len) : type_(string_type) {
-+ u_.string_ = new std::string(s, len);
-+ }
-+
-+ inline value::~value() {
-+ switch (type_) {
-+#define DEINIT(p) case p##type: delete u_.p; break
-+ DEINIT(string_);
-+ DEINIT(array_);
-+ DEINIT(object_);
-+#undef DEINIT
-+ default: break;
-+ }
-+ }
-+
-+ inline value::value(const value& x) : type_(x.type_) {
-+ switch (type_) {
-+#define INIT(p, v) case p##type: u_.p = v; break
-+ INIT(string_, new std::string(*x.u_.string_));
-+ INIT(array_, new array(*x.u_.array_));
-+ INIT(object_, new object(*x.u_.object_));
-+#undef INIT
-+ default:
-+ u_ = x.u_;
-+ break;
-+ }
-+ }
-+
-+ inline value& value::operator=(const value& x) {
-+ if (this != &x) {
-+ this->~value();
-+ new (this) value(x);
-+ }
-+ return *this;
-+ }
-+
-+ inline void value::swap(value& x) {
-+ std::swap(type_, x.type_);
-+ std::swap(u_, x.u_);
-+ }
-+
-+#define IS(ctype, jtype) \
-+ template <> inline bool value::is() const { \
-+ return type_ == jtype##_type; \
-+ }
-+ IS(null, null)
-+ IS(bool, boolean)
-+ IS(int, number)
-+ IS(double, number)
-+ IS(std::string, string)
-+ IS(array, array)
-+ IS(object, object)
-+#undef IS
-+
-+#define GET(ctype, var) \
-+ template <> inline const ctype& value::get() const { \
-+ assert("type mismatch! call vis() before get()" \
-+ && is()); \
-+ return var; \
-+ } \
-+ template <> inline ctype& value::get() { \
-+ assert("type mismatch! call is() before get()" \
-+ && is()); \
-+ return var; \
-+ }
-+ GET(bool, u_.boolean_)
-+ GET(double, u_.number_)
-+ GET(std::string, *u_.string_)
-+ GET(array, *u_.array_)
-+ GET(object, *u_.object_)
-+#undef GET
-+
-+ inline bool value::evaluate_as_boolean() const {
-+ switch (type_) {
-+ case null_type:
-+ return false;
-+ case boolean_type:
-+ return u_.boolean_;
-+ case number_type:
-+ return u_.number_ != 0;
-+ case string_type:
-+ return ! u_.string_->empty();
-+ default:
-+ return true;
-+ }
-+ }
-+
-+ inline const value& value::get(size_t idx) const {
-+ static value s_null;
-+ assert(is());
-+ return idx < u_.array_->size() ? (*u_.array_)[idx] : s_null;
-+ }
-+
-+ inline const value& value::get(const std::string& key) const {
-+ static value s_null;
-+ assert(is());
-+ object::const_iterator i = u_.object_->find(key);
-+ return i != u_.object_->end() ? i->second : s_null;
-+ }
-+
-+ inline bool value::contains(size_t idx) const {
-+ assert(is());
-+ return idx < u_.array_->size();
-+ }
-+
-+ inline bool value::contains(const std::string& key) const {
-+ assert(is());
-+ object::const_iterator i = u_.object_->find(key);
-+ return i != u_.object_->end();
-+ }
-+
-+ inline std::string value::to_str() const {
-+ switch (type_) {
-+ case null_type: return "null";
-+ case boolean_type: return u_.boolean_ ? "true" : "false";
-+ case number_type: {
-+ char buf[256];
-+ double tmp;
-+ SNPRINTF(buf, sizeof(buf), fabs(u_.number_) < (1ULL << 53) && modf(u_.number_, &tmp) == 0 ? "%.f" : "%.17g", u_.number_);
-+ return buf;
-+ }
-+ case string_type: return *u_.string_;
-+ case array_type: return "array";
-+ case object_type: return "object";
-+ default: assert(0);
-+#ifdef _MSC_VER
-+ __assume(0);
-+#endif
-+ }
-+ return std::string();
-+ }
-+
-+ template void copy(const std::string& s, Iter oi) {
-+ std::copy(s.begin(), s.end(), oi);
-+ }
-+
-+ template void serialize_str(const std::string& s, Iter oi) {
-+ *oi++ = '"';
-+ for (std::string::const_iterator i = s.begin(); i != s.end(); ++i) {
-+ switch (*i) {
-+#define MAP(val, sym) case val: copy(sym, oi); break
-+ MAP('"', "\\\"");
-+ MAP('\\', "\\\\");
-+ MAP('/', "\\/");
-+ MAP('\b', "\\b");
-+ MAP('\f', "\\f");
-+ MAP('\n', "\\n");
-+ MAP('\r', "\\r");
-+ MAP('\t', "\\t");
-+#undef MAP
-+ default:
-+ if ((unsigned char)*i < 0x20 || *i == 0x7f) {
-+ char buf[7];
-+ SNPRINTF(buf, sizeof(buf), "\\u%04x", *i & 0xff);
-+ copy(buf, buf + 6, oi);
-+ } else {
-+ *oi++ = *i;
-+ }
-+ break;
-+ }
-+ }
-+ *oi++ = '"';
-+ }
-+
-+ template void value::serialize(Iter oi) const {
-+ switch (type_) {
-+ case string_type:
-+ serialize_str(*u_.string_, oi);
-+ break;
-+ case array_type: {
-+ *oi++ = '[';
-+ for (array::const_iterator i = u_.array_->begin();
-+ i != u_.array_->end();
-+ ++i) {
-+ if (i != u_.array_->begin()) {
-+ *oi++ = ',';
-+ }
-+ i->serialize(oi);
-+ }
-+ *oi++ = ']';
-+ break;
-+ }
-+ case object_type: {
-+ *oi++ = '{';
-+ for (object::const_iterator i = u_.object_->begin();
-+ i != u_.object_->end();
-+ ++i) {
-+ if (i != u_.object_->begin()) {
-+ *oi++ = ',';
-+ }
-+ serialize_str(i->first, oi);
-+ *oi++ = ':';
-+ i->second.serialize(oi);
-+ }
-+ *oi++ = '}';
-+ break;
-+ }
-+ default:
-+ copy(to_str(), oi);
-+ break;
-+ }
-+ }
-+
-+ inline std::string value::serialize() const {
-+ std::string s;
-+ serialize(std::back_inserter(s));
-+ return s;
-+ }
-+
-+ template class input {
-+ protected:
-+ Iter cur_, end_;
-+ int last_ch_;
-+ bool ungot_;
-+ int line_;
-+ public:
-+ input(const Iter& first, const Iter& last) : cur_(first), end_(last), last_ch_(-1), ungot_(false), line_(1) {}
-+ int getc() {
-+ if (ungot_) {
-+ ungot_ = false;
-+ return last_ch_;
-+ }
-+ if (cur_ == end_) {
-+ last_ch_ = -1;
-+ return -1;
-+ }
-+ if (last_ch_ == '\n') {
-+ line_++;
-+ }
-+ last_ch_ = *cur_++ & 0xff;
-+ return last_ch_;
-+ }
-+ void ungetc() {
-+ if (last_ch_ != -1) {
-+ assert(! ungot_);
-+ ungot_ = true;
-+ }
-+ }
-+ Iter cur() const { return cur_; }
-+ int line() const { return line_; }
-+ void skip_ws() {
-+ while (1) {
-+ int ch = getc();
-+ if (! (ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r')) {
-+ ungetc();
-+ break;
-+ }
-+ }
-+ }
-+ bool expect(int expect) {
-+ skip_ws();
-+ if (getc() != expect) {
-+ ungetc();
-+ return false;
-+ }
-+ return true;
-+ }
-+ bool match(const std::string& pattern) {
-+ for (std::string::const_iterator pi(pattern.begin());
-+ pi != pattern.end();
-+ ++pi) {
-+ if (getc() != *pi) {
-+ ungetc();
-+ return false;
-+ }
-+ }
-+ return true;
-+ }
-+ };
-+
-+ template inline int _parse_quadhex(input &in) {
-+ int uni_ch = 0, hex;
-+ for (int i = 0; i < 4; i++) {
-+ if ((hex = in.getc()) == -1) {
-+ return -1;
-+ }
-+ if ('0' <= hex && hex <= '9') {
-+ hex -= '0';
-+ } else if ('A' <= hex && hex <= 'F') {
-+ hex -= 'A' - 0xa;
-+ } else if ('a' <= hex && hex <= 'f') {
-+ hex -= 'a' - 0xa;
-+ } else {
-+ in.ungetc();
-+ return -1;
-+ }
-+ uni_ch = uni_ch * 16 + hex;
-+ }
-+ return uni_ch;
-+ }
-+
-+ template inline bool _parse_codepoint(String& out, input& in) {
-+ int uni_ch;
-+ if ((uni_ch = _parse_quadhex(in)) == -1) {
-+ return false;
-+ }
-+ if (0xd800 <= uni_ch && uni_ch <= 0xdfff) {
-+ if (0xdc00 <= uni_ch) {
-+ // a second 16-bit of a surrogate pair appeared
-+ return false;
-+ }
-+ // first 16-bit of surrogate pair, get the next one
-+ if (in.getc() != '\\' || in.getc() != 'u') {
-+ in.ungetc();
-+ return false;
-+ }
-+ int second = _parse_quadhex(in);
-+ if (! (0xdc00 <= second && second <= 0xdfff)) {
-+ return false;
-+ }
-+ uni_ch = ((uni_ch - 0xd800) << 10) | ((second - 0xdc00) & 0x3ff);
-+ uni_ch += 0x10000;
-+ }
-+ if (uni_ch < 0x80) {
-+ out.push_back(uni_ch);
-+ } else {
-+ if (uni_ch < 0x800) {
-+ out.push_back(0xc0 | (uni_ch >> 6));
-+ } else {
-+ if (uni_ch < 0x10000) {
-+ out.push_back(0xe0 | (uni_ch >> 12));
-+ } else {
-+ out.push_back(0xf0 | (uni_ch >> 18));
-+ out.push_back(0x80 | ((uni_ch >> 12) & 0x3f));
-+ }
-+ out.push_back(0x80 | ((uni_ch >> 6) & 0x3f));
-+ }
-+ out.push_back(0x80 | (uni_ch & 0x3f));
-+ }
-+ return true;
-+ }
-+
-+ template inline bool _parse_string(String& out, input& in) {
-+ while (1) {
-+ int ch = in.getc();
-+ if (ch < ' ') {
-+ in.ungetc();
-+ return false;
-+ } else if (ch == '"') {
-+ return true;
-+ } else if (ch == '\\') {
-+ if ((ch = in.getc()) == -1) {
-+ return false;
-+ }
-+ switch (ch) {
-+#define MAP(sym, val) case sym: out.push_back(val); break
-+ MAP('"', '\"');
-+ MAP('\\', '\\');
-+ MAP('/', '/');
-+ MAP('b', '\b');
-+ MAP('f', '\f');
-+ MAP('n', '\n');
-+ MAP('r', '\r');
-+ MAP('t', '\t');
-+#undef MAP
-+ case 'u':
-+ if (! _parse_codepoint(out, in)) {
-+ return false;
-+ }
-+ break;
-+ default:
-+ return false;
-+ }
-+ } else {
-+ out.push_back(ch);
-+ }
-+ }
-+ return false;
-+ }
-+
-+ template inline bool _parse_array(Context& ctx, input& in) {
-+ if (! ctx.parse_array_start()) {
-+ return false;
-+ }
-+ size_t idx = 0;
-+ if (in.expect(']')) {
-+ return ctx.parse_array_stop(idx);
-+ }
-+ do {
-+ if (! ctx.parse_array_item(in, idx)) {
-+ return false;
-+ }
-+ idx++;
-+ } while (in.expect(','));
-+ return in.expect(']') && ctx.parse_array_stop(idx);
-+ }
-+
-+ template inline bool _parse_object(Context& ctx, input& in) {
-+ if (! ctx.parse_object_start()) {
-+ return false;
-+ }
-+ if (in.expect('}')) {
-+ return true;
-+ }
-+ do {
-+ std::string key;
-+ if (! in.expect('"')
-+ || ! _parse_string(key, in)
-+ || ! in.expect(':')) {
-+ return false;
-+ }
-+ if (! ctx.parse_object_item(in, key)) {
-+ return false;
-+ }
-+ } while (in.expect(','));
-+ return in.expect('}');
-+ }
-+
-+ template inline bool _parse_number(double& out, input& in) {
-+ std::string num_str;
-+ while (1) {
-+ int ch = in.getc();
-+ if (('0' <= ch && ch <= '9') || ch == '+' || ch == '-' || ch == '.'
-+ || ch == 'e' || ch == 'E') {
-+ num_str.push_back(ch);
-+ } else {
-+ in.ungetc();
-+ break;
-+ }
-+ }
-+ char* endp;
-+ out = strtod(num_str.c_str(), &endp);
-+ return endp == num_str.c_str() + num_str.size();
-+ }
-+
-+ template inline bool _parse(Context& ctx, input& in) {
-+ in.skip_ws();
-+ int ch = in.getc();
-+ switch (ch) {
-+#define IS(ch, text, op) case ch: \
-+ if (in.match(text) && op) { \
-+ return true; \
-+ } else { \
-+ return false; \
-+ }
-+ IS('n', "ull", ctx.set_null());
-+ IS('f', "alse", ctx.set_bool(false));
-+ IS('t', "rue", ctx.set_bool(true));
-+#undef IS
-+ case '"':
-+ return ctx.parse_string(in);
-+ case '[':
-+ return _parse_array(ctx, in);
-+ case '{':
-+ return _parse_object(ctx, in);
-+ default:
-+ if (('0' <= ch && ch <= '9') || ch == '-') {
-+ in.ungetc();
-+ double f;
-+ if (_parse_number(f, in)) {
-+ ctx.set_number(f);
-+ return true;
-+ } else {
-+ return false;
-+ }
-+ }
-+ break;
-+ }
-+ in.ungetc();
-+ return false;
-+ }
-+
-+ class deny_parse_context {
-+ public:
-+ bool set_null() { return false; }
-+ bool set_bool(bool) { return false; }
-+ bool set_number(double) { return false; }
-+ template bool parse_string(input&) { return false; }
-+ bool parse_array_start() { return false; }
-+ template bool parse_array_item(input&, size_t) {
-+ return false;
-+ }
-+ bool parse_array_stop(size_t) { return false; }
-+ bool parse_object_start() { return false; }
-+ template bool parse_object_item(input&, const std::string&) {
-+ return false;
-+ }
-+ };
-+
-+ class default_parse_context {
-+ protected:
-+ value* out_;
-+ public:
-+ default_parse_context(value* out) : out_(out) {}
-+ bool set_null() {
-+ *out_ = value();
-+ return true;
-+ }
-+ bool set_bool(bool b) {
-+ *out_ = value(b);
-+ return true;
-+ }
-+ bool set_number(double f) {
-+ *out_ = value(f);
-+ return true;
-+ }
-+ template bool parse_string(input& in) {
-+ *out_ = value(string_type, false);
-+ return _parse_string(out_->get(), in);
-+ }
-+ bool parse_array_start() {
-+ *out_ = value(array_type, false);
-+ return true;
-+ }
-+ template bool parse_array_item(input& in, size_t) {
-+ array& a = out_->get();
-+ a.push_back(value());
-+ default_parse_context ctx(&a.back());
-+ return _parse(ctx, in);
-+ }
-+ bool parse_array_stop(size_t) { return true; }
-+ bool parse_object_start() {
-+ *out_ = value(object_type, false);
-+ return true;
-+ }
-+ template bool parse_object_item(input& in, const std::string& key) {
-+ object& o = out_->get();
-+ default_parse_context ctx(&o[key]);
-+ return _parse(ctx, in);
-+ }
-+ private:
-+ default_parse_context(const default_parse_context&);
-+ default_parse_context& operator=(const default_parse_context&);
-+ };
-+
-+ class null_parse_context {
-+ public:
-+ struct dummy_str {
-+ void push_back(int) {}
-+ };
-+ public:
-+ null_parse_context() {}
-+ bool set_null() { return true; }
-+ bool set_bool(bool) { return true; }
-+ bool set_number(double) { return true; }
-+ template bool parse_string(input& in) {
-+ dummy_str s;
-+ return _parse_string(s, in);
-+ }
-+ bool parse_array_start() { return true; }
-+ template bool parse_array_item(input& in, size_t) {
-+ return _parse(*this, in);
-+ }
-+ bool parse_array_stop(size_t) { return true; }
-+ bool parse_object_start() { return true; }
-+ template bool parse_object_item(input& in, const std::string&) {
-+ return _parse(*this, in);
-+ }
-+ private:
-+ null_parse_context(const null_parse_context&);
-+ null_parse_context& operator=(const null_parse_context&);
-+ };
-+
-+ // obsolete, use the version below
-+ template inline std::string parse(value& out, Iter& pos, const Iter& last) {
-+ std::string err;
-+ pos = parse(out, pos, last, &err);
-+ return err;
-+ }
-+
-+ template inline Iter _parse(Context& ctx, const Iter& first, const Iter& last, std::string* err) {
-+ input in(first, last);
-+ if (! _parse(ctx, in) && err != NULL) {
-+ char buf[64];
-+ SNPRINTF(buf, sizeof(buf), "syntax error at line %d near: ", in.line());
-+ *err = buf;
-+ while (1) {
-+ int ch = in.getc();
-+ if (ch == -1 || ch == '\n') {
-+ break;
-+ } else if (ch >= ' ') {
-+ err->push_back(ch);
-+ }
-+ }
-+ }
-+ return in.cur();
-+ }
-+
-+ template inline Iter parse(value& out, const Iter& first, const Iter& last, std::string* err) {
-+ default_parse_context ctx(&out);
-+ return _parse(ctx, first, last, err);
-+ }
-+
-+ inline std::string parse(value& out, std::istream& is) {
-+ std::string err;
-+ parse(out, std::istreambuf_iterator(is.rdbuf()),
-+ std::istreambuf_iterator(), &err);
-+ return err;
-+ }
-+
-+ template struct last_error_t {
-+ static std::string s;
-+ };
-+ template std::string last_error_t::s;
-+
-+ inline void set_last_error(const std::string& s) {
-+ last_error_t::s = s;
-+ }
-+
-+ inline const std::string& get_last_error() {
-+ return last_error_t::s;
-+ }
-+
-+ inline bool operator==(const value& x, const value& y) {
-+ if (x.is())
-+ return y.is();
-+#define PICOJSON_CMP(type) \
-+ if (x.is()) \
-+ return y.is() && x.get() == y.get()
-+ PICOJSON_CMP(bool);
-+ PICOJSON_CMP(double);
-+ PICOJSON_CMP(std::string);
-+ PICOJSON_CMP(array);
-+ PICOJSON_CMP(object);
-+#undef PICOJSON_CMP
-+ assert(0);
-+#ifdef _MSC_VER
-+ __assume(0);
-+#endif
-+ return false;
-+ }
-+
-+ inline bool operator!=(const value& x, const value& y) {
-+ return ! (x == y);
-+ }
-+}
-+
-+namespace std {
-+ template<> inline void swap(picojson::value& x, picojson::value& y)
-+ {
-+ x.swap(y);
-+ }
-+}
-+
-+inline std::istream& operator>>(std::istream& is, picojson::value& x)
-+{
-+ picojson::set_last_error(std::string());
-+ std::string err = picojson::parse(x, is);
-+ if (! err.empty()) {
-+ picojson::set_last_error(err);
-+ is.setstate(std::ios::failbit);
-+ }
-+ return is;
-+}
-+
-+inline std::ostream& operator<<(std::ostream& os, const picojson::value& x)
-+{
-+ x.serialize(std::ostream_iterator(os));
-+ return os;
-+}
-+#ifdef _MSC_VER
-+ #pragma warning(pop)
-+#endif
-+
-+#endif
-+#ifdef TEST_PICOJSON
-+#ifdef _MSC_VER
-+ #pragma warning(disable : 4127) // conditional expression is constant
-+#endif
-+
-+using namespace std;
-+
-+static void plan(int num)
-+{
-+ printf("1..%d\n", num);
-+}
-+
-+static bool success = true;
-+
-+static void ok(bool b, const char* name = "")
-+{
-+ static int n = 1;
-+ if (! b)
-+ success = false;
-+ printf("%s %d - %s\n", b ? "ok" : "ng", n++, name);
-+}
-+
-+template void is(const T& x, const T& y, const char* name = "")
-+{
-+ if (x == y) {
-+ ok(true, name);
-+ } else {
-+ ok(false, name);
-+ }
-+}
-+
-+#include
-+#include