diff --git a/.github/workflows/minikube.yaml b/.github/workflows/minikube.yaml
index 5a19b8b5c..5713a83a0 100644
--- a/.github/workflows/minikube.yaml
+++ b/.github/workflows/minikube.yaml
@@ -76,6 +76,7 @@ jobs:
export JVM_BUILD_SERVICE_IMAGE=quay.io/$JBS_QUAY_ORG/$JBS_QUAY_IMAGE_CONTROLLER:$JBS_QUAY_IMAGE_TAG
export JVM_BUILD_SERVICE_CACHE_IMAGE=quay.io/$JBS_QUAY_ORG/konflux-jbs-pnc-tenant/jvm-build-service/cache:$JBS_QUAY_IMAGE_TAG
export JVM_BUILD_SERVICE_REQPROCESSOR_IMAGE=quay.io/$JBS_QUAY_ORG/konflux-jbs-pnc-tenant/jvm-build-service/build-request-processor:$JBS_QUAY_IMAGE_TAG
+ export JVM_BUILD_SERVICE_DOMAIN_PROXY_IMAGE=quay.io/$JBS_QUAY_ORG/konflux-jbs-pnc-tenant/jvm-build-service/domain-proxy:$JBS_QUAY_IMAGE_TAG
echo "Using worker namespace $JBS_WORKER_NAMESPACE DEV_IP $DEV_IP JVM_BUILD_SERVICE_IMAGE $JVM_BUILD_SERVICE_IMAGE"
@@ -139,6 +140,7 @@ jobs:
export JVM_BUILD_SERVICE_IMAGE=quay.io/$JBS_QUAY_ORG/$JBS_QUAY_IMAGE_CONTROLLER:$JBS_QUAY_IMAGE_TAG
export JVM_BUILD_SERVICE_CACHE_IMAGE=quay.io/$JBS_QUAY_ORG/konflux-jbs-pnc-tenant/jvm-build-service/cache:$JBS_QUAY_IMAGE_TAG
export JVM_BUILD_SERVICE_REQPROCESSOR_IMAGE=quay.io/$JBS_QUAY_ORG/konflux-jbs-pnc-tenant/jvm-build-service/build-request-processor:$JBS_QUAY_IMAGE_TAG
+ export JVM_BUILD_SERVICE_DOMAIN_PROXY_IMAGE=quay.io/$JBS_QUAY_ORG/konflux-jbs-pnc-tenant/jvm-build-service/domain-proxy:$JBS_QUAY_IMAGE_TAG
echo "Using worker namespace $JBS_WORKER_NAMESPACE DEV_IP $DEV_IP JVM_BUILD_SERVICE_IMAGE $JVM_BUILD_SERVICE_IMAGE"
diff --git a/.tekton/domain-proxy-pull-request.yaml b/.tekton/domain-proxy-pull-request.yaml
index 0d3446b48..0e6042fdd 100644
--- a/.tekton/domain-proxy-pull-request.yaml
+++ b/.tekton/domain-proxy-pull-request.yaml
@@ -27,18 +27,15 @@ spec:
- name: image-expires-after
value: 5d
- name: dockerfile
- value: Dockerfile
+ value: /cmd/domainproxy/docker/Dockerfile.all-in-one
+ - name: path-context
+ value: .
pipelineSpec:
- description: |
- This pipeline is ideal for building container images from a Containerfile while reducing network traffic.
-
- _Uses `buildah` to create a container image. It also optionally creates a source image and runs some build-time tests. EC will flag a violation for [`trusted_task.trusted`](https://enterprisecontract.dev/docs/ec-policies/release_policy.html#trusted_task__trusted) if any tasks are added to the pipeline.
- This pipeline is pushed as a Tekton bundle to [quay.io](https://quay.io/repository/konflux-ci/tekton-catalog/pipeline-docker-build?tab=tags)_
finally:
- name: show-sbom
params:
- name: IMAGE_URL
- value: $(tasks.build-image-index.results.IMAGE_URL)
+ value: $(tasks.build-container.results.IMAGE_URL)
taskRef:
params:
- name: name
@@ -57,7 +54,7 @@ spec:
- name: image-url
value: $(params.output-image)
- name: build-task-status
- value: $(tasks.build-image-index.status)
+ value: $(tasks.build-container.status)
taskRef:
params:
- name: name
@@ -107,6 +104,10 @@ spec:
description: Build dependencies to be prefetched by Cachi2
name: prefetch-input
type: string
+ - default: "false"
+ description: Java build
+ name: java
+ type: string
- default: ""
description: Image tag expiration time, time values could be something like
1h, 2d, 3w for hours, days, and weeks, respectively.
@@ -115,10 +116,6 @@ spec:
description: Build a source image.
name: build-source-image
type: string
- - default: "false"
- description: Add built image into an OCI image index
- name: build-image-index
- type: string
- default: []
description: Array of --build-arg values ("arg=value" strings) for buildah
name: build-args
@@ -130,16 +127,19 @@ spec:
results:
- description: ""
name: IMAGE_URL
- value: $(tasks.build-image-index.results.IMAGE_URL)
+ value: $(tasks.build-container.results.IMAGE_URL)
- description: ""
name: IMAGE_DIGEST
- value: $(tasks.build-image-index.results.IMAGE_DIGEST)
+ value: $(tasks.build-container.results.IMAGE_DIGEST)
- description: ""
name: CHAINS-GIT_URL
value: $(tasks.clone-repository.results.url)
- description: ""
name: CHAINS-GIT_COMMIT
value: $(tasks.clone-repository.results.commit)
+ - description: ""
+ name: JAVA_COMMUNITY_DEPENDENCIES
+ value: $(tasks.build-container.results.JAVA_COMMUNITY_DEPENDENCIES)
tasks:
- name: init
params:
@@ -252,41 +252,12 @@ spec:
workspaces:
- name: source
workspace: workspace
- - name: build-image-index
- params:
- - name: IMAGE
- value: $(params.output-image)
- - name: COMMIT_SHA
- value: $(tasks.clone-repository.results.commit)
- - name: IMAGE_EXPIRES_AFTER
- value: $(params.image-expires-after)
- - name: ALWAYS_BUILD_INDEX
- value: $(params.build-image-index)
- - name: IMAGES
- value:
- - $(tasks.build-container.results.IMAGE_URL)@$(tasks.build-container.results.IMAGE_DIGEST)
- runAfter:
- - build-container
- taskRef:
- params:
- - name: name
- value: build-image-index
- - name: bundle
- value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.1@sha256:ebc17bb22481160eec6eb7277df1e48b90f599bebe563cd4f046807f4e32ced3
- - name: kind
- value: task
- resolver: bundles
- when:
- - input: $(tasks.init.results.build)
- operator: in
- values:
- - "true"
- name: build-source-image
params:
- name: BINARY_IMAGE
value: $(params.output-image)
runAfter:
- - build-image-index
+ - build-container
taskRef:
params:
- name: name
@@ -311,11 +282,11 @@ spec:
- name: deprecated-base-image-check
params:
- name: IMAGE_URL
- value: $(tasks.build-image-index.results.IMAGE_URL)
+ value: $(tasks.build-container.results.IMAGE_URL)
- name: IMAGE_DIGEST
- value: $(tasks.build-image-index.results.IMAGE_DIGEST)
+ value: $(tasks.build-container.results.IMAGE_DIGEST)
runAfter:
- - build-image-index
+ - build-container
taskRef:
params:
- name: name
@@ -333,11 +304,11 @@ spec:
- name: clair-scan
params:
- name: image-digest
- value: $(tasks.build-image-index.results.IMAGE_DIGEST)
+ value: $(tasks.build-container.results.IMAGE_DIGEST)
- name: image-url
- value: $(tasks.build-image-index.results.IMAGE_URL)
+ value: $(tasks.build-container.results.IMAGE_URL)
runAfter:
- - build-image-index
+ - build-container
taskRef:
params:
- name: name
@@ -355,9 +326,9 @@ spec:
- name: ecosystem-cert-preflight-checks
params:
- name: image-url
- value: $(tasks.build-image-index.results.IMAGE_URL)
+ value: $(tasks.build-container.results.IMAGE_URL)
runAfter:
- - build-image-index
+ - build-container
taskRef:
params:
- name: name
@@ -375,11 +346,11 @@ spec:
- name: sast-snyk-check
params:
- name: image-digest
- value: $(tasks.build-image-index.results.IMAGE_DIGEST)
+ value: $(tasks.build-container.results.IMAGE_DIGEST)
- name: image-url
- value: $(tasks.build-image-index.results.IMAGE_URL)
+ value: $(tasks.build-container.results.IMAGE_URL)
runAfter:
- - build-image-index
+ - build-container
taskRef:
params:
- name: name
@@ -400,11 +371,11 @@ spec:
- name: clamav-scan
params:
- name: image-digest
- value: $(tasks.build-image-index.results.IMAGE_DIGEST)
+ value: $(tasks.build-container.results.IMAGE_DIGEST)
- name: image-url
- value: $(tasks.build-image-index.results.IMAGE_URL)
+ value: $(tasks.build-container.results.IMAGE_URL)
runAfter:
- - build-image-index
+ - build-container
taskRef:
params:
- name: name
@@ -422,9 +393,9 @@ spec:
- name: apply-tags
params:
- name: IMAGE
- value: $(tasks.build-image-index.results.IMAGE_URL)
+ value: $(tasks.build-container.results.IMAGE_URL)
runAfter:
- - build-image-index
+ - build-container
taskRef:
params:
- name: name
@@ -437,15 +408,15 @@ spec:
- name: push-dockerfile
params:
- name: IMAGE
- value: $(tasks.build-image-index.results.IMAGE_URL)
+ value: $(tasks.build-container.results.IMAGE_URL)
- name: IMAGE_DIGEST
- value: $(tasks.build-image-index.results.IMAGE_DIGEST)
+ value: $(tasks.build-container.results.IMAGE_DIGEST)
- name: DOCKERFILE
value: $(params.dockerfile)
- name: CONTEXT
value: $(params.path-context)
runAfter:
- - build-image-index
+ - build-container
taskRef:
params:
- name: name
diff --git a/.tekton/domain-proxy-push.yaml b/.tekton/domain-proxy-push.yaml
index 49627c829..b49a28960 100644
--- a/.tekton/domain-proxy-push.yaml
+++ b/.tekton/domain-proxy-push.yaml
@@ -24,24 +24,21 @@ spec:
- name: output-image
value: quay.io/redhat-user-workloads/konflux-jbs-pnc-tenant/jvm-build-service/domain-proxy:{{revision}}
- name: dockerfile
- value: Dockerfile
+ value: /cmd/domainproxy/docker/Dockerfile.all-in-one
+ - name: path-context
+ value: .
pipelineSpec:
- description: |
- This pipeline is ideal for building container images from a Containerfile while reducing network traffic.
-
- _Uses `buildah` to create a container image. It also optionally creates a source image and runs some build-time tests. EC will flag a violation for [`trusted_task.trusted`](https://enterprisecontract.dev/docs/ec-policies/release_policy.html#trusted_task__trusted) if any tasks are added to the pipeline.
- This pipeline is pushed as a Tekton bundle to [quay.io](https://quay.io/repository/konflux-ci/tekton-catalog/pipeline-docker-build?tab=tags)_
finally:
- name: show-sbom
params:
- name: IMAGE_URL
- value: $(tasks.build-image-index.results.IMAGE_URL)
+ value: $(tasks.build-container.results.IMAGE_URL)
taskRef:
params:
- name: name
value: show-sbom
- name: bundle
- value: quay.io/konflux-ci/tekton-catalog/task-show-sbom:0.1@sha256:945a7c9066d3e0a95d3fddb7e8a6992e4d632a2a75d8f3a9bd2ff2fef0ec9aa0
+ value: quay.io/konflux-ci/tekton-catalog/task-show-sbom:0.1@sha256:9bfc6b99ef038800fe131d7b45ff3cd4da3a415dd536f7c657b3527b01c4a13b
- name: kind
value: task
resolver: bundles
@@ -54,7 +51,7 @@ spec:
- name: image-url
value: $(params.output-image)
- name: build-task-status
- value: $(tasks.build-image-index.status)
+ value: $(tasks.build-container.status)
taskRef:
params:
- name: name
@@ -104,6 +101,10 @@ spec:
description: Build dependencies to be prefetched by Cachi2
name: prefetch-input
type: string
+ - default: "false"
+ description: Java build
+ name: java
+ type: string
- default: ""
description: Image tag expiration time, time values could be something like
1h, 2d, 3w for hours, days, and weeks, respectively.
@@ -112,10 +113,6 @@ spec:
description: Build a source image.
name: build-source-image
type: string
- - default: "false"
- description: Add built image into an OCI image index
- name: build-image-index
- type: string
- default: []
description: Array of --build-arg values ("arg=value" strings) for buildah
name: build-args
@@ -127,16 +124,19 @@ spec:
results:
- description: ""
name: IMAGE_URL
- value: $(tasks.build-image-index.results.IMAGE_URL)
+ value: $(tasks.build-container.results.IMAGE_URL)
- description: ""
name: IMAGE_DIGEST
- value: $(tasks.build-image-index.results.IMAGE_DIGEST)
+ value: $(tasks.build-container.results.IMAGE_DIGEST)
- description: ""
name: CHAINS-GIT_URL
value: $(tasks.clone-repository.results.url)
- description: ""
name: CHAINS-GIT_COMMIT
value: $(tasks.clone-repository.results.commit)
+ - description: ""
+ name: JAVA_COMMUNITY_DEPENDENCIES
+ value: $(tasks.build-container.results.JAVA_COMMUNITY_DEPENDENCIES)
tasks:
- name: init
params:
@@ -249,41 +249,12 @@ spec:
workspaces:
- name: source
workspace: workspace
- - name: build-image-index
- params:
- - name: IMAGE
- value: $(params.output-image)
- - name: COMMIT_SHA
- value: $(tasks.clone-repository.results.commit)
- - name: IMAGE_EXPIRES_AFTER
- value: $(params.image-expires-after)
- - name: ALWAYS_BUILD_INDEX
- value: $(params.build-image-index)
- - name: IMAGES
- value:
- - $(tasks.build-container.results.IMAGE_URL)@$(tasks.build-container.results.IMAGE_DIGEST)
- runAfter:
- - build-container
- taskRef:
- params:
- - name: name
- value: build-image-index
- - name: bundle
- value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.1@sha256:ebc17bb22481160eec6eb7277df1e48b90f599bebe563cd4f046807f4e32ced3
- - name: kind
- value: task
- resolver: bundles
- when:
- - input: $(tasks.init.results.build)
- operator: in
- values:
- - "true"
- name: build-source-image
params:
- name: BINARY_IMAGE
value: $(params.output-image)
runAfter:
- - build-image-index
+ - build-container
taskRef:
params:
- name: name
@@ -308,11 +279,11 @@ spec:
- name: deprecated-base-image-check
params:
- name: IMAGE_URL
- value: $(tasks.build-image-index.results.IMAGE_URL)
+ value: $(tasks.build-container.results.IMAGE_URL)
- name: IMAGE_DIGEST
- value: $(tasks.build-image-index.results.IMAGE_DIGEST)
+ value: $(tasks.build-container.results.IMAGE_DIGEST)
runAfter:
- - build-image-index
+ - build-container
taskRef:
params:
- name: name
@@ -330,11 +301,11 @@ spec:
- name: clair-scan
params:
- name: image-digest
- value: $(tasks.build-image-index.results.IMAGE_DIGEST)
+ value: $(tasks.build-container.results.IMAGE_DIGEST)
- name: image-url
- value: $(tasks.build-image-index.results.IMAGE_URL)
+ value: $(tasks.build-container.results.IMAGE_URL)
runAfter:
- - build-image-index
+ - build-container
taskRef:
params:
- name: name
@@ -352,9 +323,9 @@ spec:
- name: ecosystem-cert-preflight-checks
params:
- name: image-url
- value: $(tasks.build-image-index.results.IMAGE_URL)
+ value: $(tasks.build-container.results.IMAGE_URL)
runAfter:
- - build-image-index
+ - build-container
taskRef:
params:
- name: name
@@ -372,11 +343,11 @@ spec:
- name: sast-snyk-check
params:
- name: image-digest
- value: $(tasks.build-image-index.results.IMAGE_DIGEST)
+ value: $(tasks.build-container.results.IMAGE_DIGEST)
- name: image-url
- value: $(tasks.build-image-index.results.IMAGE_URL)
+ value: $(tasks.build-container.results.IMAGE_URL)
runAfter:
- - build-image-index
+ - build-container
taskRef:
params:
- name: name
@@ -397,11 +368,11 @@ spec:
- name: clamav-scan
params:
- name: image-digest
- value: $(tasks.build-image-index.results.IMAGE_DIGEST)
+ value: $(tasks.build-container.results.IMAGE_DIGEST)
- name: image-url
- value: $(tasks.build-image-index.results.IMAGE_URL)
+ value: $(tasks.build-container.results.IMAGE_URL)
runAfter:
- - build-image-index
+ - build-container
taskRef:
params:
- name: name
@@ -419,11 +390,11 @@ spec:
- name: apply-tags
params:
- name: IMAGE
- value: $(tasks.build-image-index.results.IMAGE_URL)
+ value: $(tasks.build-container.results.IMAGE_URL)
- name: ADDITIONAL_TAGS
value: ["latest"]
runAfter:
- - build-image-index
+ - build-container
taskRef:
params:
- name: name
@@ -436,15 +407,15 @@ spec:
- name: push-dockerfile
params:
- name: IMAGE
- value: $(tasks.build-image-index.results.IMAGE_URL)
+ value: $(tasks.build-container.results.IMAGE_URL)
- name: IMAGE_DIGEST
- value: $(tasks.build-image-index.results.IMAGE_DIGEST)
+ value: $(tasks.build-container.results.IMAGE_DIGEST)
- name: DOCKERFILE
value: $(params.dockerfile)
- name: CONTEXT
value: $(params.path-context)
runAfter:
- - build-image-index
+ - build-container
taskRef:
params:
- name: name
diff --git a/Makefile b/Makefile
index 370b6877d..40bd5a19e 100644
--- a/Makefile
+++ b/Makefile
@@ -44,6 +44,8 @@ minikube-test:
build:
go build -o out/jvmbuildservice cmd/controller/main.go
env GOOS=linux GOARCH=amd64 GOTOOLCHAIN=auto GOSUMDB=sum.golang.org go build -mod=vendor -o out/jvmbuildservice ./cmd/controller
+ go build -o out/domainproxyserver cmd/domainproxy/server/main.go
+ go build -o out/domainproxyclient cmd/domainproxy/client/main.go
clean:
rm -rf out
@@ -60,13 +62,15 @@ generate: generate-crds
verify-generate-deepcopy-client: generate-deepcopy-client
hack/verify-codegen.sh
-dev-image:
+dev-image: build
@if [ -z "$$QUAY_USERNAME" ]; then \
echo "ERROR: QUAY_USERNAME is not set"; \
exit 1; \
fi
docker build . -t quay.io/$(QUAY_USERNAME)/hacbs-jvm-controller:"$${JBS_QUAY_IMAGE_TAG:-dev}"
docker push quay.io/$(QUAY_USERNAME)/hacbs-jvm-controller:"$${JBS_QUAY_IMAGE_TAG:-dev}"
+ docker build . -f cmd/domainproxy/docker/Dockerfile.local -t quay.io/$(QUAY_USERNAME)/hacbs-jvm-domain-proxy:"$${JBS_QUAY_IMAGE_TAG:-dev}"
+ docker push quay.io/$(QUAY_USERNAME)/hacbs-jvm-domain-proxy:"$${JBS_QUAY_IMAGE_TAG:-dev}"
dev: dev-image
cd java-components && mvn clean install -Dlocal -DskipTests -Ddev
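
A typical local flow with the updated targets, assuming a personal quay.io namespace (the username and tag below are placeholders): `dev-image` now depends on `build`, so the domain proxy server and client binaries are compiled into `out/` before the `Dockerfile.local` image is built and pushed.

```sh
# Placeholder namespace/tag; dev-image fails fast if QUAY_USERNAME is unset.
export QUAY_USERNAME=myuser
export JBS_QUAY_IMAGE_TAG=dev   # optional, "dev" is the default

make dev-image   # runs `build` first, then builds/pushes the controller and domain proxy images
```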
diff --git a/cmd/domainproxy/client/main.go b/cmd/domainproxy/client/main.go
new file mode 100644
index 000000000..17ef0e3ac
--- /dev/null
+++ b/cmd/domainproxy/client/main.go
@@ -0,0 +1,19 @@
+package main
+
+import (
+ . "github.com/redhat-appstudio/jvm-build-service/pkg/domainproxy/client"
+ "os"
+ "os/signal"
+ "syscall"
+)
+
+func main() {
+ domainProxyClient := NewDomainProxyClient()
+ ready := make(chan bool)
+ domainProxyClient.Start(ready)
+ <-ready
+ signals := make(chan os.Signal, 1)
+ signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM)
+ <-signals
+ domainProxyClient.Stop()
+}
diff --git a/cmd/domainproxy/docker/Dockerfile.all-in-one b/cmd/domainproxy/docker/Dockerfile.all-in-one
new file mode 100644
index 000000000..604d90b2f
--- /dev/null
+++ b/cmd/domainproxy/docker/Dockerfile.all-in-one
@@ -0,0 +1,14 @@
+FROM registry.access.redhat.com/ubi9/go-toolset:1.22.5-1731639025@sha256:45170b6e45114849b5d2c0e55d730ffa4a709ddf5f58b9e810548097b085e78f as builder
+USER 0
+WORKDIR /work
+COPY ./ .
+
+RUN go build -o domainproxyserver cmd/domainproxy/server/main.go
+RUN go build -o domainproxyclient cmd/domainproxy/client/main.go
+
+FROM quay.io/konflux-ci/buildah-task:latest@sha256:5cbd487022fb7ac476cbfdea25513b810f7e343ec48f89dc6a4e8c3c39fa37a2
+USER 0
+WORKDIR /work/
+
+COPY --from=builder /work/domainproxyserver /app/domain-proxy-server
+COPY --from=builder /work/domainproxyclient /app/domain-proxy-client
diff --git a/cmd/domainproxy/docker/Dockerfile.local b/cmd/domainproxy/docker/Dockerfile.local
new file mode 100644
index 000000000..09ba5af38
--- /dev/null
+++ b/cmd/domainproxy/docker/Dockerfile.local
@@ -0,0 +1,5 @@
+FROM quay.io/konflux-ci/buildah-task:latest@sha256:5cbd487022fb7ac476cbfdea25513b810f7e343ec48f89dc6a4e8c3c39fa37a2
+USER 0
+COPY out/domainproxyserver /app/domain-proxy-server
+COPY out/domainproxyclient /app/domain-proxy-client
+
diff --git a/cmd/domainproxy/server/main.go b/cmd/domainproxy/server/main.go
new file mode 100644
index 000000000..2656411b3
--- /dev/null
+++ b/cmd/domainproxy/server/main.go
@@ -0,0 +1,19 @@
+package main
+
+import (
+ . "github.com/redhat-appstudio/jvm-build-service/pkg/domainproxy/server"
+ "os"
+ "os/signal"
+ "syscall"
+)
+
+func main() {
+ domainProxyServer := NewDomainProxyServer()
+ ready := make(chan bool)
+ domainProxyServer.Start(ready)
+ <-ready
+ signals := make(chan os.Signal, 1)
+ signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM)
+ <-signals
+ domainProxyServer.Stop()
+}
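
Both entrypoints above consume packages (`pkg/domainproxy/client` and `pkg/domainproxy/server`) that are not part of this diff; only the constructor, `Start(ready chan bool)`, and `Stop()` are visible here. A minimal sketch of the server-side contract those `main` functions assume follows; the struct fields and internals are illustrative, not the actual implementation:

```go
// Sketch only: the real pkg/domainproxy/server is not shown in this diff,
// so everything below the exported signatures is an assumption.
package server

import "net"

type DomainProxyServer struct {
	listener net.Listener // hypothetical handle to whatever the proxy listens on
}

func NewDomainProxyServer() *DomainProxyServer {
	return &DomainProxyServer{}
}

// Start serves in the background and signals on ready once listening,
// which is why main.go can block on <-ready before installing signal handling.
func (s *DomainProxyServer) Start(ready chan bool) {
	go func() {
		// ... bind the domain socket / HTTP port, then:
		ready <- true
		// ... accept and forward connections until Stop is called.
	}()
}

// Stop releases the listener; main.go calls it after SIGINT/SIGTERM.
func (s *DomainProxyServer) Stop() {
	if s.listener != nil {
		_ = s.listener.Close()
	}
}
```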
diff --git a/deploy/openshift-ci.sh b/deploy/openshift-ci.sh
index b06f1af65..5390692b7 100755
--- a/deploy/openshift-ci.sh
+++ b/deploy/openshift-ci.sh
@@ -11,6 +11,8 @@ echo "jvm build service jvm cache image:"
echo ${JVM_BUILD_SERVICE_CACHE_IMAGE}
echo "jvm build service jvm reqprocessor image:"
echo ${JVM_BUILD_SERVICE_REQPROCESSOR_IMAGE}
+echo "jvm build service jvm domain proxy image:"
+echo ${JVM_BUILD_SERVICE_DOMAIN_PROXY_IMAGE}
function waitFor() {
endTime=$(( $(date +%s) + 600 ))
@@ -50,3 +52,4 @@ oc set env deployment/hacbs-jvm-operator -n jvm-build-service \
JVM_BUILD_SERVICE_IMAGE=${JVM_BUILD_SERVICE_IMAGE} \
JVM_BUILD_SERVICE_CACHE_IMAGE=${JVM_BUILD_SERVICE_CACHE_IMAGE} \
-JVM_BUILD_SERVICE_REQPROCESSOR_IMAGE=${JVM_BUILD_SERVICE_REQPROCESSOR_IMAGE}
+JVM_BUILD_SERVICE_REQPROCESSOR_IMAGE=${JVM_BUILD_SERVICE_REQPROCESSOR_IMAGE} \
+JVM_BUILD_SERVICE_DOMAIN_PROXY_IMAGE=${JVM_BUILD_SERVICE_DOMAIN_PROXY_IMAGE}
diff --git a/deploy/tasks/README.md b/deploy/tasks/README.md
index c7505677c..0ca092865 100644
--- a/deploy/tasks/README.md
+++ b/deploy/tasks/README.md
@@ -9,14 +9,21 @@ Note that while `pre-build.yaml` and `maven-deployment.yaml` are created by our
It should be base-lined to the most recent definition from Konflux build-definitions repository and then the following changes should be applied to that:
+### Domain Proxy
+Adds Domain Proxy to the build:
+* https://github.com/redhat-appstudio/jvm-build-service/blob/main/deploy/tasks/buildah-oci-ta.yaml#L134-L189
+* https://github.com/redhat-appstudio/jvm-build-service/blob/main/deploy/tasks/buildah-oci-ta.yaml#L288-L313
+* https://github.com/redhat-appstudio/jvm-build-service/blob/main/deploy/tasks/buildah-oci-ta.yaml#L327
+* https://github.com/redhat-appstudio/jvm-build-service/blob/main/deploy/tasks/buildah-oci-ta.yaml#L621-L657
+
### Indy Sidecar
Adds Indy configuration to the build:
-* https://github.com/redhat-appstudio/jvm-build-service/blob/main/deploy/tasks/buildah-oci-ta.yaml#L134-L137
-* https://github.com/redhat-appstudio/jvm-build-service/blob/main/deploy/tasks/buildah-oci-ta.yaml#L170-L180
-* https://github.com/redhat-appstudio/jvm-build-service/blob/main/deploy/tasks/buildah-oci-ta.yaml#L737-L757
+* https://github.com/redhat-appstudio/jvm-build-service/blob/main/deploy/tasks/buildah-oci-ta.yaml#L190-L193
+* https://github.com/redhat-appstudio/jvm-build-service/blob/main/deploy/tasks/buildah-oci-ta.yaml#L226-L236
+* https://github.com/redhat-appstudio/jvm-build-service/blob/main/deploy/tasks/buildah-oci-ta.yaml#L864-L884
### Trusted CA
Adds the trusted ca to the build:
-* https://github.com/redhat-appstudio/jvm-build-service/blob/main/deploy/tasks/buildah-oci-ta.yaml#L504-L508
+* https://github.com/redhat-appstudio/jvm-build-service/blob/main/deploy/tasks/buildah-oci-ta.yaml#L591-L595
diff --git a/deploy/tasks/buildah-oci-ta.yaml b/deploy/tasks/buildah-oci-ta.yaml
index 95c014ab5..9d49aa951 100644
--- a/deploy/tasks/buildah-oci-ta.yaml
+++ b/deploy/tasks/buildah-oci-ta.yaml
@@ -7,7 +7,7 @@ metadata:
tekton.dev/pipelines.minVersion: 0.12.1
tekton.dev/tags: image-build, konflux
labels:
- app.kubernetes.io/version: "0.2"
+ app.kubernetes.io/version: 0.2.1
build.appstudio.redhat.com/build_type: docker
spec:
description: |-
@@ -131,6 +131,62 @@ spec:
description: The name of the ConfigMap to read CA bundle data from.
type: string
default: trusted-ca
+ - name: BUILD_IMAGE
+ description: The buildah image to use.
+ type: string
+ default: quay.io/konflux-ci/buildah-task:latest@sha256:b2d6c32d1e05e91920cd4475b2761d58bb7ee11ad5dff3ecb59831c7572b4d0c
+ - name: ENABLE_DOMAIN_PROXY
+ description: Determines if domain proxy will be used when hermetic mode is enabled.
+ type: string
+ default: "false"
+ - name: DOMAIN_PROXY_BYTE_BUFFER_SIZE
+ description: The byte buffer size to use for the domain proxy.
+ type: string
+ default: 32768
+ - name: DOMAIN_PROXY_DOMAIN_SOCKET
+ description: The domain socket to use for the domain proxy.
+ type: string
+ default: /tmp/domain-socket.sock
+ - name: DOMAIN_PROXY_CONNECTION_TIMEOUT
+ description: The connection timeout in milliseconds to use for the domain proxy.
+ type: string
+ default: 10000
+ - name: DOMAIN_PROXY_IDLE_TIMEOUT
+ description: The idle timeout in milliseconds to use for the domain proxy.
+ type: string
+ default: 30000
+ - name: DOMAIN_PROXY_TARGET_WHITELIST
+ description: Comma-separated whitelist of target hosts for the domain proxy.
+ type: string
+ default: ""
+ - name: DOMAIN_PROXY_ENABLE_INTERNAL_PROXY
+ description: Determines if internal proxy will be used when domain proxy is enabled.
+ type: string
+ default: "false"
+ - name: DOMAIN_PROXY_INTERNAL_PROXY_HOST
+ description: Host of proxy used internally by the domain proxy.
+ type: string
+ default: ""
+ - name: DOMAIN_PROXY_INTERNAL_PROXY_PORT
+ description: Port of proxy used internally by the domain proxy.
+ type: string
+ default: ""
+ - name: DOMAIN_PROXY_INTERNAL_PROXY_USER
+ description: User of proxy used internally by the domain proxy.
+ type: string
+ default: ""
+ - name: DOMAIN_PROXY_INTERNAL_PROXY_PASSWORD
+ description: Password of proxy used internally by the domain proxy.
+ type: string
+ default: ""
+ - name: DOMAIN_PROXY_INTERNAL_NON_PROXY_HOSTS
+ description: Comma-separated list of target hosts that bypass the proxy used internally by the domain proxy.
+ type: string
+ default: ""
+ - name: DOMAIN_PROXY_HTTP_PORT
+ description: The HTTP port to use for the domain proxy.
+ type: string
+ default: 8080
- name: ENABLE_INDY_PROXY
type: string
description: Enable the indy generic proxy (true/false)
@@ -167,16 +223,16 @@ spec:
secretName: $(params.ENTITLEMENT_SECRET)
- name: shared
emptyDir: {}
- - name: indy-generic-proxy-stage-secrets
+ - name: indy-generic-proxy-secrets
secret:
optional: true
secretName: indy-generic-proxy-secrets
- - name: indy-generic-proxy-stage-config
+ - name: indy-generic-proxy-config
configMap:
items:
- key: application.yaml
path: application.yaml
- name: indy-generic-proxy-stage-config
+ name: indy-generic-proxy-config
optional: true
- name: trusted-ca
configMap:
@@ -229,6 +285,32 @@ spec:
value: $(params.YUM_REPOS_D_SRC)
- name: YUM_REPOS_D_TARGET
value: $(params.YUM_REPOS_D_TARGET)
+ - name: ENABLE_DOMAIN_PROXY
+ value: $(params.ENABLE_DOMAIN_PROXY)
+ - name: DOMAIN_PROXY_BYTE_BUFFER_SIZE
+ value: $(params.DOMAIN_PROXY_BYTE_BUFFER_SIZE)
+ - name: DOMAIN_PROXY_DOMAIN_SOCKET
+ value: $(params.DOMAIN_PROXY_DOMAIN_SOCKET)
+ - name: DOMAIN_PROXY_CONNECTION_TIMEOUT
+ value: $(params.DOMAIN_PROXY_CONNECTION_TIMEOUT)
+ - name: DOMAIN_PROXY_IDLE_TIMEOUT
+ value: $(params.DOMAIN_PROXY_IDLE_TIMEOUT)
+ - name: DOMAIN_PROXY_TARGET_WHITELIST
+ value: $(params.DOMAIN_PROXY_TARGET_WHITELIST)
+ - name: DOMAIN_PROXY_ENABLE_INTERNAL_PROXY
+ value: $(params.DOMAIN_PROXY_ENABLE_INTERNAL_PROXY)
+ - name: DOMAIN_PROXY_INTERNAL_PROXY_HOST
+ value: $(params.DOMAIN_PROXY_INTERNAL_PROXY_HOST)
+ - name: DOMAIN_PROXY_INTERNAL_PROXY_PORT
+ value: $(params.DOMAIN_PROXY_INTERNAL_PROXY_PORT)
+ - name: DOMAIN_PROXY_INTERNAL_PROXY_USER
+ value: $(params.DOMAIN_PROXY_INTERNAL_PROXY_USER)
+ - name: DOMAIN_PROXY_INTERNAL_PROXY_PASSWORD
+ value: $(params.DOMAIN_PROXY_INTERNAL_PROXY_PASSWORD)
+ - name: DOMAIN_PROXY_INTERNAL_NON_PROXY_HOSTS
+ value: $(params.DOMAIN_PROXY_INTERNAL_NON_PROXY_HOSTS)
+ - name: DOMAIN_PROXY_HTTP_PORT
+ value: $(params.DOMAIN_PROXY_HTTP_PORT)
volumeMounts:
- mountPath: /shared
name: shared
@@ -236,13 +318,13 @@ spec:
name: workdir
steps:
- name: use-trusted-artifact
- image: quay.io/redhat-appstudio/build-trusted-artifacts:latest@sha256:e0e457b6af10e44ff6b90208a9e69adc863a865e1c062c4cb84bf3846037d74d
+ image: quay.io/redhat-appstudio/build-trusted-artifacts:latest@sha256:52f1391e6f1c472fd10bb838f64fae2ed3320c636f536014978a5ddbdfc6b3af
args:
- use
- $(params.SOURCE_ARTIFACT)=/var/workdir/source
- $(params.CACHI2_ARTIFACT)=/var/workdir/cachi2
- name: build
- image: quay.io/konflux-ci/buildah-task:latest@sha256:b2d6c32d1e05e91920cd4475b2761d58bb7ee11ad5dff3ecb59831c7572b4d0c
+ image: $(params.BUILD_IMAGE)
args:
- --build-args
- $(params.BUILD_ARGS[*])
@@ -268,7 +350,7 @@ spec:
value: $(params.DOCKERFILE)
script: |
#!/bin/bash
- set -e
+ set -euo pipefail
ca_bundle=/mnt/trusted-ca/ca-bundle.crt
if [ -f "$ca_bundle" ]; then
echo "INFO: Using mounted CA bundle: $ca_bundle"
@@ -305,7 +387,7 @@ spec:
dockerfile_copy=$(mktemp --tmpdir "$(basename "$dockerfile_path").XXXXXX")
cp "$dockerfile_path" "$dockerfile_copy"
- if [ -n "$JVM_BUILD_WORKSPACE_ARTIFACT_CACHE_PORT_80_TCP_ADDR" ] && grep -q '^\s*RUN \(./\)\?mvn' "$dockerfile_copy"; then
+ if [ -n "${JVM_BUILD_WORKSPACE_ARTIFACT_CACHE_PORT_80_TCP_ADDR-}" ] && grep -q '^\s*RUN \(./\)\?mvn' "$dockerfile_copy"; then
sed -i -e "s|^\s*RUN \(\(./\)\?mvn\)\(.*\)|RUN echo \"mirror.defaulthttp://$JVM_BUILD_WORKSPACE_ARTIFACT_CACHE_PORT_80_TCP_ADDR/v1/cache/default/0/*\" > /tmp/settings.yaml; \1 -s /tmp/settings.yaml \3|g" "$dockerfile_copy"
touch /var/lib/containers/java
fi
@@ -363,9 +445,9 @@ spec:
BUILD_ARG_FLAGS+=("--build-arg=$build_arg")
done
+ dockerfile-json "${BUILD_ARG_FLAGS[@]}" "$dockerfile_copy" >/shared/parsed_dockerfile.json
BASE_IMAGES=$(
- dockerfile-json "${BUILD_ARG_FLAGS[@]}" "$dockerfile_copy" |
- jq -r '.Stages[] | select(.From | .Stage or .Scratch | not) | .BaseName | select(test("^oci-archive:") | not)'
+ jq -r '.Stages[] | select(.From | .Stage or .Scratch | not) | .BaseName | select(test("^oci-archive:") | not)' /shared/parsed_dockerfile.json
)
BUILDAH_ARGS=()
@@ -456,23 +538,28 @@ spec:
# shared emptydir volume to "/etc/pki/entitlement" to prevent certificates from being included in the produced
# container.
- REGISTERED="false"
if [ -e /activation-key/org ]; then
cp -r --preserve=mode "$ACTIVATION_KEY_PATH" /tmp/activation-key
- mkdir /shared/rhsm-tmp
- VOLUME_MOUNTS+=(--volume /tmp/activation-key:/activation-key -v /shared/rhsm-tmp:/etc/pki/entitlement:Z)
+ mkdir -p /shared/rhsm/etc/pki/entitlement
+ mkdir -p /shared/rhsm/etc/pki/consumer
+
+ VOLUME_MOUNTS+=(-v /tmp/activation-key:/activation-key
+ -v /shared/rhsm/etc/pki/entitlement:/etc/pki/entitlement:Z
+ -v /shared/rhsm/etc/pki/consumer:/etc/pki/consumer:Z)
echo "Adding activation key to the build"
- if ! grep subscription-manager "$dockerfile_path" | grep -q register; then
+ if ! grep -E "^[^#]*subscription-manager.[^#]*register" "$dockerfile_path"; then
# user is not running registration in the Containerfile: pre-register.
echo "Pre-registering with subscription manager."
subscription-manager register --org "$(cat /tmp/activation-key/org)" --activationkey "$(cat /tmp/activation-key/activationkey)"
- REGISTERED=$?
- # copy generated certificates to /shared/rhsm-tmp
- cp /etc/pki/entitlement/*.pem /shared/rhsm-tmp
+ trap 'subscription-manager unregister || true' EXIT
+
+ # copy generated certificates to /shared volume
+ cp /etc/pki/entitlement/*.pem /shared/rhsm/etc/pki/entitlement
+ cp /etc/pki/consumer/*.pem /shared/rhsm/etc/pki/consumer
- # and then mount get /etc/rhsm/ca/redhat-uep.pem into /run/secrets/rhsm/ca
+ # and then mount /etc/rhsm/ca/redhat-uep.pem into /etc/rhsm/ca inside the build
- VOLUME_MOUNTS+=(--volume /etc/rhsm/ca/redhat-uep.pem:/run/secrets/rhsm/ca/redhat-uep.pem)
+ VOLUME_MOUNTS+=(--volume /etc/rhsm/ca/redhat-uep.pem:/etc/rhsm/ca/redhat-uep.pem:Z)
fi
# was: if [ -d "$ACTIVATION_KEY_PATH" ]; then
@@ -482,7 +569,7 @@ spec:
echo "Adding the entitlement to the build"
fi
- if [ -n "$ADDITIONAL_VOLUME_MOUNTS" ]; then
+ if [ -n "${ADDITIONAL_VOLUME_MOUNTS-}" ]; then
# ADDITIONAL_VOLUME_MOUNTS allows to specify more volumes for the build.
# This is primarily used in instrumented builds for SAST scanning and analyzing.
# Instrumented builds use this step as their base and add some other tools.
@@ -528,7 +615,46 @@ spec:
command="$buildah_cmd"
fi
- unshare -Uf "${UNSHARE_ARGS[@]}" --keep-caps -r --map-users 1,1,65536 --map-groups 1,1,65536 -w "${SOURCE_CODE_DIR}/$CONTEXT" -- sh -c "$command"
+ # disable host subscription manager integration
+ find /usr/share/rhel/secrets -type l -exec unlink {} \;
+
+ if [ "${HERMETIC}" == "true" ] && [ "${ENABLE_DOMAIN_PROXY}" == "true" ]; then
+ echo "Build will be executed with domain proxy"
+ /app/domain-proxy-server &
+ server_pid=$!
+
+ # Without expansion
+ cat >> /app/build-script.sh << 'EOF'
+ #!/bin/sh
+ /app/domain-proxy-client &
+ client_pid=$!
+ EOF
+
+ # With expansion
+ cat >> /app/build-script.sh << EOF
+ $command
+ EOF
+
+ # Without expansion
+ cat >> /app/build-script.sh << 'EOF'
+ set +e
+ kill $client_pid
+ wait $client_pid
+ set -e
+ EOF
+
+ cat /app/build-script.sh
+ chmod +x /app/build-script.sh
+
+ unshare -Uf "${UNSHARE_ARGS[@]}" --keep-caps -r --map-users 1,1,65536 --map-groups 1,1,65536 -w "${SOURCE_CODE_DIR}/$CONTEXT" -- /app/build-script.sh
+
+ set +e
+ kill $server_pid
+ wait $server_pid
+ set -e
+ else
+ unshare -Uf "${UNSHARE_ARGS[@]}" --keep-caps -r --map-users 1,1,65536 --map-groups 1,1,65536 -w "${SOURCE_CODE_DIR}/$CONTEXT" -- sh -c "$command"
+ fi
container=$(buildah from --pull-never "$IMAGE")
buildah mount $container | tee /shared/container_path
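
Given the quoting above (only the middle heredoc expands variables), the generated /app/build-script.sh ends up roughly as follows, with the domain proxy client running inside the unshared namespace of the build while the server stays outside it; the buildah line is a placeholder for the expanded `$command`:

```sh
#!/bin/sh
/app/domain-proxy-client &
client_pid=$!
buildah build ...        # placeholder: the expanded $command assembled earlier in this step
set +e
kill $client_pid
wait $client_pid
set -e
```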
@@ -543,27 +669,85 @@ spec:
touch /shared/base_images_digests
for image in $BASE_IMAGES; do
- buildah images --format '{{ .Name }}:{{ .Tag }}@{{ .Digest }}' --filter reference="$image" >>/shared/base_images_digests
+ base_image_digest=$(buildah images --format '{{ .Name }}:{{ .Tag }}@{{ .Digest }}' --filter reference="$image")
+ # In some cases, there might be BASE_IMAGES, but not any associated digest. This happens
+ # if buildah did not use that particular image during build because it was skipped
+ if [ -n "$base_image_digest" ]; then
+ echo "$image $base_image_digest" >>/shared/base_images_digests
+ fi
done
+ computeResources:
+ limits:
+ cpu: "4"
+ memory: 8Gi
+ requests:
+ cpu: "1"
+ memory: 2Gi
+ securityContext:
+ capabilities:
+ add:
+ - SETFCAP
+ - name: push
+ image: quay.io/konflux-ci/buildah-task:latest@sha256:b2d6c32d1e05e91920cd4475b2761d58bb7ee11ad5dff3ecb59831c7572b4d0c
+ workingDir: /var/workdir
+ volumeMounts:
+ - mountPath: /var/lib/containers
+ name: varlibcontainers
+ - mountPath: /mnt/trusted-ca
+ name: trusted-ca
+ readOnly: true
+ script: |
+ #!/bin/bash
+ set -e
+
+ ca_bundle=/mnt/trusted-ca/ca-bundle.crt
+ if [ -f "$ca_bundle" ]; then
+ echo "INFO: Using mounted CA bundle: $ca_bundle"
+ cp -vf $ca_bundle /etc/pki/ca-trust/source/anchors
+ update-ca-trust
+ fi
- # Needed to generate base images SBOM
- echo "$BASE_IMAGES" >/shared/base_images_from_dockerfile
+ retries=5
+ # Push to a unique tag based on the TaskRun name to avoid race conditions
+ echo "Pushing to ${IMAGE%:*}:${TASKRUN_NAME}"
+ if ! buildah push \
+ --retry "$retries" \
+ --tls-verify="$TLSVERIFY" \
+ "$IMAGE" \
+ "docker://${IMAGE%:*}:$(context.taskRun.name)"; then
+ echo "Failed to push sbom image to ${IMAGE%:*}:$(context.taskRun.name) after ${retries} tries"
+ exit 1
+ fi
- # unregister pod from subscription manager
- if [ "$REGISTERED" == "0" ]; then
- subscription-manager unregister
+ # Push to a tag based on the git revision
+ echo "Pushing to ${IMAGE}"
+ if ! buildah push \
+ --retry "$retries" \
+ --tls-verify="$TLSVERIFY" \
+ --digestfile "/var/workdir/image-digest" "$IMAGE" \
+ "docker://$IMAGE"; then
+ echo "Failed to push sbom image to $IMAGE after ${retries} tries"
+ exit 1
fi
+
+ cat "/var/workdir"/image-digest | tee $(results.IMAGE_DIGEST.path)
+ echo -n "$IMAGE" | tee $(results.IMAGE_URL.path)
+ {
+ echo -n "${IMAGE}@"
+ cat "/var/workdir/image-digest"
+ } >"$(results.IMAGE_REF.path)"
computeResources:
limits:
cpu: "4"
- memory: 8Gi
+ memory: 4Gi
requests:
cpu: "1"
- memory: 2Gi
+ memory: 1Gi
securityContext:
capabilities:
add:
- SETFCAP
+ runAsUser: 0
- name: sbom-syft-generate
image: registry.access.redhat.com/rh-syft-tech-preview/syft-rhel9:1.4.1@sha256:34d7065427085a31dc4949bd283c001b91794d427e1e4cdf1b21ea4faf9fee3f
workingDir: /var/workdir/source
@@ -608,7 +792,7 @@ spec:
securityContext:
runAsUser: 0
- name: prepare-sboms
- image: quay.io/redhat-appstudio/sbom-utility-scripts-image@sha256:53a3041dff341b7fd1765b9cc2c324625d19e804b2eaff10a6e6d9dcdbde3a91
+ image: quay.io/redhat-appstudio/sbom-utility-scripts-image@sha256:e1347023ef1e83d52813c26384f551e3a03e482539d17a647955603e7ea6b579
workingDir: /var/workdir
script: |
echo "Merging contents of sbom-source.json and sbom-image.json into sbom-cyclonedx.json"
@@ -620,14 +804,23 @@ spec:
mv sbom-temp.json sbom-cyclonedx.json
fi
- echo "Creating sbom-purl.json"
- python3 /scripts/create_purl_sbom.py
-
echo "Adding base images data to sbom-cyclonedx.json"
python3 /scripts/base_images_sbom_script.py \
--sbom=sbom-cyclonedx.json \
- --base-images-from-dockerfile=/shared/base_images_from_dockerfile \
+ --parsed-dockerfile=/shared/parsed_dockerfile.json \
--base-images-digests=/shared/base_images_digests
+
+ echo "Adding image reference to sbom"
+ IMAGE_URL="$(cat "$(results.IMAGE_URL.path)")"
+ IMAGE_DIGEST="$(cat "$(results.IMAGE_DIGEST.path)")"
+
+ python3 /scripts/add_image_reference.py \
+ --image-url "$IMAGE_URL" \
+ --image-digest "$IMAGE_DIGEST" \
+ --input-file sbom-cyclonedx.json \
+ --output-file /tmp/sbom-cyclonedx.tmp.json
+
+ mv /tmp/sbom-cyclonedx.tmp.json sbom-cyclonedx.json
computeResources:
limits:
cpu: 200m
@@ -637,18 +830,15 @@ spec:
memory: 256Mi
securityContext:
runAsUser: 0
- - name: inject-sbom-and-push
- image: quay.io/konflux-ci/buildah-task:latest@sha256:b2d6c32d1e05e91920cd4475b2761d58bb7ee11ad5dff3ecb59831c7572b4d0c
+ - name: upload-sbom
+ image: quay.io/konflux-ci/appstudio-utils:48c311af02858e2422d6229600e9959e496ddef1@sha256:91ddd999271f65d8ec8487b10f3dd378f81aa894e11b9af4d10639fd52bba7e8
workingDir: /var/workdir
volumeMounts:
- - mountPath: /var/lib/containers
- name: varlibcontainers
- mountPath: /mnt/trusted-ca
name: trusted-ca
readOnly: true
script: |
#!/bin/bash
- set -e
ca_bundle=/mnt/trusted-ca/ca-bundle.crt
if [ -f "$ca_bundle" ]; then
@@ -657,76 +847,13 @@ spec:
update-ca-trust
fi
- base_image_name=$(buildah inspect --format '{{ index .ImageAnnotations "org.opencontainers.image.base.name"}}' $IMAGE | cut -f1 -d'@')
- base_image_digest=$(buildah inspect --format '{{ index .ImageAnnotations "org.opencontainers.image.base.digest"}}' $IMAGE)
- container=$(buildah from --pull-never $IMAGE)
- buildah copy $container sbom-cyclonedx.json sbom-purl.json /root/buildinfo/content_manifests/
- buildah config -a org.opencontainers.image.base.name=${base_image_name} -a org.opencontainers.image.base.digest=${base_image_digest} $container
-
- BUILDAH_ARGS=()
- if [ "${SQUASH}" == "true" ]; then
- BUILDAH_ARGS+=("--squash")
- fi
-
- buildah commit "${BUILDAH_ARGS[@]}" $container $IMAGE
-
- status=-1
- max_run=5
- sleep_sec=10
- for run in $(seq 1 $max_run); do
- status=0
- [ "$run" -gt 1 ] && sleep $sleep_sec
- echo "Pushing sbom image to registry"
- buildah push \
- --tls-verify=$TLSVERIFY \
- --digestfile /var/workdir/image-digest $IMAGE \
- docker://$IMAGE && break || status=$?
- done
- if [ "$status" -ne 0 ]; then
- echo "Failed to push sbom image to registry after ${max_run} tries"
- exit 1
- fi
-
- cat "/var/workdir"/image-digest | tee $(results.IMAGE_DIGEST.path)
- echo -n "$IMAGE" | tee $(results.IMAGE_URL.path)
- {
- echo -n "${IMAGE}@"
- cat "/var/workdir/image-digest"
- } >"$(results.IMAGE_REF.path)"
+ cosign attach sbom --sbom sbom-cyclonedx.json --type cyclonedx "$(cat "$(results.IMAGE_REF.path)")"
# Remove tag from IMAGE while allowing registry to contain a port number.
sbom_repo="${IMAGE%:*}"
sbom_digest="$(sha256sum sbom-cyclonedx.json | cut -d' ' -f1)"
# The SBOM_BLOB_URL is created by `cosign attach sbom`.
echo -n "${sbom_repo}@sha256:${sbom_digest}" | tee "$(results.SBOM_BLOB_URL.path)"
- computeResources:
- limits:
- cpu: "4"
- memory: 4Gi
- requests:
- cpu: "1"
- memory: 1Gi
- securityContext:
- capabilities:
- add:
- - SETFCAP
- runAsUser: 0
- - name: upload-sbom
- image: quay.io/konflux-ci/appstudio-utils:ab6b0b8e40e440158e7288c73aff1cf83a2cc8a9@sha256:24179f0efd06c65d16868c2d7eb82573cce8e43533de6cea14fec3b7446e0b14
- workingDir: /var/workdir
- volumeMounts:
- - mountPath: /mnt/trusted-ca
- name: trusted-ca
- readOnly: true
- script: |
- ca_bundle=/mnt/trusted-ca/ca-bundle.crt
- if [ -f "$ca_bundle" ]; then
- echo "INFO: Using mounted CA bundle: $ca_bundle"
- cp -vf $ca_bundle /etc/pki/ca-trust/source/anchors
- update-ca-trust
- fi
-
- cosign attach sbom --sbom sbom-cyclonedx.json --type cyclonedx "$(cat "$(results.IMAGE_REF.path)")"
computeResources:
limits:
cpu: 200m
@@ -738,10 +865,10 @@ spec:
- name: indy-generic-proxy
image: quay.io/factory2/indy-generic-proxy-service:latest-stage-mpplus
volumeMounts:
- - name: indy-generic-proxy-stage-secrets
+ - name: indy-generic-proxy-secrets
readOnly: true
mountPath: /mnt/secrets-generic-proxy
- - name: indy-generic-proxy-stage-config
+ - name: indy-generic-proxy-config
readOnly: true
mountPath: /deployment/config
computeResources:
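
For context, a pipeline consuming this customized task could enable the feature by passing the new parameters alongside a hermetic build; the parameter names come from the task additions above, while the task reference and whitelist values below are purely illustrative:

```yaml
# Illustrative only: parameter names match the task additions above;
# the taskRef name and whitelist entries are hypothetical.
- name: build-container
  taskRef:
    name: buildah-oci-ta
  params:
    - name: HERMETIC
      value: "true"
    - name: ENABLE_DOMAIN_PROXY
      value: "true"
    - name: DOMAIN_PROXY_TARGET_WHITELIST
      value: "repo1.maven.org,repo.maven.apache.org"
    - name: DOMAIN_PROXY_ENABLE_INTERNAL_PROXY
      value: "false"
```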
diff --git a/docs/development.adoc b/docs/development.adoc
index d2df5c0ae..317085a87 100644
--- a/docs/development.adoc
+++ b/docs/development.adoc
@@ -153,6 +153,7 @@ export QUAY_USERNAME=
export JVM_BUILD_SERVICE_IMAGE=
export JVM_BUILD_SERVICE_CACHE_IMAGE=
export JVM_BUILD_SERVICE_REQPROCESSOR_IMAGE=
+export JVM_BUILD_SERVICE_DOMAIN_PROXY_IMAGE=
./deploy/openshift-ci.sh
make openshift-e2e
----
diff --git a/go.mod b/go.mod
index 8ddb225b1..1b64c7223 100644
--- a/go.mod
+++ b/go.mod
@@ -29,7 +29,10 @@ require (
sigs.k8s.io/yaml v1.4.0
)
-require github.com/swist/go-k8s-portforward v0.2.1
+require (
+ github.com/elazarl/goproxy v0.0.0-20190421051319-9d40249d3c2f
+ github.com/swist/go-k8s-portforward v0.2.1
+)
require (
contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d // indirect
@@ -42,6 +45,7 @@ require (
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/creack/pty v1.1.21 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
+ github.com/elazarl/goproxy/ext v0.0.0-20240909085733-6741dbfc16a1 // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.9.0 // indirect
@@ -69,7 +73,7 @@ require (
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/moby/spdystream v0.2.0 // indirect
- github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect
+ github.com/moby/term v0.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
@@ -96,9 +100,9 @@ require (
golang.org/x/tools v0.24.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/api v0.172.0 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20240311173647-c811ad7063a7 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect
- google.golang.org/grpc v1.63.2 // indirect
+ google.golang.org/grpc v1.64.1 // indirect
google.golang.org/protobuf v1.34.2 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
diff --git a/go.sum b/go.sum
index f2c8deb6b..7eb057cdd 100644
--- a/go.sum
+++ b/go.sum
@@ -95,6 +95,10 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/elazarl/goproxy v0.0.0-20190421051319-9d40249d3c2f h1:8GDPb0tCY8LQ+OJ3dbHb5sA6YZWXFORQYZx5sdsTlMs=
+github.com/elazarl/goproxy v0.0.0-20190421051319-9d40249d3c2f/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/elazarl/goproxy/ext v0.0.0-20240909085733-6741dbfc16a1 h1:XgXwWTQglBILwCpfm8I9xUjPXKOi1EzRzuYzyczU5Z0=
+github.com/elazarl/goproxy/ext v0.0.0-20240909085733-6741dbfc16a1/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
@@ -308,8 +312,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
-github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA=
-github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
+github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
+github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -391,6 +395,7 @@ github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoG
github.com/prometheus/statsd_exporter v0.22.7 h1:7Pji/i2GuhK6Lu7DHrtTkFmNBCudCPT1pX2CziuyQR0=
github.com/prometheus/statsd_exporter v0.22.7/go.mod h1:N/TevpjkIh9ccs6nuzY3jQn9dFqnUakOjnEuMPJJJnI=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
@@ -755,8 +760,8 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto/googleapis/api v0.0.0-20240311173647-c811ad7063a7 h1:oqta3O3AnlWbmIE3bFnWbu4bRxZjfbWCp0cKSuZh01E=
-google.golang.org/genproto/googleapis/api v0.0.0-20240311173647-c811ad7063a7/go.mod h1:VQW3tUculP/D4B+xVCo+VgSq8As6wA9ZjHl//pmk+6s=
+google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 h1:RFiFrvy37/mpSpdySBDrUdipW/dHwsRwh3J3+A9VgT4=
+google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237/go.mod h1:Z5Iiy3jtmioajWHDGFk7CeugTyHtPvMHA4UTmUkyalE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1:LI5DOvAxUPMv/50agcLLoo+AdWc1irS9Rzz4vPuD1V4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
@@ -772,8 +777,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM=
-google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
+google.golang.org/grpc v1.64.1 h1:LKtvyfbX3UGVPFcGqJ9ItpVWW6oN/2XqTxfAnwRRXiA=
+google.golang.org/grpc v1.64.1/go.mod h1:hiQF4LFZelK2WKaP6W0L92zGHtiQdZxk8CrSdvyjeP0=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
diff --git a/java-components/build-request-processor/src/main/java/com/redhat/hacbs/container/build/preprocessor/AbstractPreprocessor.java b/java-components/build-request-processor/src/main/java/com/redhat/hacbs/container/build/preprocessor/AbstractPreprocessor.java
index 9a3076f9a..4c181422d 100644
--- a/java-components/build-request-processor/src/main/java/com/redhat/hacbs/container/build/preprocessor/AbstractPreprocessor.java
+++ b/java-components/build-request-processor/src/main/java/com/redhat/hacbs/container/build/preprocessor/AbstractPreprocessor.java
@@ -86,6 +86,11 @@ public void run() {
set -o pipefail
set -e
+ export http_proxy=http://localhost:8080
+ export https_proxy=${http_proxy}
+ export HTTP_PROXY=${http_proxy}
+ export HTTPS_PROXY=${http_proxy}
+ export ANT_OPTS="-Dhttp.proxyHost=localhost -Dhttp.proxyPort=8080"
#fix this when we no longer need to run as root
export HOME=${HOME:=/root}
# Custom base working directory.
@@ -187,6 +192,10 @@ private String getContainerFile() {
WORKDIR /var/workdir
ARG PROXY_URL=""
ENV PROXY_URL=$PROXY_URL
+ ENV http_proxy=http://localhost:8080
+ ENV https_proxy=${http_proxy}
+ ENV HTTP_PROXY=${http_proxy}
+ ENV HTTPS_PROXY=${http_proxy}
COPY .jbs/run-build.sh /var/workdir
COPY . /var/workdir/workspace/source/
RUN /var/workdir/run-build.sh
@@ -293,26 +302,11 @@ private String getMavenSetup() {
- <id>indy-http</id>
-
- <active>false</active>
+ <id>domain-proxy</id>
+ <active>true</active>
<protocol>http</protocol>
- <host>domain-proxy</host>
- <port>80</port>
-
- <username>${BUILD_ID}+tracking</username>
- <password>${ACCESS_TOKEN}</password>
- <nonProxyHosts>${PROXY_URL}|localhost</nonProxyHosts>
- </proxy>
- <proxy>
- <id>indy-https</id>
- <active>false</active>
- <protocol>https</protocol>
- <host>domain-proxy</host>
- <port>80</port>
- <username>${BUILD_ID}+tracking</username>
- <password>${ACCESS_TOKEN}</password>
- <nonProxyHosts>${PROXY_URL}|localhost</nonProxyHosts>
+ <host>localhost</host>
+ <port>8080</port>
diff --git a/java-components/domain-proxy/.dockerignore b/java-components/domain-proxy/.dockerignore
deleted file mode 100644
index 84607e970..000000000
--- a/java-components/domain-proxy/.dockerignore
+++ /dev/null
@@ -1,5 +0,0 @@
-*
-!*/target/*-runner
-!*/target/*-runner.jar
-!*/target/lib/*
-!*/target/quarkus-app/*
\ No newline at end of file
diff --git a/java-components/domain-proxy/Dockerfile b/java-components/domain-proxy/Dockerfile
deleted file mode 100644
index 9f5659422..000000000
--- a/java-components/domain-proxy/Dockerfile
+++ /dev/null
@@ -1,4 +0,0 @@
-FROM quay.io/redhat-appstudio/buildah:v1.35.4@sha256:3d3575bb7d0df64abcf1f22f06e82101a945d03317db1f3caac12814f796d01c
-RUN dnf install -y iproute
-COPY client/target/domain-proxy-client-999-SNAPSHOT-runner /app/domain-proxy-client-runner
-COPY server/target/domain-proxy-server-999-SNAPSHOT-runner /app/domain-proxy-server-runner
diff --git a/java-components/domain-proxy/client/pom.xml b/java-components/domain-proxy/client/pom.xml
deleted file mode 100644
index cf1471c9a..000000000
--- a/java-components/domain-proxy/client/pom.xml
+++ /dev/null
@@ -1,55 +0,0 @@
-
-
- 4.0.0
-
- io.github.redhat-appstudio.jvmbuild
- domain-proxy-parent
- 999-SNAPSHOT
-
- domain-proxy-client
-
-
-
- io.quarkus
- quarkus-arc
-
-
- io.github.redhat-appstudio.jvmbuild
- domain-proxy-common
-
-
-
-
-
-
- ${quarkus.platform.group-id}
- quarkus-maven-plugin
- true
-
-
-
- build
- generate-code
-
-
-
-
-
-
-
-
-
- native
-
-
- native
-
-
-
- false
- true
-
-
-
-
diff --git a/java-components/domain-proxy/client/src/main/java/com/redhat/hacbs/domainproxy/client/DomainProxyClient.java b/java-components/domain-proxy/client/src/main/java/com/redhat/hacbs/domainproxy/client/DomainProxyClient.java
deleted file mode 100644
index 196cf44ae..000000000
--- a/java-components/domain-proxy/client/src/main/java/com/redhat/hacbs/domainproxy/client/DomainProxyClient.java
+++ /dev/null
@@ -1,58 +0,0 @@
-package com.redhat.hacbs.domainproxy.client;
-
-import static com.redhat.hacbs.domainproxy.common.CommonIOUtil.createChannelToSocketWriter;
-import static com.redhat.hacbs.domainproxy.common.CommonIOUtil.createSocketToChannelWriter;
-
-import java.io.IOException;
-import java.net.ServerSocket;
-import java.net.Socket;
-import java.net.UnixDomainSocketAddress;
-import java.nio.channels.SocketChannel;
-
-import jakarta.annotation.PostConstruct;
-import jakarta.inject.Inject;
-import jakarta.inject.Singleton;
-
-import org.eclipse.microprofile.config.inject.ConfigProperty;
-
-import io.quarkus.logging.Log;
-import io.quarkus.runtime.Quarkus;
-import io.quarkus.runtime.Startup;
-
-@Startup
-@Singleton
-public class DomainProxyClient {
-
- @Inject
- @ConfigProperty(name = "client-domain-socket")
- String domainSocket;
-
- @Inject
- @ConfigProperty(name = "client-http-port")
- int clientHttpPort;
-
- @Inject
- @ConfigProperty(name = "byte-buffer-size")
- int byteBufferSize;
-
- @PostConstruct
- public void start() {
- Log.info("Starting domain proxy client...");
- new Thread(() -> {
- try (final ServerSocket serverSocket = new ServerSocket(clientHttpPort)) {
- while (true) {
- final Socket socket = serverSocket.accept();
- final UnixDomainSocketAddress address = UnixDomainSocketAddress.of(domainSocket);
- final SocketChannel channel = SocketChannel.open(address);
- // Write from socket to channel
- Thread.startVirtualThread(createSocketToChannelWriter(byteBufferSize, socket, channel));
- // Write from channel to socket
- Thread.startVirtualThread(createChannelToSocketWriter(byteBufferSize, channel, socket));
- }
- } catch (final IOException e) {
- Log.errorf(e, "Error initialising domain proxy client");
- }
- Quarkus.asyncExit();
- }).start();
- }
-}
diff --git a/java-components/domain-proxy/client/src/main/resources/application.properties b/java-components/domain-proxy/client/src/main/resources/application.properties
deleted file mode 100644
index b5617e7ce..000000000
--- a/java-components/domain-proxy/client/src/main/resources/application.properties
+++ /dev/null
@@ -1,3 +0,0 @@
-client-domain-socket=${DOMAIN_SOCKET:/tmp/domainserver}
-client-http-port=8080
-byte-buffer-size=${BYTE_BUFFER_SIZE:1024}
diff --git a/java-components/domain-proxy/common/pom.xml b/java-components/domain-proxy/common/pom.xml
deleted file mode 100644
index 2190f3a52..000000000
--- a/java-components/domain-proxy/common/pom.xml
+++ /dev/null
@@ -1,18 +0,0 @@
-
-
- 4.0.0
-
- io.github.redhat-appstudio.jvmbuild
- domain-proxy-parent
- 999-SNAPSHOT
-
- domain-proxy-common
-
-
-
- org.jboss.logging
- jboss-logging
-
-
-
diff --git a/java-components/domain-proxy/common/src/main/java/com/redhat/hacbs/domainproxy/common/CommonIOUtil.java b/java-components/domain-proxy/common/src/main/java/com/redhat/hacbs/domainproxy/common/CommonIOUtil.java
deleted file mode 100644
index 2e03c7be0..000000000
--- a/java-components/domain-proxy/common/src/main/java/com/redhat/hacbs/domainproxy/common/CommonIOUtil.java
+++ /dev/null
@@ -1,84 +0,0 @@
-package com.redhat.hacbs.domainproxy.common;
-
-import java.io.IOException;
-import java.net.Socket;
-import java.net.SocketException;
-import java.nio.ByteBuffer;
-import java.nio.channels.AsynchronousCloseException;
-import java.nio.channels.SocketChannel;
-
-import org.jboss.logging.Logger;
-
-public final class CommonIOUtil {
-
- private static final Logger LOG = Logger.getLogger(CommonIOUtil.class);
-
- public static Runnable createSocketToChannelWriter(final int byteBufferSize, final Socket socket,
- final SocketChannel channel) {
- // Write from socket to channel
- return () -> {
- int r;
- final byte[] buf = new byte[byteBufferSize];
- int bytesWritten = 0;
- LOG.info("Writing from socket to channel");
- try {
- while ((r = socket.getInputStream().read(buf)) > 0) {
- channel.write(ByteBuffer.wrap(buf, 0, r));
- bytesWritten += r;
- }
- } catch (final SocketException ignore) {
- LOG.info("Socket closed");
- } catch (final IOException e) {
- LOG.errorf(e, "Error writing from socket to channel");
- } finally {
- try {
- channel.close();
- } catch (final Exception e) {
- LOG.errorf(e, "Error closing channel");
- }
- try {
- socket.close();
- } catch (final IOException e) {
- LOG.errorf(e, "Error closing socket");
- }
- }
- LOG.infof("Wrote %d bytes from socket to channel", bytesWritten);
- };
- }
-
- public static Runnable createChannelToSocketWriter(final int byteBufferSize, final SocketChannel channel,
- final Socket socket) {
- // Write from channel to socket
- return () -> {
- int r;
- final ByteBuffer buf = ByteBuffer.allocate(byteBufferSize);
- buf.clear();
- int bytesWritten = 0;
- LOG.info("Writing from channel to socket");
- try {
- while ((r = channel.read(buf)) > 0) {
- buf.flip();
- socket.getOutputStream().write(buf.array(), buf.arrayOffset(), buf.remaining());
- buf.clear();
- bytesWritten += r;
- }
- } catch (final AsynchronousCloseException ignore) {
- LOG.info("Channel closed");
- } catch (final Exception e) {
- LOG.errorf(e, "Error writing from channel to socket");
- } finally {
- try {
- channel.close();
- } catch (final IOException e) {
- LOG.errorf(e, "Error closing channel");
- }
- try {
- socket.close();
- } catch (final IOException e) {
- LOG.errorf(e, "Error closing socket");
- }
- }
- LOG.infof("Wrote %d bytes from channel to socket", bytesWritten);
- };
- }
-}
diff --git a/java-components/domain-proxy/pom.xml b/java-components/domain-proxy/pom.xml
deleted file mode 100644
index 8c158cbf2..000000000
--- a/java-components/domain-proxy/pom.xml
+++ /dev/null
@@ -1,46 +0,0 @@
-
-
- 4.0.0
-
- io.github.redhat-appstudio.jvmbuild
- jvm-build-service-parent
- 999-SNAPSHOT
-
- domain-proxy-parent
- pom
-
- true
-
-
-
- server
- client
- common
-
-
-
-
-
- io.github.redhat-appstudio.jvmbuild
- domain-proxy-common
- ${project.version}
-
-
-
-
-
-
- native
-
-
- native
-
-
-
- false
- true
-
-
-
-
diff --git a/java-components/domain-proxy/server/pom.xml b/java-components/domain-proxy/server/pom.xml
deleted file mode 100644
index 955bd1ef9..000000000
--- a/java-components/domain-proxy/server/pom.xml
+++ /dev/null
@@ -1,85 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-    <parent>
-        <groupId>io.github.redhat-appstudio.jvmbuild</groupId>
-        <artifactId>domain-proxy-parent</artifactId>
-        <version>999-SNAPSHOT</version>
-    </parent>
-    <artifactId>domain-proxy-server</artifactId>
-
-    <dependencies>
-        <dependency>
-            <groupId>io.quarkus</groupId>
-            <artifactId>quarkus-vertx</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>io.quarkus</groupId>
-            <artifactId>quarkus-rest-client</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>io.quarkus</groupId>
-            <artifactId>quarkus-junit5</artifactId>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>io.rest-assured</groupId>
-            <artifactId>rest-assured</artifactId>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>org.wiremock</groupId>
-            <artifactId>wiremock</artifactId>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>io.github.redhat-appstudio.jvmbuild</groupId>
-            <artifactId>domain-proxy-common</artifactId>
-        </dependency>
-    </dependencies>
-
-    <build>
-        <plugins>
-            <plugin>
-                <groupId>${quarkus.platform.group-id}</groupId>
-                <artifactId>quarkus-maven-plugin</artifactId>
-                <extensions>true</extensions>
-                <executions>
-                    <execution>
-                        <goals>
-                            <goal>build</goal>
-                            <goal>generate-code</goal>
-                            <goal>generate-code-tests</goal>
-                        </goals>
-                    </execution>
-                </executions>
-            </plugin>
-            <plugin>
-                <artifactId>maven-surefire-plugin</artifactId>
-                <version>${surefire-plugin.version}</version>
-                <configuration>
-                    <systemPropertyVariables>
-                        <java.util.logging.manager>org.jboss.logmanager.LogManager</java.util.logging.manager>
-                        <maven.home>${maven.home}</maven.home>
-                    </systemPropertyVariables>
-                </configuration>
-            </plugin>
-        </plugins>
-    </build>
-
-    <profiles>
-        <profile>
-            <id>native</id>
-            <activation>
-                <property>
-                    <name>native</name>
-                </property>
-            </activation>
-            <properties>
-                <skipITs>false</skipITs>
-                <quarkus.native.enabled>true</quarkus.native.enabled>
-            </properties>
-        </profile>
-    </profiles>
-</project>
diff --git a/java-components/domain-proxy/server/src/main/java/com/redhat/hacbs/domainproxy/DomainProxyServer.java b/java-components/domain-proxy/server/src/main/java/com/redhat/hacbs/domainproxy/DomainProxyServer.java
deleted file mode 100644
index 40ccbbad7..000000000
--- a/java-components/domain-proxy/server/src/main/java/com/redhat/hacbs/domainproxy/DomainProxyServer.java
+++ /dev/null
@@ -1,70 +0,0 @@
-package com.redhat.hacbs.domainproxy;
-
-import static com.redhat.hacbs.domainproxy.common.CommonIOUtil.createChannelToSocketWriter;
-import static com.redhat.hacbs.domainproxy.common.CommonIOUtil.createSocketToChannelWriter;
-
-import java.io.IOException;
-import java.net.Socket;
-import java.net.StandardProtocolFamily;
-import java.net.UnixDomainSocketAddress;
-import java.nio.channels.ServerSocketChannel;
-import java.nio.channels.SocketChannel;
-import java.nio.file.Files;
-import java.nio.file.Path;
-
-import jakarta.annotation.PostConstruct;
-import jakarta.inject.Inject;
-import jakarta.inject.Singleton;
-
-import org.eclipse.microprofile.config.inject.ConfigProperty;
-
-import io.quarkus.logging.Log;
-import io.quarkus.runtime.Quarkus;
-import io.quarkus.runtime.Startup;
-
-@Startup
-@Singleton
-public class DomainProxyServer {
-
- static final String LOCALHOST = "localhost";
-
- @Inject
- @ConfigProperty(name = "server-domain-socket")
- String domainSocket;
-
- @Inject
- @ConfigProperty(name = "server-http-port")
- int httpServerPort;
-
- @Inject
- @ConfigProperty(name = "byte-buffer-size")
- int byteBufferSize;
-
- @PostConstruct
- public void start() {
- new Thread(() -> {
- Runtime.getRuntime().addShutdownHook(new Thread(() -> {
- try {
- Files.delete(Path.of(domainSocket));
- } catch (final IOException e) {
- Log.errorf(e, "Error deleting domain socket");
- }
- }));
- try (final ServerSocketChannel serverChannel = ServerSocketChannel.open(StandardProtocolFamily.UNIX)) {
- final UnixDomainSocketAddress address = UnixDomainSocketAddress.of(domainSocket);
- serverChannel.bind(address);
- while (true) {
- final SocketChannel channel = serverChannel.accept();
- final Socket socket = new Socket(LOCALHOST, httpServerPort);
- // Write from socket to channel
- Thread.startVirtualThread(createSocketToChannelWriter(byteBufferSize, socket, channel));
- // Write from channel to socket
- Thread.startVirtualThread(createChannelToSocketWriter(byteBufferSize, channel, socket));
- }
- } catch (final IOException e) {
- Log.errorf(e, "Error initialising domain proxy server");
- }
- Quarkus.asyncExit();
- }).start();
- }
-}
diff --git a/java-components/domain-proxy/server/src/main/java/com/redhat/hacbs/domainproxy/ExternalProxyVerticle.java b/java-components/domain-proxy/server/src/main/java/com/redhat/hacbs/domainproxy/ExternalProxyVerticle.java
deleted file mode 100644
index 2e5cc43e9..000000000
--- a/java-components/domain-proxy/server/src/main/java/com/redhat/hacbs/domainproxy/ExternalProxyVerticle.java
+++ /dev/null
@@ -1,137 +0,0 @@
-package com.redhat.hacbs.domainproxy;
-
-import java.util.Set;
-
-import jakarta.enterprise.context.ApplicationScoped;
-import jakarta.inject.Inject;
-
-import org.eclipse.microprofile.config.inject.ConfigProperty;
-
-import io.netty.handler.codec.http.HttpResponseStatus;
-import io.quarkus.logging.Log;
-import io.vertx.core.AbstractVerticle;
-import io.vertx.core.Vertx;
-import io.vertx.core.buffer.Buffer;
-import io.vertx.core.http.HttpMethod;
-import io.vertx.core.http.HttpServer;
-import io.vertx.core.http.HttpServerRequest;
-import io.vertx.core.net.NetClient;
-import io.vertx.core.net.NetClientOptions;
-import io.vertx.core.net.NetSocket;
-import io.vertx.ext.web.client.HttpResponse;
-import io.vertx.ext.web.client.WebClient;
-import io.vertx.ext.web.client.WebClientOptions;
-
-@ApplicationScoped
-public class ExternalProxyVerticle extends AbstractVerticle {
-
- static final int HTTPS_PORT = 443;
-
- @Inject
- @ConfigProperty(name = "server-http-port")
- int httpServerPort;
-
- @Inject
- @ConfigProperty(name = "proxy-target-whitelist")
- Set<String> proxyTargetWhitelist;
-
- private final WebClient webClient;
- private final NetClient netClient;
- private final HttpServer httpServer;
-
- public ExternalProxyVerticle(final Vertx vertx) {
- webClient = WebClient.create(vertx, new WebClientOptions());
- netClient = vertx.createNetClient(new NetClientOptions());
- httpServer = vertx.createHttpServer();
- }
-
- @Override
- public void start() {
- Log.info("Starting domain proxy server...");
- httpServer.requestHandler(request -> {
- if (request.method() == HttpMethod.GET) {
- handleGetRequest(request);
- } else if (request.method() == HttpMethod.CONNECT) {
- handleConnectRequest(request);
- }
- });
- httpServer.listen(httpServerPort, result -> {
- if (result.succeeded()) {
- Log.infof("Server is now listening on port %d", httpServerPort);
- } else {
- Log.errorf(result.cause(), "Failed to bind server");
- }
- });
- }
-
- private void handleGetRequest(final HttpServerRequest request) {
- Log.info("Handling HTTP GET Request");
- if (isTargetWhitelisted(request.authority().host(), request)) {
- webClient.getAbs(request.uri()).send(asyncResult -> {
- if (asyncResult.succeeded()) {
- final HttpResponse<Buffer> response = asyncResult.result();
- if (response.statusCode() != HttpResponseStatus.OK.code()) {
- Log.errorf("Response code: %d, message: %s, body: %s", response.statusCode(), response.statusMessage(),
- response.bodyAsString());
- }
- request.response()
- .setStatusCode(response.statusCode())
- .headers().addAll(response.headers());
- request.response().end(response.body());
- } else {
- Log.errorf(asyncResult.cause(), "Failed to get response");
- request.response()
- .setStatusCode(HttpResponseStatus.BAD_GATEWAY.code())
- .setStatusMessage(HttpResponseStatus.BAD_GATEWAY.reasonPhrase())
- .end("The server received an invalid response from the upstream server.");
- }
- });
- }
- }
-
- private void handleConnectRequest(final HttpServerRequest request) {
- Log.info("Handling HTTPS CONNECT request");
- final String targetHost = request.authority().host();
- if (isTargetWhitelisted(targetHost, request)) {
- int targetPort = request.authority().port();
- if (targetPort == -1) {
- targetPort = HTTPS_PORT;
- }
- netClient.connect(targetPort, targetHost, targetConnect -> {
- if (targetConnect.succeeded()) {
- final NetSocket targetSocket = targetConnect.result();
- request.toNetSocket().onComplete(sourceConnect -> {
- if (sourceConnect.succeeded()) {
- final NetSocket sourceSocket = sourceConnect.result();
- sourceSocket.handler(targetSocket::write);
- targetSocket.handler(sourceSocket::write);
- sourceSocket.closeHandler(v -> targetSocket.close());
- targetSocket.closeHandler(v -> sourceSocket.close());
- } else {
- Log.errorf(sourceConnect.cause(), "Failed to connect to source");
- }
- });
- } else {
- Log.errorf(targetConnect.cause(), "Failed to connect to target");
- request.response()
- .setStatusCode(HttpResponseStatus.BAD_GATEWAY.code())
- .setStatusMessage(HttpResponseStatus.BAD_GATEWAY.reasonPhrase())
- .end("The server received an invalid response from the upstream server.");
- }
- });
- }
- }
-
- private boolean isTargetWhitelisted(final String targetHost, final HttpServerRequest request) {
- Log.infof("Target %s", targetHost);
- if (!proxyTargetWhitelist.contains(targetHost)) {
- Log.error("Target is not in whitelist");
- request.response()
- .setStatusCode(HttpResponseStatus.NOT_FOUND.code())
- .setStatusMessage(HttpResponseStatus.NOT_FOUND.reasonPhrase())
- .end("The requested resource was not found.");
- return false;
- }
- return true;
- }
-}
diff --git a/java-components/domain-proxy/server/src/main/java/com/redhat/hacbs/domainproxy/VerticleDeployer.java b/java-components/domain-proxy/server/src/main/java/com/redhat/hacbs/domainproxy/VerticleDeployer.java
deleted file mode 100644
index ecd7e0b07..000000000
--- a/java-components/domain-proxy/server/src/main/java/com/redhat/hacbs/domainproxy/VerticleDeployer.java
+++ /dev/null
@@ -1,15 +0,0 @@
-package com.redhat.hacbs.domainproxy;
-
-import jakarta.enterprise.context.ApplicationScoped;
-import jakarta.enterprise.event.Observes;
-
-import io.quarkus.runtime.StartupEvent;
-import io.vertx.mutiny.core.Vertx;
-
-@ApplicationScoped
-public class VerticleDeployer {
-
- public void init(final @Observes StartupEvent e, final Vertx vertx, final ExternalProxyVerticle verticle) {
- vertx.deployVerticle(verticle).await().indefinitely();
- }
-}
diff --git a/java-components/domain-proxy/server/src/main/resources/application.properties b/java-components/domain-proxy/server/src/main/resources/application.properties
deleted file mode 100644
index a7465e5c8..000000000
--- a/java-components/domain-proxy/server/src/main/resources/application.properties
+++ /dev/null
@@ -1,4 +0,0 @@
-server-domain-socket=${DOMAIN_SOCKET:/tmp/domainserver}
-server-http-port=2000
-byte-buffer-size=${BYTE_BUFFER_SIZE:1024}
-proxy-target-whitelist=${PROXY_TARGET_WHITELIST:repo.maven.apache.org,repository.jboss.org,packages.confluent.io,jitpack.io,repo.gradle.org,plugins.gradle.org}
diff --git a/java-components/domain-proxy/server/src/test/java/com/redhat/hacbs/domainproxy/ExternalProxyVerticleTest.java b/java-components/domain-proxy/server/src/test/java/com/redhat/hacbs/domainproxy/ExternalProxyVerticleTest.java
deleted file mode 100644
index bef55d66c..000000000
--- a/java-components/domain-proxy/server/src/test/java/com/redhat/hacbs/domainproxy/ExternalProxyVerticleTest.java
+++ /dev/null
@@ -1,113 +0,0 @@
-package com.redhat.hacbs.domainproxy;
-
-import static com.github.tomakehurst.wiremock.client.WireMock.aResponse;
-import static com.github.tomakehurst.wiremock.client.WireMock.get;
-import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo;
-import static com.github.tomakehurst.wiremock.core.WireMockConfiguration.wireMockConfig;
-import static com.redhat.hacbs.domainproxy.DomainProxyServer.LOCALHOST;
-import static io.restassured.RestAssured.given;
-import static org.junit.jupiter.api.Assertions.assertEquals;
-
-import java.io.IOException;
-import java.nio.charset.StandardCharsets;
-import java.nio.file.Files;
-import java.nio.file.Path;
-
-import jakarta.inject.Inject;
-
-import org.apache.commons.codec.digest.DigestUtils;
-import org.eclipse.microprofile.config.inject.ConfigProperty;
-import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.BeforeAll;
-import org.junit.jupiter.api.Test;
-
-import com.github.tomakehurst.wiremock.WireMockServer;
-
-import io.netty.handler.codec.http.HttpResponseStatus;
-import io.quarkus.test.junit.QuarkusTest;
-import io.quarkus.test.junit.TestProfile;
-import io.restassured.specification.RequestSpecification;
-
-@QuarkusTest
-@TestProfile(ExternalProxyVerticleTestProfile.class)
-class ExternalProxyVerticleTest {
-
- static final String MD5_HASH = "ea3ca57f8f99d1d210d1b438c9841440";
-
- private static WireMockServer wireMockServer;
-
- @Inject
- @ConfigProperty(name = "server-http-port")
- int httpServerPort;
-
- @BeforeAll
- public static void before() throws IOException {
- wireMockServer = new WireMockServer(wireMockConfig().port(2002).httpsPort(2003));
- wireMockServer.start();
- wireMockServer.stubFor(
- get(urlEqualTo("/com/foo/bar/1.0/bar-1.0.pom"))
- .willReturn(aResponse()
- .withHeader("Content-Type", "text/xml")
- .withBody(
- Files.readString(Path.of("src/test/resources/bar-1.0.pom"), StandardCharsets.UTF_8))));
- }
-
- @AfterAll
- public static void after() {
- if (wireMockServer != null) {
- wireMockServer.stop();
- }
- }
-
- private RequestSpecification httpRequest() {
- return given().proxy(LOCALHOST, httpServerPort).port(wireMockServer.port());
- }
-
- private RequestSpecification httpsRequest() {
- return given().proxy(LOCALHOST, httpServerPort).port(wireMockServer.httpsPort()).relaxedHTTPSValidation();
- }
-
- @Test
- public void testDownloadDependencyHTTP() {
- final byte[] jar = httpRequest().get("http://" + LOCALHOST + "/com/foo/bar/1.0/bar-1.0.pom")
- .asByteArray();
- assertEquals(MD5_HASH, DigestUtils.md5Hex(jar));
- }
-
- @Test
- public void testDownloadDependencyHTTPS() {
- final byte[] jar = httpsRequest().get("https://" + LOCALHOST + "/com/foo/bar/1.0/bar-1.0.pom")
- .asByteArray();
- assertEquals(MD5_HASH, DigestUtils.md5Hex(jar));
- }
-
- @Test
- public void testMissingDependencyHTTP() {
- httpRequest().get("http://" + LOCALHOST + "/com/foo/bar/2.0/bar-2.0.pom")
- .then()
- .statusCode(HttpResponseStatus.NOT_FOUND.code());
- }
-
- @Test
- public void testMissingDependencyHTTPS() {
- httpsRequest().get("https://" + LOCALHOST + "/com/foo/bar/2.0/bar-2.0.pom")
- .then()
- .statusCode(HttpResponseStatus.NOT_FOUND.code());
- }
-
- @Test
- public void testNotWhitelistedHTTP() {
- httpRequest().get(
- "http://repo1.maven.org/maven2/org/apache/maven/plugins/maven-jar-plugin/3.4.1/maven-jar-plugin-3.4.1.jar")
- .then()
- .statusCode(HttpResponseStatus.NOT_FOUND.code());
- }
-
- @Test
- public void testNotWhitelistedHTTPS() {
- httpsRequest().get(
- "https://repo1.maven.org/maven2/org/apache/maven/plugins/maven-jar-plugin/3.4.1/maven-jar-plugin-3.4.1.jar")
- .then()
- .statusCode(HttpResponseStatus.NOT_FOUND.code());
- }
-}
diff --git a/java-components/domain-proxy/server/src/test/java/com/redhat/hacbs/domainproxy/ExternalProxyVerticleTestProfile.java b/java-components/domain-proxy/server/src/test/java/com/redhat/hacbs/domainproxy/ExternalProxyVerticleTestProfile.java
deleted file mode 100644
index 8a4ee86a0..000000000
--- a/java-components/domain-proxy/server/src/test/java/com/redhat/hacbs/domainproxy/ExternalProxyVerticleTestProfile.java
+++ /dev/null
@@ -1,15 +0,0 @@
-package com.redhat.hacbs.domainproxy;
-
-import static com.redhat.hacbs.domainproxy.DomainProxyServer.LOCALHOST;
-
-import java.util.Map;
-
-import io.quarkus.test.junit.QuarkusTestProfile;
-
-public class ExternalProxyVerticleTestProfile implements QuarkusTestProfile {
-
- @Override
- public Map<String, String> getConfigOverrides() {
- return Map.of("server-http-port", "2001", "proxy-target-whitelist", LOCALHOST);
- }
-}
diff --git a/java-components/pom.xml b/java-components/pom.xml
index 446a6ae01..9bf1c1f9d 100644
--- a/java-components/pom.xml
+++ b/java-components/pom.xml
@@ -90,7 +90,6 @@
common-tools
common-images
common-maven
- domain-proxy
diff --git a/openshift-with-appstudio-test/e2e/basictests.go b/openshift-with-appstudio-test/e2e/basictests.go
index bffdda446..496d16842 100644
--- a/openshift-with-appstudio-test/e2e/basictests.go
+++ b/openshift-with-appstudio-test/e2e/basictests.go
@@ -629,7 +629,7 @@ func runDbTests(path string, testSet string, ta *testArgs) {
pf.Stop()
ta.t.Run(fmt.Sprintf("buildrecipe is deleted with dependencybuild for %s", s), func(t *testing.T) {
- defer GenerateStatusReport(ta.ns, jvmClient, kubeClient, tektonClient)
+ // can't generate status report here because we delete dependency build
err = wait.PollUntilContextTimeout(context.TODO(), ta.interval, time.Hour, true, func(ctx context.Context) (done bool, err error) {
err = jvmClient.JvmbuildserviceV1alpha1().DependencyBuilds(ta.ns).Delete(context.TODO(), db.Name, metav1.DeleteOptions{})
if err != nil {
@@ -743,7 +743,7 @@ func getMavenRepoDetails(ta *testArgs) (*MavenRepoDetails, *portforward.PortForw
mavenRepository := os.Getenv("MAVEN_REPOSITORY")
mavenRepository = strings.ReplaceAll(mavenRepository, "http://jvm-build-maven-repo."+ta.ns+".svc.cluster.local", fmt.Sprintf("http://127.0.0.1:%d", localPort))
mavenPassword := os.Getenv("MAVEN_PASSWORD")
- fmt.Printf("Retrieved maven repository %#v\n", mavenRepository)
+ ta.t.Logf("retrieved maven repository %#v\n", mavenRepository)
return &MavenRepoDetails{Username: mavenUsername, Url: mavenRepository, Password: mavenPassword}, &pf
}
diff --git a/pkg/apis/jvmbuildservice/v1alpha1/systemconfig_types.go b/pkg/apis/jvmbuildservice/v1alpha1/systemconfig_types.go
index 8f6a33fd7..cd1b42096 100644
--- a/pkg/apis/jvmbuildservice/v1alpha1/systemconfig_types.go
+++ b/pkg/apis/jvmbuildservice/v1alpha1/systemconfig_types.go
@@ -48,6 +48,6 @@ type SystemConfigList struct {
const (
KonfluxGitDefinition = "https://raw.githubusercontent.com/konflux-ci/build-definitions/refs/heads/main/task/git-clone/0.1/git-clone.yaml"
KonfluxPreBuildDefinitions = "https://raw.githubusercontent.com/redhat-appstudio/jvm-build-service/main/deploy/tasks/pre-build.yaml"
- KonfluxBuildDefinitions = "https://raw.githubusercontent.com/konflux-ci/build-definitions/refs/heads/main/task/buildah-oci-ta/0.2/buildah-oci-ta.yaml"
+ KonfluxBuildDefinitions = "https://raw.githubusercontent.com/tecarter94/jvm-build-service/domain-proxy/deploy/tasks/buildah-oci-ta.yaml" // TODO Update branch to main
KonfluxMavenDeployDefinitions = "https://raw.githubusercontent.com/redhat-appstudio/jvm-build-service/main/deploy/tasks/maven-deployment.yaml"
)
diff --git a/pkg/domainproxy/client/client.go b/pkg/domainproxy/client/client.go
new file mode 100644
index 000000000..2e4ae1fde
--- /dev/null
+++ b/pkg/domainproxy/client/client.go
@@ -0,0 +1,110 @@
+package client
+
+import (
+ "fmt"
+ . "github.com/redhat-appstudio/jvm-build-service/pkg/domainproxy/common"
+ "net"
+ "time"
+)
+
+const (
+ Localhost = "localhost"
+ HttpPortKey = "DOMAIN_PROXY_HTTP_PORT"
+ DefaultHttpPort = 8080
+ HttpToDomainSocket = "HTTP <-> Domain Socket"
+)
+
+var logger = NewLogger("Domain Proxy Client")
+var common = NewCommon(logger)
+
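+// DomainProxyClient accepts plain HTTP connections on localhost and forwards them over the shared Unix domain socket to the domain proxy server.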
+type DomainProxyClient struct {
+ sharedParams *SharedParams
+ httpPort int
+}
+
+func NewDomainProxyClient() *DomainProxyClient {
+ return &DomainProxyClient{
+ sharedParams: common.NewSharedParams(),
+ httpPort: getHttpPort(),
+ }
+}
+
+func (dpc *DomainProxyClient) Start(ready chan<- bool) {
+ sharedParams := dpc.sharedParams
+ logger.Println("Starting domain proxy client...")
+ var err error
+ sharedParams.Listener, err = net.Listen(TCP, fmt.Sprintf("%s:%d", Localhost, dpc.httpPort))
+ if err != nil {
+ logger.Fatalf("Failed to start HTTP server: %v", err)
+ }
+ go dpc.startClient(ready)
+}
+
+func (dpc *DomainProxyClient) startClient(ready chan<- bool) {
+ sharedParams := dpc.sharedParams
+ logger.Printf("HTTP server listening on port %d", dpc.httpPort)
+ ready <- true
+ for {
+ select {
+ case <-sharedParams.RunningContext.Done():
+ return
+ default:
+ if serverConnection, err := sharedParams.Listener.Accept(); err != nil {
+ select {
+ case <-sharedParams.RunningContext.Done():
+ return
+ default:
+ logger.Printf("Failed to accept server connection: %v", err)
+ }
+ } else {
+ go dpc.handleConnectionRequest(serverConnection)
+ }
+ }
+ }
+}
+
+func (dpc *DomainProxyClient) handleConnectionRequest(serverConnection net.Conn) {
+ sharedParams := dpc.sharedParams
+ if err := serverConnection.SetDeadline(time.Now().Add(sharedParams.IdleTimeout)); err != nil {
+ common.HandleSetDeadlineError(serverConnection, err)
+ return
+ }
+ connectionNo := sharedParams.HttpConnectionCounter.Add(1)
+ logger.Printf("Handling %s Connection %d", HttpToDomainSocket, connectionNo)
+ startTime := time.Now()
+ domainConnection, err := net.DialTimeout(UNIX, sharedParams.DomainSocket, sharedParams.ConnectionTimeout)
+ if err != nil {
+ logger.Printf("Failed to connect to domain socket: %v", err)
+ if err = serverConnection.Close(); err != nil {
+ common.HandleConnectionCloseError(err)
+ }
+ return
+ }
+ if err := domainConnection.SetDeadline(time.Now().Add(sharedParams.IdleTimeout)); err != nil {
+ common.HandleSetDeadlineError(domainConnection, err)
+ if err = serverConnection.Close(); err != nil {
+ common.HandleConnectionCloseError(err)
+ }
+ return
+ }
+ // Initiate transfer between server and domain
+ go func() {
+ common.BiDirectionalTransfer(sharedParams.RunningContext, serverConnection, domainConnection, sharedParams.ByteBufferSize, HttpToDomainSocket, connectionNo)
+ logger.Printf("%s Connection %d ended after %d ms", HttpToDomainSocket, connectionNo, time.Since(startTime).Milliseconds())
+ }()
+}
+
+func (dpc *DomainProxyClient) Stop() {
+ sharedParams := dpc.sharedParams
+ logger.Println("Shutting down domain proxy client...")
+ sharedParams.InitiateShutdown()
+ if sharedParams.Listener != nil {
+ if err := sharedParams.Listener.Close(); err != nil {
+ common.HandleListenerCloseError(err)
+ }
+ }
+}
+
+func getHttpPort() int {
+ return common.GetIntEnvVariable(HttpPortKey, DefaultHttpPort)
+}
diff --git a/pkg/domainproxy/common/common.go b/pkg/domainproxy/common/common.go
new file mode 100644
index 000000000..8f6a38024
--- /dev/null
+++ b/pkg/domainproxy/common/common.go
@@ -0,0 +1,215 @@
+package common
+
+import (
+ "context"
+ "errors"
+ "io"
+ "log"
+ "net"
+ "os"
+ "strconv"
+ "strings"
+ "sync/atomic"
+ "time"
+)
+
+const (
+ ByteBufferSizeKey = "DOMAIN_PROXY_BYTE_BUFFER_SIZE"
+ DefaultByteBufferSize = 32768
+ DomainSocketKey = "DOMAIN_PROXY_DOMAIN_SOCKET"
+ DefaultDomainSocket = "/tmp/domain-socket.sock"
+ ConnectionTimeoutKey = "DOMAIN_PROXY_CONNECTION_TIMEOUT"
+ DefaultConnectionTimeout = 10000 * time.Millisecond
+ IdleTimeoutKey = "DOMAIN_PROXY_IDLE_TIMEOUT"
+ DefaultIdleTimeout = 30000 * time.Millisecond
+ TCP = "tcp"
+ UNIX = "unix"
+)
+
+type Common struct {
+ logger *log.Logger
+}
+
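+// SharedParams holds the configuration and runtime state shared by the domain proxy client and server.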
+type SharedParams struct {
+ ByteBufferSize int
+ DomainSocket string
+ ConnectionTimeout time.Duration
+ IdleTimeout time.Duration
+ HttpConnectionCounter atomic.Uint64
+ Listener net.Listener
+ RunningContext context.Context
+ InitiateShutdown context.CancelFunc
+}
+
+func NewCommon(logger *log.Logger) *Common {
+ return &Common{
+ logger: logger,
+ }
+}
+
+func NewLogger(appName string) *log.Logger {
+ return log.New(os.Stdout, appName+" ", log.LstdFlags|log.Lshortfile)
+}
+
+func (c *Common) NewSharedParams() *SharedParams {
+ runningContext, initiateShutdown := context.WithCancel(context.Background())
+ return &SharedParams{
+ ByteBufferSize: c.getByteBufferSize(),
+ DomainSocket: c.getDomainSocket(),
+ ConnectionTimeout: c.getConnectionTimeout(),
+ IdleTimeout: c.getIdleTimeout(),
+ RunningContext: runningContext,
+ InitiateShutdown: initiateShutdown,
+ }
+}
+
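+// BiDirectionalTransfer copies data in both directions between the two connections until either side closes, times out or the running context is cancelled, then closes both ends.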
+func (c *Common) BiDirectionalTransfer(runningContext context.Context, leftConnection, rightConnection net.Conn, byteBufferSize int, connectionType string, connectionNo uint64) {
+ defer c.CloseConnection(leftConnection, rightConnection, connectionType, connectionNo)
+ transferContext, terminateTransfer := context.WithCancel(runningContext)
+ go c.Transfer(transferContext, terminateTransfer, leftConnection, rightConnection, byteBufferSize, connectionType, connectionNo)
+ go c.Transfer(transferContext, terminateTransfer, rightConnection, leftConnection, byteBufferSize, connectionType, connectionNo)
+ <-transferContext.Done()
+}
+
+func (c *Common) Transfer(transferContext context.Context, terminateTransfer context.CancelFunc, sourceConnection, targetConnection net.Conn, bufferSize int, connectionType string, connectionNo uint64) {
+ defer terminateTransfer()
+ buf := make([]byte, bufferSize)
+ for {
+ select {
+ case <-transferContext.Done():
+ return
+ default:
+ if n, err := io.CopyBuffer(sourceConnection, targetConnection, buf); err != nil {
+ c.handleConnectionError(err, connectionType, connectionNo)
+ return
+ } else if n > 0 {
+ c.logger.Printf("%d bytes transferred for %s connection %d", n, connectionType, connectionNo)
+ } else {
+ // Nothing more to transfer
+ return
+ }
+ }
+ }
+}
+
+func (c *Common) HandleSetDeadlineError(connection net.Conn, err error) {
+ c.logger.Printf("Failed to set deadline: %v", err)
+ if err = connection.Close(); err != nil {
+ c.HandleConnectionCloseError(err)
+ }
+}
+
+func (c *Common) HandleConnectionCloseError(err error) {
+ c.logger.Printf("Failed to close connection: %v", err)
+}
+
+func (c *Common) HandleListenerCloseError(err error) {
+ c.logger.Printf("Failed to close listener: %v", err)
+}
+
+func (c *Common) handleConnectionError(err error, connectionType string, connectionNo uint64) {
+ var netErr net.Error
+ if !errors.Is(err, net.ErrClosed) { // We don't care if connection has been closed, because this is expected
+ if errors.As(err, &netErr) && netErr.Timeout() {
+ c.logger.Printf("%s connection %d timed out", connectionType, connectionNo)
+ } else if err != io.EOF {
+ c.logger.Printf("Failed to transfer data using %s connection %d: %v", connectionType, connectionNo, err)
+ }
+ }
+}
+
+func (c *Common) CloseConnection(leftConnection, rightConnection net.Conn, connectionType string, connectionNo uint64) {
+ if err := leftConnection.Close(); err != nil {
+ c.HandleConnectionCloseError(err)
+ }
+ if err := rightConnection.Close(); err != nil {
+ c.HandleConnectionCloseError(err)
+ }
+ c.logger.Printf("%s connection %d closed", connectionType, connectionNo)
+}
+
+func (c *Common) GetEnvVariable(key, defaultValue string) string {
+ value := os.Getenv(key)
+ if value == "" {
+ c.logger.Printf("Environment variable %s is not set, using default value: %s", key, defaultValue)
+ return defaultValue
+ }
+ return value
+}
+
+func (c *Common) GetIntEnvVariable(key string, defaultValue int) int {
+ valueStr := os.Getenv(key)
+ if valueStr == "" {
+ c.logger.Printf("Environment variable %s is not set, using default value: %d", key, defaultValue)
+ return defaultValue
+ }
+ value, err := strconv.Atoi(valueStr)
+ if err != nil {
+ c.logger.Printf("Invalid environment variable %s: %v, using default value: %d", key, err, defaultValue)
+ return defaultValue
+ }
+ return value
+}
+
+func (c *Common) GetCsvEnvVariable(key, defaultValue string) map[string]bool {
+ valuesStr := os.Getenv(key)
+ if valuesStr == "" {
+ c.logger.Printf("Environment variable %s is not set, using default value: %s", key, defaultValue)
+ return c.parseCsvToMap(defaultValue)
+ }
+ return c.parseCsvToMap(valuesStr)
+}
+
+func (c *Common) GetMillisecondsEnvVariable(key string, defaultValue time.Duration) time.Duration {
+ valueStr := os.Getenv(key)
+ if valueStr == "" {
+ c.logger.Printf("Environment variable %s is not set, using default value: %d", key, defaultValue.Milliseconds())
+ return defaultValue
+ }
+ value, err := strconv.Atoi(valueStr)
+ if err != nil {
+ c.logger.Printf("Invalid environment variable %s: %v, using default value: %d", key, err, defaultValue.Milliseconds())
+ return defaultValue
+ }
+ return time.Duration(value) * time.Millisecond
+}
+
+func (c *Common) parseCsvToMap(csvString string) map[string]bool {
+ valuesStr := strings.Split(csvString, ",")
+ values := make(map[string]bool)
+ for _, value := range valuesStr {
+ trimmedValue := strings.TrimSpace(value)
+ values[trimmedValue] = true
+ }
+ return values
+}
+
+func (c *Common) GetBoolEnvVariable(key string, defaultValue bool) bool {
+ valueStr := os.Getenv(key)
+ if valueStr == "" {
+ c.logger.Printf("Environment variable %s is not set, using default value: %t", key, defaultValue)
+ return defaultValue
+ }
+ value, err := strconv.ParseBool(valueStr)
+ if err != nil {
+ c.logger.Printf("Invalid environment variable %s: %v, using default value: %t", key, err, defaultValue)
+ return defaultValue
+ }
+ return value
+}
+
+func (c *Common) getByteBufferSize() int {
+ return c.GetIntEnvVariable(ByteBufferSizeKey, DefaultByteBufferSize)
+}
+
+func (c *Common) getDomainSocket() string {
+ return c.GetEnvVariable(DomainSocketKey, DefaultDomainSocket)
+}
+
+func (c *Common) getConnectionTimeout() time.Duration {
+ return c.GetMillisecondsEnvVariable(ConnectionTimeoutKey, DefaultConnectionTimeout)
+}
+
+func (c *Common) getIdleTimeout() time.Duration {
+ return c.GetMillisecondsEnvVariable(IdleTimeoutKey, DefaultIdleTimeout)
+}
diff --git a/pkg/domainproxy/integration/domainproxy_test.go b/pkg/domainproxy/integration/domainproxy_test.go
new file mode 100644
index 000000000..d98ab6ab5
--- /dev/null
+++ b/pkg/domainproxy/integration/domainproxy_test.go
@@ -0,0 +1,392 @@
+package integration
+
+import (
+ "crypto/md5"
+ "crypto/tls"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "github.com/elazarl/goproxy"
+ . "github.com/redhat-appstudio/jvm-build-service/pkg/domainproxy/client"
+ . "github.com/redhat-appstudio/jvm-build-service/pkg/domainproxy/common"
+ . "github.com/redhat-appstudio/jvm-build-service/pkg/domainproxy/server"
+ "io"
+ "math/rand"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "os"
+ "strconv"
+ "strings"
+ "testing"
+)
+
+const (
+ DomainProxyPort = "8081"
+ InternalProxyPort = "8082"
+ DomainProxyUrl = "http://" + Localhost + ":" + DomainProxyPort
+ ContentType = "text/xml"
+ Md5Hash = "ea3ca57f8f99d1d210d1b438c9841440"
+ ContentLength = "403"
+ MockUrlPath = "/com/foo/bar/1.0/bar-1.0.pom"
+ NonExistentUrlPath = "/com/foo/bar/1.0/bar-2.0.pom"
+ NonWhitelistedUrl = "repo1.maven.org/maven2/org/apache/maven/plugins/maven-jar-plugin/3.4.1/maven-jar-plugin-3.4.1.jar"
+ NonExistentHost = "foo.bar"
+ User = "foo"
+ Password = "bar"
+)
+
+func createClient(t *testing.T) *http.Client {
+ proxyUrl, err := url.Parse(DomainProxyUrl)
+ if err != nil {
+ t.Fatal(err)
+ }
+ transport := &http.Transport{
+ Proxy: http.ProxyURL(proxyUrl),
+ TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
+ }
+ return &http.Client{
+ Transport: transport,
+ }
+}
+
+func getMd5Hash(bytes []byte) string {
+ hash := md5.Sum(bytes)
+ return hex.EncodeToString(hash[:])
+}
+
+func getRandomDomainSocket() string {
+ return "/tmp/domain-socket-" + strconv.Itoa(rand.Int()) + ".sock"
+}
+
+func mockHandler(t *testing.T) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ if r.Method == http.MethodGet && r.URL.Path == MockUrlPath {
+ // Mock GET response
+ pom, err := os.ReadFile("testdata/bar-1.0.pom")
+ if err != nil {
+ t.Fatal(err)
+ }
+ w.Header().Set("Content-Type", ContentType)
+ w.WriteHeader(http.StatusOK)
+ if _, err := w.Write(pom); err != nil {
+ t.Fatal(err)
+ }
+ } else if r.Method == http.MethodHead && r.URL.Path == MockUrlPath {
+ // Mock HEAD response
+ w.Header().Set("Content-Type", ContentType)
+ w.Header().Set("Content-Length", ContentLength)
+ w.WriteHeader(http.StatusOK)
+ } else {
+ http.NotFound(w, r)
+ }
+ }
+}
+
+func startDomainProxy() (*DomainProxyServer, *DomainProxyClient) {
+ domainProxyServer := NewDomainProxyServer()
+ serverReady := make(chan bool)
+ go domainProxyServer.Start(serverReady)
+ <-serverReady
+ clientReady := make(chan bool)
+ domainProxyClient := NewDomainProxyClient()
+ go domainProxyClient.Start(clientReady)
+ <-clientReady
+ return domainProxyServer, domainProxyClient
+}
+
+func stopDomainProxy(domainProxyServer *DomainProxyServer, domainProxyClient *DomainProxyClient) {
+ domainProxyServer.Stop()
+ domainProxyClient.Stop()
+}
+
+func startMockServers(t *testing.T) (*httptest.Server, *httptest.Server) {
+ mockHandler := mockHandler(t)
+ mockHttpServer := httptest.NewServer(mockHandler)
+ mockHttpsServer := httptest.NewUnstartedServer(mockHandler)
+ mockHttpsServer.StartTLS()
+ return mockHttpServer, mockHttpsServer
+}
+
+func stopMockServers(mockHttpServer *httptest.Server, mockHttpsServer *httptest.Server) {
+ mockHttpServer.Close()
+ mockHttpsServer.Close()
+}
+
+func startInternalProxyServer(t *testing.T, onRequestFunction func(req *http.Request, ctx *goproxy.ProxyCtx) (*http.Request, *http.Response), onConnectFunction func(host string, ctx *goproxy.ProxyCtx) (*goproxy.ConnectAction, string)) *http.Server {
+ internalProxy := goproxy.NewProxyHttpServer()
+ internalProxy.Verbose = true
+ if onRequestFunction != nil {
+ internalProxy.OnRequest().DoFunc(onRequestFunction)
+ internalProxy.OnRequest().HandleConnectFunc(onConnectFunction)
+ }
+ internalProxyServer := &http.Server{
+ Addr: Localhost + ":" + InternalProxyPort,
+ Handler: internalProxy,
+ }
+ go func() {
+ if err := internalProxyServer.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
+ t.Error(err)
+ }
+ }()
+ return internalProxyServer
+}
+
+func stopInternalProxyServer(t *testing.T, internalProxyServer *http.Server) {
+ err := internalProxyServer.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func commonTestBehaviour(t *testing.T, qualifier string) {
+ // Set env variables
+ t.Setenv(DomainSocketKey, getRandomDomainSocket())
+ t.Setenv(HttpPortKey, DomainProxyPort)
+ t.Setenv(TargetWhitelistKey, "127.0.0.1,foo.bar")
+ // Start services
+ domainProxyServer, domainProxyClient := startDomainProxy()
+ defer stopDomainProxy(domainProxyServer, domainProxyClient)
+ // Start mock HTTP and HTTPS servers
+ mockHttpServer, mockHttpsServer := startMockServers(t)
+ defer stopMockServers(mockHttpServer, mockHttpsServer)
+ mockHttpUrl := mockHttpServer.URL
+ mockHttpsUrl := mockHttpsServer.URL
+ // Create HTTP client
+ httpClient := createClient(t)
+
+ t.Run(fmt.Sprintf("Test HTTP GET dependency%s", qualifier), func(t *testing.T) {
+ response, err := httpClient.Get(mockHttpUrl + MockUrlPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer response.Body.Close()
+ if response.StatusCode != http.StatusOK {
+ t.Fatalf("Actual HTTP status %d did not match expected HTTP status %d", response.StatusCode, http.StatusOK)
+ }
+ pom, err := io.ReadAll(response.Body)
+ if err != nil {
+ t.Fatal(err)
+ }
+ hash := getMd5Hash(pom)
+ if hash != Md5Hash {
+ t.Fatalf("Actual MD5 hash %s did not match expected MD5 hash %s", hash, Md5Hash)
+ }
+ })
+
+ t.Run(fmt.Sprintf("Test HTTPS GET dependency%s", qualifier), func(t *testing.T) {
+ response, err := httpClient.Get(mockHttpsUrl + MockUrlPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer response.Body.Close()
+ if response.StatusCode != http.StatusOK {
+ t.Fatalf("Actual HTTP status %d did not match expected HTTP status %d", response.StatusCode, http.StatusOK)
+ }
+ pom, err := io.ReadAll(response.Body)
+ if err != nil {
+ t.Fatal(err)
+ }
+ hash := getMd5Hash(pom)
+ if hash != Md5Hash {
+ t.Fatalf("Actual MD5 hash %s did not match expected MD5 hash %s", hash, Md5Hash)
+ }
+ })
+
+ t.Run(fmt.Sprintf("Test HTTP GET non-existent dependency%s", qualifier), func(t *testing.T) {
+ response, err := httpClient.Get(mockHttpUrl + NonExistentUrlPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer response.Body.Close()
+ if response.StatusCode != http.StatusNotFound {
+ t.Fatalf("Actual HTTP status %d did not match expected HTTP status %d", response.StatusCode, http.StatusNotFound)
+ }
+ })
+
+ t.Run(fmt.Sprintf("Test HTTPS GET non-existent dependency%s", qualifier), func(t *testing.T) {
+ response, err := httpClient.Get(mockHttpsUrl + NonExistentUrlPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer response.Body.Close()
+ if response.StatusCode != http.StatusNotFound {
+ t.Fatalf("Actual HTTP status %d did not match expected HTTP status %d", response.StatusCode, http.StatusNotFound)
+ }
+ })
+
+ t.Run(fmt.Sprintf("Test HTTP non-whitelisted host%s", qualifier), func(t *testing.T) {
+ response, err := httpClient.Get("http://" + NonWhitelistedUrl)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer response.Body.Close()
+ if response.StatusCode != http.StatusForbidden {
+ t.Fatalf("Actual HTTP status %d did not match expected HTTP status %d", response.StatusCode, http.StatusForbidden)
+ }
+ })
+
+ t.Run(fmt.Sprintf("Test HTTPS non-whitelisted host%s", qualifier), func(t *testing.T) {
+ _, err := httpClient.Get("https://" + NonWhitelistedUrl)
+ statusText := http.StatusText(http.StatusForbidden)
+ if !strings.Contains(err.Error(), statusText) {
+ t.Fatalf("Actual error %s did not contain expected HTTP status text %s", err.Error(), statusText)
+ }
+ })
+
+ t.Run(fmt.Sprintf("Test HTTP non-existent host%s", qualifier), func(t *testing.T) {
+ response, err := httpClient.Get("http://" + NonExistentHost)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer response.Body.Close()
+ if response.StatusCode != http.StatusInternalServerError {
+ t.Fatalf("Actual HTTP status %d did not match expected HTTP status %d", response.StatusCode, http.StatusInternalServerError)
+ }
+ })
+
+ t.Run(fmt.Sprintf("Test HTTPS non-existent host%s", qualifier), func(t *testing.T) {
+ _, err := httpClient.Get("https://" + NonExistentHost)
+ internalServerStatusText := http.StatusText(http.StatusInternalServerError)
+ badGatewayStatusText := http.StatusText(http.StatusBadGateway)
+ if !strings.Contains(err.Error(), internalServerStatusText) && !strings.Contains(err.Error(), badGatewayStatusText) { // Internal proxy may return 502 Bad Gateway
+ t.Fatalf("Actual error %s did not contain expected HTTP status text %s or %s", err.Error(), internalServerStatusText, badGatewayStatusText)
+ }
+ })
+
+ t.Run(fmt.Sprintf("Test HTTP HEAD dependency%s", qualifier), func(t *testing.T) {
+ response, err := httpClient.Head(mockHttpUrl + MockUrlPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer response.Body.Close()
+ actualContentLength := response.Header.Get("Content-Length")
+ if actualContentLength != ContentLength {
+ t.Fatalf("Actual content length %s did not match expected content length %s", actualContentLength, ContentLength)
+ }
+ })
+
+ t.Run(fmt.Sprintf("Test HTTPS HEAD dependency%s", qualifier), func(t *testing.T) {
+ response, err := httpClient.Head(mockHttpsUrl + MockUrlPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer response.Body.Close()
+ actualContentLength := response.Header.Get("Content-Length")
+ if actualContentLength != ContentLength {
+ t.Fatalf("Actual content length %s did not match expected content length %s", actualContentLength, ContentLength)
+ }
+ })
+
+ t.Run(fmt.Sprintf("Test HTTP HEAD non-existent dependency%s", qualifier), func(t *testing.T) {
+ response, err := httpClient.Head(mockHttpUrl + NonExistentUrlPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer response.Body.Close()
+ if response.StatusCode != http.StatusNotFound {
+ t.Fatalf("Actual HTTP status %d did not match expected HTTP status %d", response.StatusCode, http.StatusNotFound)
+ }
+ })
+
+ t.Run(fmt.Sprintf("Test HTTPS HEAD non-existent dependency%s", qualifier), func(t *testing.T) {
+ response, err := httpClient.Head(mockHttpsUrl + NonExistentUrlPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer response.Body.Close()
+ if response.StatusCode != http.StatusNotFound {
+ t.Fatalf("Actual HTTP status %d did not match expected HTTP status %d", response.StatusCode, http.StatusNotFound)
+ }
+ })
+}
+
+func commonInternalProxyTestBehaviour(t *testing.T, qualifier string, onRequestFunction func(req *http.Request, ctx *goproxy.ProxyCtx) (*http.Request, *http.Response), onConnectFunction func(host string, ctx *goproxy.ProxyCtx) (*goproxy.ConnectAction, string)) {
+ // Start internal proxy
+ internalProxyServer := startInternalProxyServer(t, onRequestFunction, onConnectFunction)
+ // Set env variables
+ t.Setenv(EnableInternalProxyKey, "true")
+ t.Setenv(InternalProxyHostKey, Localhost)
+ t.Setenv(InternalProxyPortKey, InternalProxyPort)
+ t.Setenv(InternalNonProxyHostsKey, "example.com")
+ // Run tests with internal proxy
+ commonTestBehaviour(t, qualifier)
+ // Stop internal proxy
+ stopInternalProxyServer(t, internalProxyServer)
+ // Set non-proxy hosts env variable
+ t.Setenv(InternalNonProxyHostsKey, "127.0.0.1,foo.bar")
+ // Run tests without internal proxy
+ commonTestBehaviour(t, qualifier+" and non-proxy host")
+}
+
+func TestDomainProxy(t *testing.T) {
+ commonTestBehaviour(t, "")
+}
+
+func TestDomainProxyWithInternalProxy(t *testing.T) {
+ commonInternalProxyTestBehaviour(t, " with internal proxy", nil, nil)
+}
+
+func TestDomainProxyWithInternalProxyAndAuthentication(t *testing.T) {
+ // Set env variables
+ t.Setenv(InternalProxyUserKey, User)
+ t.Setenv(InternalProxyPasswordKey, Password)
+ basicAuth := "Basic " + GetBasicAuth(User, Password)
+ // Create internal proxy HTTP authentication handler
+ onRequestFunction := func(req *http.Request, ctx *goproxy.ProxyCtx) (*http.Request, *http.Response) {
+ if req.Header.Get("Proxy-Authorization") != basicAuth {
+ return nil, goproxy.NewResponse(req, goproxy.ContentTypeText, http.StatusProxyAuthRequired, http.StatusText(http.StatusProxyAuthRequired))
+ }
+ return req, nil
+ }
+ // Create internal proxy HTTPS authentication handler
+ onConnectionFunction := func(host string, ctx *goproxy.ProxyCtx) (*goproxy.ConnectAction, string) {
+ req := ctx.Req
+ authHeader := req.Header.Get("Proxy-Authorization")
+ if authHeader != basicAuth {
+ ctx.Resp = goproxy.NewResponse(req, goproxy.ContentTypeText, http.StatusProxyAuthRequired, http.StatusText(http.StatusProxyAuthRequired))
+ return goproxy.RejectConnect, host
+ }
+ return goproxy.OkConnect, host
+ }
+ // Run tests with internal proxy and authentication
+ commonInternalProxyTestBehaviour(t, " with internal proxy and authentication", onRequestFunction, onConnectionFunction)
+
+ // Set invalid authentication env variables
+ t.Setenv(DomainSocketKey, getRandomDomainSocket())
+ t.Setenv(InternalProxyUserKey, "123")
+ t.Setenv(InternalProxyPasswordKey, "456")
+ t.Setenv(InternalNonProxyHostsKey, "example.com")
+ // Start internal proxy
+ internalProxyServer := startInternalProxyServer(t, onRequestFunction, onConnectionFunction)
+ defer stopInternalProxyServer(t, internalProxyServer)
+ // Start services
+ domainProxyServer, domainProxyClient := startDomainProxy()
+ defer stopDomainProxy(domainProxyServer, domainProxyClient)
+ // Start mock HTTP and HTTPS servers
+ mockHttpServer, mockHttpsServer := startMockServers(t)
+ defer stopMockServers(mockHttpServer, mockHttpsServer)
+ mockHttpUrl := mockHttpServer.URL
+ mockHttpsUrl := mockHttpsServer.URL
+ // Create HTTP client
+ httpClient := createClient(t)
+
+ t.Run("Test HTTP GET dependency with internal proxy and invalid authentication", func(t *testing.T) {
+ response, err := httpClient.Get(mockHttpUrl + MockUrlPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer response.Body.Close()
+ if response.StatusCode != http.StatusProxyAuthRequired {
+ t.Fatalf("Actual HTTP status %d did not match expected HTTP status %d", response.StatusCode, http.StatusProxyAuthRequired)
+ }
+ })
+
+ t.Run("Test HTTPS GET dependency with internal proxy and invalid authentication", func(t *testing.T) {
+ _, err := httpClient.Get(mockHttpsUrl + MockUrlPath)
+ statusText := http.StatusText(http.StatusProxyAuthRequired)
+ if !strings.Contains(err.Error(), statusText) {
+ t.Fatalf("Actual error %s did not contain expected HTTP status text %s", err.Error(), statusText)
+ }
+ })
+}
diff --git a/java-components/domain-proxy/server/src/test/resources/bar-1.0.pom b/pkg/domainproxy/integration/testdata/bar-1.0.pom
similarity index 100%
rename from java-components/domain-proxy/server/src/test/resources/bar-1.0.pom
rename to pkg/domainproxy/integration/testdata/bar-1.0.pom
diff --git a/pkg/domainproxy/server/server.go b/pkg/domainproxy/server/server.go
new file mode 100644
index 000000000..fa0e12db2
--- /dev/null
+++ b/pkg/domainproxy/server/server.go
@@ -0,0 +1,420 @@
+package server
+
+import (
+ "bufio"
+ "encoding/base64"
+ "fmt"
+ . "github.com/redhat-appstudio/jvm-build-service/pkg/domainproxy/common"
+ "net"
+ "net/http"
+ "os"
+ "strconv"
+ "strings"
+ "sync/atomic"
+ "time"
+)
+
+const (
+ HttpPort = 80
+ HttpsPort = 443
+ TargetWhitelistKey = "DOMAIN_PROXY_TARGET_WHITELIST"
+ DefaultTargetWhitelist = "localhost,repo.maven.apache.org,repository.jboss.org,packages.confluent.io,jitpack.io,repo.gradle.org,plugins.gradle.org"
+ EnableInternalProxyKey = "DOMAIN_PROXY_ENABLE_INTERNAL_PROXY"
+ DefaultEnableInternalProxy = false
+ InternalProxyHostKey = "DOMAIN_PROXY_INTERNAL_PROXY_HOST"
+ DefaultInternalProxyHost = "indy-generic-proxy"
+ InternalProxyPortKey = "DOMAIN_PROXY_INTERNAL_PROXY_PORT"
+ DefaultInternalProxyPort = 80
+ InternalProxyUserKey = "DOMAIN_PROXY_INTERNAL_PROXY_USER"
+ DefaultInternalProxyUser = ""
+ InternalProxyPasswordKey = "DOMAIN_PROXY_INTERNAL_PROXY_PASSWORD"
+ DefaultInternalProxyPassword = ""
+ InternalNonProxyHostsKey = "DOMAIN_PROXY_INTERNAL_NON_PROXY_HOSTS"
+ DefaultInternalNonProxyHosts = "localhost"
+ DomainSocketToHttp = "Domain Socket <-> HTTP"
+ DomainSocketToHttps = "Domain Socket <-> HTTPS"
+)
+
+var logger = NewLogger("Domain Proxy Server")
+var common = NewCommon(logger)
+
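+// DomainProxyServer listens on the shared Unix domain socket and forwards whitelisted HTTP and HTTPS (CONNECT) requests to their targets, optionally via an internal proxy.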
+type DomainProxyServer struct {
+ sharedParams *SharedParams
+ targetWhitelist map[string]bool
+ enableInternalProxy bool
+ internalProxyHost string
+ internalProxyPort int
+ internalProxyUser string
+ internalProxyPassword string
+ internalNonProxyHosts map[string]bool
+ httpsConnectionCounter atomic.Uint64
+}
+
+func NewDomainProxyServer() *DomainProxyServer {
+ return &DomainProxyServer{
+ sharedParams: common.NewSharedParams(),
+ targetWhitelist: getTargetWhitelist(),
+ enableInternalProxy: getEnableInternalProxy(),
+ internalProxyHost: getInternalProxyHost(),
+ internalProxyPort: getInternalProxyPort(),
+ internalProxyUser: getInternalProxyUser(),
+ internalProxyPassword: getInternalProxyPassword(),
+ internalNonProxyHosts: getInternalNonProxyHosts(),
+ }
+}
+
+func (dps *DomainProxyServer) Start(ready chan<- bool) {
+ sharedParams := dps.sharedParams
+ logger.Println("Starting domain proxy server...")
+ if _, err := os.Stat(sharedParams.DomainSocket); err == nil {
+ if err := os.Remove(sharedParams.DomainSocket); err != nil {
+ logger.Fatalf("Failed to delete existing domain socket: %v", err)
+ }
+ }
+ var err error
+ sharedParams.Listener, err = net.Listen(UNIX, sharedParams.DomainSocket)
+ if err != nil {
+ logger.Fatalf("Failed to start domain socket listener: %v", err)
+ }
+ go dps.startServer(ready)
+}
+
+func (dps *DomainProxyServer) startServer(ready chan<- bool) {
+ sharedParams := dps.sharedParams
+ logger.Printf("Domain socket server listening on %s", sharedParams.DomainSocket)
+ ready <- true
+ for {
+ select {
+ case <-sharedParams.RunningContext.Done():
+ return
+ default:
+ if domainConnection, err := sharedParams.Listener.Accept(); err != nil {
+ select {
+ case <-sharedParams.RunningContext.Done():
+ return
+ default:
+ logger.Printf("Failed to accept domain socket connection: %v", err)
+ }
+ } else {
+ go dps.handleConnectionRequest(domainConnection)
+ }
+ }
+ }
+}
+
+func (dps *DomainProxyServer) handleConnectionRequest(domainConnection net.Conn) {
+ sharedParams := dps.sharedParams
+ if err := domainConnection.SetDeadline(time.Now().Add(sharedParams.IdleTimeout)); err != nil {
+ common.HandleSetDeadlineError(domainConnection, err)
+ return
+ }
+ reader := bufio.NewReader(domainConnection)
+ request, err := http.ReadRequest(reader)
+ if err != nil {
+ logger.Printf("Failed to read request: %v", err)
+ if err = domainConnection.Close(); err != nil {
+ common.HandleConnectionCloseError(err)
+ }
+ return
+ }
+ writer := &responseWriter{connection: domainConnection}
+ if request.Method == http.MethodConnect {
+ dps.handleHttpsConnection(domainConnection, writer, request)
+ } else {
+ dps.handleHttpConnection(domainConnection, writer, request)
+ }
+}
+
+func (dps *DomainProxyServer) handleHttpConnection(sourceConnection net.Conn, writer http.ResponseWriter, request *http.Request) {
+ sharedParams := dps.sharedParams
+ connectionNo := sharedParams.HttpConnectionCounter.Add(1)
+ targetHost, targetPort := getTargetHostAndPort(request.Host, HttpPort)
+ actualTargetHost, actualTargetPort := targetHost, targetPort
+ targetConnectionName := "target"
+ useInternalProxy := dps.useInternalProxy(targetHost)
+ // Redirect connection to internal proxy if enabled
+ if useInternalProxy {
+ targetHost, targetPort = dps.internalProxyHost, dps.internalProxyPort
+ logger.Printf("Handling %s Connection %d with internal proxy %s:%d and target %s:%d", DomainSocketToHttp, connectionNo, targetHost, targetPort, actualTargetHost, actualTargetPort)
+ targetConnectionName = "internal proxy"
+ } else {
+ logger.Printf("Handling %s Connection %d with target %s:%d", DomainSocketToHttp, connectionNo, actualTargetHost, actualTargetPort)
+ }
+ // Check if target is whitelisted
+ if !dps.isTargetWhitelisted(actualTargetHost, writer) {
+ if err := sourceConnection.Close(); err != nil {
+ common.HandleConnectionCloseError(err)
+ }
+ return
+ }
+ startTime := time.Now()
+ request.Header.Del("Proxy-Connection") // Prevent keep-alive as it breaks internal proxy authentication
+ request.Header.Set("Connection", "close") // Prevent keep-alive as it breaks internal proxy authentication
+ // Update request with target details for internal proxy if enabled
+ if useInternalProxy {
+ request.Header.Set("Host", fmt.Sprintf("%s:%d", actualTargetHost, actualTargetPort))
+ // Add authentication details if configured
+ if dps.internalProxyUser != "" && dps.internalProxyPassword != "" {
+ request.Header.Set("Proxy-Authorization", "Basic "+GetBasicAuth(dps.internalProxyUser, dps.internalProxyPassword))
+ }
+ }
+ // Try to connect to target or internal proxy
+ targetConnection, err := net.DialTimeout(TCP, fmt.Sprintf("%s:%d", targetHost, targetPort), sharedParams.ConnectionTimeout)
+ if err != nil {
+ dps.handleErrorResponse(writer, err, fmt.Sprintf("Failed to connect to %s", targetConnectionName))
+ if err = sourceConnection.Close(); err != nil {
+ common.HandleConnectionCloseError(err)
+ }
+ return
+ }
+ if err = targetConnection.SetDeadline(time.Now().Add(sharedParams.IdleTimeout)); err != nil {
+ common.HandleSetDeadlineError(targetConnection, err)
+ if err = sourceConnection.Close(); err != nil {
+ common.HandleConnectionCloseError(err)
+ }
+ return
+ }
+ // Send HTTP request to internal proxy if enabled
+ if useInternalProxy {
+ err = request.WriteProxy(targetConnection)
+ } else {
+ err = request.Write(targetConnection)
+ }
+ if err != nil {
+ dps.handleErrorResponse(writer, err, fmt.Sprintf("Failed to send request to %s", targetConnectionName))
+ if err = targetConnection.Close(); err != nil {
+ common.HandleConnectionCloseError(err)
+ }
+ if err = sourceConnection.Close(); err != nil {
+ common.HandleConnectionCloseError(err)
+ }
+ return
+ }
+ // Initiate transfer between source and target or internal proxy
+ go func() {
+ common.BiDirectionalTransfer(sharedParams.RunningContext, sourceConnection, targetConnection, sharedParams.ByteBufferSize, DomainSocketToHttp, connectionNo)
+ logger.Printf("%s Connection %d ended after %d ms", DomainSocketToHttp, connectionNo, time.Since(startTime).Milliseconds())
+ }()
+}
+
+func (dps *DomainProxyServer) handleHttpsConnection(sourceConnection net.Conn, writer http.ResponseWriter, request *http.Request) {
+ sharedParams := dps.sharedParams
+ connectionNo := dps.httpsConnectionCounter.Add(1)
+ targetHost, targetPort := getTargetHostAndPort(request.Host, HttpsPort)
+ actualTargetHost, actualTargetPort := targetHost, targetPort
+ targetConnectionName := "target"
+ useInternalProxy := dps.useInternalProxy(targetHost)
+ // Redirect connection to internal proxy if enabled
+ if useInternalProxy {
+ targetHost, targetPort = dps.internalProxyHost, dps.internalProxyPort
+ logger.Printf("Handling %s Connection %d with internal proxy %s:%d and target %s:%d", DomainSocketToHttps, connectionNo, targetHost, targetPort, actualTargetHost, actualTargetPort)
+ targetConnectionName = "internal proxy"
+ } else {
+ logger.Printf("Handling %s Connection %d with target %s:%d", DomainSocketToHttps, connectionNo, actualTargetHost, actualTargetPort)
+ }
+ // Check if target is whitelisted
+ if !dps.isTargetWhitelisted(actualTargetHost, writer) {
+ if err := sourceConnection.Close(); err != nil {
+ common.HandleConnectionCloseError(err)
+ }
+ return
+ }
+ startTime := time.Now()
+ request.Header.Del("Proxy-Connection") // Prevent keep-alive as it breaks internal proxy authentication
+ request.Header.Set("Connection", "close") // Prevent keep-alive as it breaks internal proxy authentication
+ // Try to connect to target or internal proxy
+ targetConnection, err := net.DialTimeout(TCP, fmt.Sprintf("%s:%d", targetHost, targetPort), sharedParams.ConnectionTimeout)
+ if err != nil {
+ dps.handleErrorResponse(writer, err, fmt.Sprintf("Failed to connect to %s", targetConnectionName))
+ if err = sourceConnection.Close(); err != nil {
+ common.HandleConnectionCloseError(err)
+ }
+ return
+ }
+ if err = targetConnection.SetDeadline(time.Now().Add(sharedParams.IdleTimeout)); err != nil {
+ common.HandleSetDeadlineError(targetConnection, err)
+ if err = sourceConnection.Close(); err != nil {
+ common.HandleConnectionCloseError(err)
+ }
+ return
+ }
+ // Create HTTPS connection to internal proxy if enabled
+ if useInternalProxy {
+ proxyConnectRequest := fmt.Sprintf("CONNECT %s:%d HTTP/1.1\r\nHost: %s:%d\r\nConnection: close\r\n", actualTargetHost, actualTargetPort, actualTargetHost, actualTargetPort) // Prevent keep-alive as it breaks internal proxy authentication
+ // Add authentication details if configured
+ if dps.internalProxyUser != "" && dps.internalProxyPassword != "" {
+ proxyConnectRequest += fmt.Sprintf("Proxy-Authorization: Basic %s\r\n", GetBasicAuth(dps.internalProxyUser, dps.internalProxyPassword))
+ }
+ proxyConnectRequest += "\r\n"
+ if _, err = targetConnection.Write([]byte(proxyConnectRequest)); err != nil {
+ dps.handleErrorResponse(writer, err, "Failed to send connect request to internal proxy")
+ if err = targetConnection.Close(); err != nil {
+ common.HandleConnectionCloseError(err)
+ }
+ if err = sourceConnection.Close(); err != nil {
+ common.HandleConnectionCloseError(err)
+ }
+ return
+ }
+ proxyReader := bufio.NewReader(targetConnection)
+ proxyResponse, err := http.ReadResponse(proxyReader, request)
+ if err != nil {
+ dps.handleErrorResponse(writer, err, "Failed to establish connection with internal proxy")
+ if err = targetConnection.Close(); err != nil {
+ common.HandleConnectionCloseError(err)
+ }
+ if err = sourceConnection.Close(); err != nil {
+ common.HandleConnectionCloseError(err)
+ }
+ return
+ } else if proxyResponse.StatusCode != http.StatusOK {
+ proxyResponse.Header.Set("Connection", "close") // Prevent keep-alive as it breaks internal proxy authentication
+ if err := proxyResponse.Write(sourceConnection); err != nil {
+ dps.handleErrorResponse(writer, err, "Failed to send internal proxy response to source")
+ }
+ if err = targetConnection.Close(); err != nil {
+ common.HandleConnectionCloseError(err)
+ }
+ if err = sourceConnection.Close(); err != nil {
+ common.HandleConnectionCloseError(err)
+ }
+ return
+ }
+ }
+ // Notify source that HTTPS connection has been established to target or internal proxy
+ if _, err = writer.Write([]byte("HTTP/1.1 200 Connection Established\r\nConnection: close\r\n\r\n")); err != nil { // Prevent keep-alive as it breaks internal proxy authentication
+ dps.handleErrorResponse(writer, err, "Failed to send connect response to source")
+ if err = targetConnection.Close(); err != nil {
+ common.HandleConnectionCloseError(err)
+ }
+ if err = sourceConnection.Close(); err != nil {
+ common.HandleConnectionCloseError(err)
+ }
+ return
+ }
+ // Initiate transfer between source and target or internal proxy
+ go func() {
+ common.BiDirectionalTransfer(sharedParams.RunningContext, sourceConnection, targetConnection, sharedParams.ByteBufferSize, DomainSocketToHttps, connectionNo)
+ logger.Printf("%s Connection %d ended after %d ms", DomainSocketToHttps, connectionNo, time.Since(startTime).Milliseconds())
+ }()
+}
+
+func getTargetHostAndPort(host string, defaultPort int) (string, int) {
+ hostAndPort := strings.Split(host, ":")
+ targetHost := hostAndPort[0]
+ targetPort := defaultPort
+ if len(hostAndPort) > 1 {
+ if port, err := strconv.Atoi(hostAndPort[1]); err == nil {
+ targetPort = port
+ }
+ }
+ return targetHost, targetPort
+}
+
+func (dps *DomainProxyServer) isTargetWhitelisted(targetHost string, writer http.ResponseWriter) bool {
+ if !dps.targetWhitelist[targetHost] {
+ message := fmt.Sprintf("Target host %s is not whitelisted", targetHost)
+ logger.Println(message)
+ http.Error(writer, message, http.StatusForbidden)
+ return false
+ }
+ return true
+}
+
+func (dps *DomainProxyServer) useInternalProxy(targetHost string) bool {
+ if dps.enableInternalProxy {
+ if !dps.internalNonProxyHosts[targetHost] {
+ return true
+ } else {
+ logger.Printf("Target host %s is a non-proxy host", targetHost)
+ }
+ }
+ return false
+}
+
+func GetBasicAuth(user string, password string) string {
+ return base64.StdEncoding.EncodeToString([]byte(user + ":" + password))
+}
+
+func (dps *DomainProxyServer) handleErrorResponse(writer http.ResponseWriter, err error, message string) {
+ logger.Printf("%s: %v", message, err)
+ writer.Header().Set("Connection", "close") // Prevent keep-alive as it breaks internal proxy authentication
+ status := http.StatusInternalServerError
+ http.Error(writer, message+": "+err.Error(), status)
+}
+
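+// Stop shuts down the domain proxy server: it initiates shutdown of in-flight transfers,
+// closes the listener and removes the domain socket file if it exists.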
+func (dps *DomainProxyServer) Stop() {
+ sharedParams := dps.sharedParams
+ logger.Println("Shutting down domain proxy server...")
+ sharedParams.InitiateShutdown()
+ if sharedParams.Listener != nil {
+ if err := sharedParams.Listener.Close(); err != nil {
+ common.HandleListenerCloseError(err)
+ }
+ }
+ if _, err := os.Stat(sharedParams.DomainSocket); err == nil {
+ if err := os.Remove(sharedParams.DomainSocket); err != nil {
+ logger.Printf("Failed to delete domain socket: %v", err)
+ }
+ }
+}
+
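+// responseWriter is a minimal http.ResponseWriter backed by a raw net.Conn; it writes the
+// status line and headers directly to the connection and always sends Connection: close.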
+type responseWriter struct {
+ connection net.Conn
+ header http.Header
+ statusCode int
+}
+
+func (rw *responseWriter) Header() http.Header {
+ if rw.header == nil {
+ rw.header = make(http.Header)
+ }
+ return rw.header
+}
+
+func (rw *responseWriter) Write(data []byte) (int, error) {
+ return rw.connection.Write(data)
+}
+
+func (rw *responseWriter) WriteHeader(statusCode int) {
+ rw.statusCode = statusCode
+ headers := fmt.Sprintf("HTTP/1.1 %d %s\r\n", statusCode, http.StatusText(statusCode))
+ headers += "Connection: close\r\n" // Prevent keep-alive as it breaks internal proxy authentication
+ for k, v := range rw.Header() {
+ for _, vv := range v {
+ headers += fmt.Sprintf("%s: %s\r\n", k, vv)
+ }
+ }
+ headers += "\r\n"
+ if _, err := rw.connection.Write([]byte(headers)); err != nil {
+ logger.Printf("Failed to write headers to connection: %v", err)
+ }
+}
+
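+// The helpers below read the proxy configuration from environment variables,
+// falling back to the compiled-in defaults.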
+func getTargetWhitelist() map[string]bool {
+ return common.GetCsvEnvVariable(TargetWhitelistKey, DefaultTargetWhitelist)
+}
+
+func getEnableInternalProxy() bool {
+ return common.GetBoolEnvVariable(EnableInternalProxyKey, DefaultEnableInternalProxy)
+}
+
+func getInternalProxyHost() string {
+ return common.GetEnvVariable(InternalProxyHostKey, DefaultInternalProxyHost)
+}
+
+func getInternalProxyPort() int {
+ return common.GetIntEnvVariable(InternalProxyPortKey, DefaultInternalProxyPort)
+}
+
+func getInternalProxyUser() string {
+ return common.GetEnvVariable(InternalProxyUserKey, DefaultInternalProxyUser)
+}
+
+func getInternalProxyPassword() string {
+ return common.GetEnvVariable(InternalProxyPasswordKey, DefaultInternalProxyPassword)
+}
+
+func getInternalNonProxyHosts() map[string]bool {
+ return common.GetCsvEnvVariable(InternalNonProxyHostsKey, DefaultInternalNonProxyHosts)
+}
diff --git a/pkg/reconciler/dependencybuild/buildrecipeyaml.go b/pkg/reconciler/dependencybuild/buildrecipeyaml.go
index a0b153a74..8cec863c6 100644
--- a/pkg/reconciler/dependencybuild/buildrecipeyaml.go
+++ b/pkg/reconciler/dependencybuild/buildrecipeyaml.go
@@ -6,6 +6,7 @@ import (
"fmt"
"github.com/go-logr/logr"
v12 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "net/url"
"regexp"
"strconv"
"strings"
@@ -133,7 +134,7 @@ func createDeployPipelineSpec(jbsConfig *v1alpha1.JBSConfig, buildRequestProcess
}
*/
-func createPipelineSpec(log logr.Logger, tool string, commitTime int64, jbsConfig *v1alpha1.JBSConfig, systemConfig *v1alpha1.SystemConfig, recipe *v1alpha1.BuildRecipe, db *v1alpha1.DependencyBuild, paramValues []tektonpipeline.Param, buildRequestProcessorImage string, buildId string, existingImages map[string]string, orasOptions string) (*tektonpipeline.PipelineSpec, string, error) {
+func createPipelineSpec(log logr.Logger, tool string, commitTime int64, jbsConfig *v1alpha1.JBSConfig, systemConfig *v1alpha1.SystemConfig, recipe *v1alpha1.BuildRecipe, db *v1alpha1.DependencyBuild, paramValues []tektonpipeline.Param, buildRequestProcessorImage string, domainProxyImage string, buildId string, existingImages map[string]string, orasOptions string) (*tektonpipeline.PipelineSpec, string, error) {
// Rather than tagging with hash of json build recipe, buildrequestprocessor image and db.Name as the former two
// could change with new image versions just use db.Name (which is a hash of scm url/tag/path so should be stable)
@@ -480,6 +481,10 @@ func createPipelineSpec(log logr.Logger, tool string, commitTime int64, jbsConfi
},
}
+ whitelistUrl, err := url.Parse(cacheUrl)
+ if err != nil {
+ return nil, "", err
+ }
ps.Tasks = append([]tektonpipeline.PipelineTask{
{
Name: BuildTaskName,
@@ -526,6 +531,69 @@ func createPipelineSpec(log logr.Logger, tool string, commitTime int64, jbsConfi
},
},
},
+ {
+ Name: "HERMETIC",
+ Value: tektonpipeline.ParamValue{
+ Type: tektonpipeline.ParamTypeString,
+ StringVal: "true",
+ },
+ },
+ {
+ Name: "BUILD_IMAGE",
+ Value: tektonpipeline.ParamValue{
+ Type: tektonpipeline.ParamTypeString,
+ StringVal: domainProxyImage,
+ },
+ },
+ {
+ Name: "ENABLE_DOMAIN_PROXY",
+ Value: tektonpipeline.ParamValue{
+ Type: tektonpipeline.ParamTypeString,
+ StringVal: "true",
+ },
+ },
+ {
+ Name: "DOMAIN_PROXY_TARGET_WHITELIST",
+ Value: tektonpipeline.ParamValue{
+ Type: tektonpipeline.ParamTypeString,
+ StringVal: whitelistUrl.Host + ",localhost,cdn-ubi.redhat.com,repo1.maven.org,repo.scala-sbt.org,scala.jfrog.io,repo.typesafe.com,jfrog-prod-usw2-shared-oregon-main.s3.amazonaws.com",
+ },
+ },
+ {
+ Name: "DOMAIN_PROXY_INTERNAL_PROXY_HOST",
+ Value: tektonpipeline.ParamValue{
+ Type: tektonpipeline.ParamTypeString,
+ StringVal: "indy-generic-proxy",
+ },
+ },
+ {
+ Name: "DOMAIN_PROXY_INTERNAL_PROXY_PORT",
+ Value: tektonpipeline.ParamValue{
+ Type: tektonpipeline.ParamTypeString,
+ StringVal: "80",
+ },
+ },
+ {
+ Name: "DOMAIN_PROXY_INTERNAL_PROXY_USER",
+ Value: tektonpipeline.ParamValue{
+ Type: tektonpipeline.ParamTypeString,
+ StringVal: buildId + "+tracking",
+ },
+ },
+ {
+ Name: "DOMAIN_PROXY_INTERNAL_PROXY_PASSWORD",
+ Value: tektonpipeline.ParamValue{
+ Type: tektonpipeline.ParamTypeString,
+ StringVal: "${ACCESS_TOKEN}", // TODO how to get the access token value?
+ },
+ },
+ {
+ Name: "DOMAIN_PROXY_INTERNAL_NON_PROXY_HOSTS",
+ Value: tektonpipeline.ParamValue{
+ Type: tektonpipeline.ParamTypeString,
+ StringVal: whitelistUrl.Host + ",localhost",
+ },
+ },
},
}}, ps.Tasks...)
diff --git a/pkg/reconciler/dependencybuild/dependencybuild.go b/pkg/reconciler/dependencybuild/dependencybuild.go
index 44bfc1b8f..eae90f644 100644
--- a/pkg/reconciler/dependencybuild/dependencybuild.go
+++ b/pkg/reconciler/dependencybuild/dependencybuild.go
@@ -537,6 +537,10 @@ func (r *ReconcileDependencyBuild) handleStateBuilding(ctx context.Context, db *
if err != nil {
return reconcile.Result{}, err
}
+ domainProxyImage, err := r.domainProxyImage(ctx)
+ if err != nil {
+ return reconcile.Result{}, err
+ }
pr.Namespace = db.Namespace
// we do not use generate name since
// 1. it was used in creating the db and the db name has random ids
@@ -592,7 +596,7 @@ func (r *ReconcileDependencyBuild) handleStateBuilding(ctx context.Context, db *
Pipeline: &v12.Duration{Duration: time.Hour * v1alpha1.DefaultTimeout},
Tasks: &v12.Duration{Duration: time.Hour * v1alpha1.DefaultTimeout},
}
- pr.Spec.PipelineSpec, diagnosticContainerfile, err = createPipelineSpec(log, attempt.Recipe.Tool, db.Status.CommitTime, jbsConfig, &systemConfig, attempt.Recipe, db, paramValues, buildRequestProcessorImage, attempt.BuildId, preBuildImages, orasOptions)
+ pr.Spec.PipelineSpec, diagnosticContainerfile, err = createPipelineSpec(log, attempt.Recipe.Tool, db.Status.CommitTime, jbsConfig, &systemConfig, attempt.Recipe, db, paramValues, buildRequestProcessorImage, domainProxyImage, attempt.BuildId, preBuildImages, orasOptions)
if err != nil {
return reconcile.Result{}, err
}
@@ -1320,6 +1324,11 @@ func (r *ReconcileDependencyBuild) buildRequestProcessorImage(ctx context.Contex
return image, err
}
+func (r *ReconcileDependencyBuild) domainProxyImage(ctx context.Context) (string, error) {
+ image, err := util.GetImageName(ctx, r.client, "domain-proxy", "JVM_BUILD_SERVICE_DOMAIN_PROXY_IMAGE")
+ return image, err
+}
+
func (r *ReconcileDependencyBuild) handleTektonResults(db *v1alpha1.DependencyBuild, pr *tektonpipeline.PipelineRun) bool {
if pr.GetAnnotations() == nil {
return false
diff --git a/vendor/github.com/elazarl/goproxy/.gitignore b/vendor/github.com/elazarl/goproxy/.gitignore
new file mode 100644
index 000000000..1005f6f1e
--- /dev/null
+++ b/vendor/github.com/elazarl/goproxy/.gitignore
@@ -0,0 +1,2 @@
+bin
+*.swp
diff --git a/vendor/github.com/elazarl/goproxy/LICENSE b/vendor/github.com/elazarl/goproxy/LICENSE
new file mode 100644
index 000000000..2067e567c
--- /dev/null
+++ b/vendor/github.com/elazarl/goproxy/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 Elazar Leibovich. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Elazar Leibovich. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/elazarl/goproxy/README.md b/vendor/github.com/elazarl/goproxy/README.md
new file mode 100644
index 000000000..2bb2367f6
--- /dev/null
+++ b/vendor/github.com/elazarl/goproxy/README.md
@@ -0,0 +1,168 @@
+# Introduction
+
+[![GoDoc](https://godoc.org/github.com/elazarl/goproxy?status.svg)](https://godoc.org/github.com/elazarl/goproxy)
+[![Join the chat at https://gitter.im/elazarl/goproxy](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/elazarl/goproxy?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+
+Package goproxy provides a customizable HTTP proxy library for Go (golang).
+
+It supports regular HTTP proxying, HTTPS through CONNECT, and "hijacking" HTTPS
+connections using a "Man in the Middle" style attack.
+
+The intent of the proxy is to be usable with a reasonable amount of traffic,
+yet customizable and programmable.
+
+The proxy itself is simply a `net/http` handler.
+
+In order to use goproxy, one should set their browser to use goproxy as an HTTP
+proxy. Here is how you do that [in Chrome](https://support.google.com/chrome/answer/96815?hl=en)
+and [in Firefox](http://www.wikihow.com/Enter-Proxy-Settings-in-Firefox).
+
+For example, the URL you should use as proxy when running `./bin/basic` is
+`localhost:8080`, as this is the default binding for the basic proxy.
+
+## Mailing List
+
+New features will be discussed on the [mailing list](https://groups.google.com/forum/#!forum/goproxy-dev)
+before their development.
+
+## Latest Stable Release
+
+Get the latest goproxy from `gopkg.in/elazarl/goproxy.v1`.
+
+# Why not Fiddler2?
+
+Fiddler is an excellent piece of software with similar intent. However, Fiddler is not
+as customizable as goproxy intends to be. The main difference is that Fiddler is not
+intended to be used as a real proxy.
+
+A possible use case that suits goproxy but
+not Fiddler, is gathering statistics on page load times for a certain website over a week.
+With goproxy you could ask all your users to set their proxy to a dedicated machine running a
+goproxy server. Fiddler is a GUI app not designed to be run like a server for multiple users.
+
+# A taste of goproxy
+
+To get a taste of `goproxy`, here is a basic HTTP/HTTPS transparent proxy:
+
+```go
+package main
+
+import (
+ "github.com/elazarl/goproxy"
+ "log"
+ "net/http"
+)
+
+func main() {
+ proxy := goproxy.NewProxyHttpServer()
+ proxy.Verbose = true
+ log.Fatal(http.ListenAndServe(":8080", proxy))
+}
+```
+
+This line will add `X-GoProxy: yxorPoG-X` header to all requests sent through the proxy
+
+```go
+proxy.OnRequest().DoFunc(
+ func(r *http.Request,ctx *goproxy.ProxyCtx)(*http.Request,*http.Response) {
+ r.Header.Set("X-GoProxy","yxorPoG-X")
+ return r,nil
+ })
+```
+
+`DoFunc` will process all incoming requests to the proxy. It will add a header to the request
+and return it. The proxy will send the modified request.
+
+Note that we returned nil value as the response. Had we returned a response, goproxy would
+have discarded the request and sent the new response to the client.
+
+In order to refuse connections to reddit at work time
+
+```go
+proxy.OnRequest(goproxy.DstHostIs("www.reddit.com")).DoFunc(
+ func(r *http.Request,ctx *goproxy.ProxyCtx)(*http.Request,*http.Response) {
+ if h,_,_ := time.Now().Clock(); h >= 8 && h <= 17 {
+ return r,goproxy.NewResponse(r,
+ goproxy.ContentTypeText,http.StatusForbidden,
+ "Don't waste your time!")
+ }
+ return r,nil
+})
+```
+
+`DstHostIs` returns a `ReqCondition`, that is a function receiving a `Request` and returning a boolean.
+We will only process requests that match the condition. `DstHostIs("www.reddit.com")` will return
+a `ReqCondition` accepting only requests directed to "www.reddit.com".
+
+`DoFunc` will receive a function that will preprocess the request. We can change the request, or
+return a response. If the time is between 8:00am and 5:00pm, we will reject the request, and
+return a precanned text response saying "do not waste your time".
+
+See additional examples in the examples directory.
+
+
+# Type of handlers for manipulating connect/req/resp behavior
+
+There are 3 kinds of useful handlers to manipulate the behavior, as follows:
+
+```go
+// handlers called after receiving an HTTP CONNECT from the client, and before the proxy
+// establishes a connection with the destination host
+httpsHandlers []HttpsHandler
+
+// handlers called before the proxy sends the HTTP request to the destination host
+reqHandlers []ReqHandler
+
+// handlers called after the proxy receives the HTTP response from the destination host, and
+// before the proxy forwards the response to the client.
+respHandlers []RespHandler
+```
+
+Depending on what you want to manipulate, the ways to add handlers to each handler list are:
+
+```go
+// Add handlers to httpsHandlers
+proxy.OnRequest(Some ReqConditions).HandleConnect(YourHandlerFunc())
+
+// Add handlers to reqHandlers
+proxy.OnRequest(Some ReqConditions).Do(YourReqHandlerFunc())
+
+// Add handlers to respHandlers
+proxy.OnResponse(Some RespConditions).Do(YourRespHandlerFunc())
+```
+
+For example:
+
+```go
+// This rejects the HTTPS request to *.reddit.com during HTTP CONNECT phase
+proxy.OnRequest(goproxy.ReqHostMatches(regexp.MustCompile("reddit.*:443$"))).HandleConnect(goproxy.RejectConnect)
+
+// This will NOT reject an HTTPS request whose URL ends with gif, because the proxy
+// only gets the URL.Hostname and URL.Port during the HTTP CONNECT phase if the scheme is HTTPS, which is
+// quite common these days.
+proxy.OnRequest(goproxy.UrlMatches(regexp.MustCompile(`.*gif$`))).HandleConnect(goproxy.RejectConnect)
+
+// The correct way to manipulate the HTTP request using URL.Path as condition is:
+proxy.OnRequest(goproxy.UrlMatches(regexp.MustCompile(`.*gif$`))).Do(YourReqHandlerFunc())
+```
+
+# What's New
+
+1. Ability to `Hijack` CONNECT requests. See
+[the eavesdropper example](https://github.com/elazarl/goproxy/blob/master/examples/goproxy-eavesdropper/main.go#L27)
+2. Transparent proxy support for http/https including MITM certificate generation for TLS. See the [transparent example.](https://github.com/elazarl/goproxy/tree/master/examples/goproxy-transparent)
+
+# License
+
+I put the software temporarily under the Go-compatible BSD license.
+If this prevents someone from using the software, do let me know and I'll consider changing it.
+
+At any rate, user feedback is very important for me, so I'll be delighted to know if you're using this package.
+
+# Beta Software
+
+I've received positive feedback from a few people who use goproxy in production settings.
+I believe it is good enough for usage.
+
+I'll try to keep reasonable backwards compatibility. In case of a major API change,
+I'll change the import path.
diff --git a/vendor/github.com/elazarl/goproxy/actions.go b/vendor/github.com/elazarl/goproxy/actions.go
new file mode 100644
index 000000000..e1a3e7ff1
--- /dev/null
+++ b/vendor/github.com/elazarl/goproxy/actions.go
@@ -0,0 +1,57 @@
+package goproxy
+
+import "net/http"
+
+// ReqHandler will "tamper" with the request coming to the proxy server
+// If Handle returns req,nil the proxy will send the returned request
+// to the destination server. If it returns nil,resp the proxy will
+// skip sending any requests, and will simply return the response `resp`
+// to the client.
+type ReqHandler interface {
+ Handle(req *http.Request, ctx *ProxyCtx) (*http.Request, *http.Response)
+}
+
+// A wrapper that would convert a function to a ReqHandler interface type
+type FuncReqHandler func(req *http.Request, ctx *ProxyCtx) (*http.Request, *http.Response)
+
+// FuncReqHandler.Handle(req,ctx) <=> FuncReqHandler(req,ctx)
+func (f FuncReqHandler) Handle(req *http.Request, ctx *ProxyCtx) (*http.Request, *http.Response) {
+ return f(req, ctx)
+}
+
+// After the proxy has sent the request to the destination server, it will
+// "filter" the response through the RespHandlers it has.
+// The proxy server will send to the client the response returned by the RespHandler.
+// In case of error, resp will be nil, and ctx.RoundTrip.Error will contain the error
+type RespHandler interface {
+ Handle(resp *http.Response, ctx *ProxyCtx) *http.Response
+}
+
+// A wrapper that would convert a function to a RespHandler interface type
+type FuncRespHandler func(resp *http.Response, ctx *ProxyCtx) *http.Response
+
+// FuncRespHandler.Handle(req,ctx) <=> FuncRespHandler(req,ctx)
+func (f FuncRespHandler) Handle(resp *http.Response, ctx *ProxyCtx) *http.Response {
+ return f(resp, ctx)
+}
+
+// When a client sends a CONNECT request to a host, the request is filtered through
+// all the HttpsHandlers the proxy has, and if one returns true, the connection is
+// sniffed using Man in the Middle attack.
+// That is, the proxy will create a TLS connection with the client, another TLS
+// connection with the destination the client wished to connect to, and would
+// send back and forth all messages from the server to the client and vice versa.
+// The request and responses sent in this Man In the Middle channel are filtered
+// through the usual flow (request and response filtered through the ReqHandlers
+// and RespHandlers)
+type HttpsHandler interface {
+ HandleConnect(req string, ctx *ProxyCtx) (*ConnectAction, string)
+}
+
+// A wrapper that would convert a function to a HttpsHandler interface type
+type FuncHttpsHandler func(host string, ctx *ProxyCtx) (*ConnectAction, string)
+
+// FuncHttpsHandler implements the HttpsHandler interface
+func (f FuncHttpsHandler) HandleConnect(host string, ctx *ProxyCtx) (*ConnectAction, string) {
+ return f(host, ctx)
+}
diff --git a/vendor/github.com/elazarl/goproxy/all.bash b/vendor/github.com/elazarl/goproxy/all.bash
new file mode 100644
index 000000000..6503e73dc
--- /dev/null
+++ b/vendor/github.com/elazarl/goproxy/all.bash
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+go test || exit
+for action in $@; do go $action; done
+
+mkdir -p bin
+find regretable examples/* ext/* -maxdepth 0 -type d | while read d; do
+ (cd $d
+ go build -o ../../bin/$(basename $d)
+ find *_test.go -maxdepth 0 2>/dev/null|while read f;do
+ for action in $@; do go $action; done
+ go test
+ break
+ done)
+done
diff --git a/vendor/github.com/elazarl/goproxy/ca.pem b/vendor/github.com/elazarl/goproxy/ca.pem
new file mode 100644
index 000000000..62653dae8
--- /dev/null
+++ b/vendor/github.com/elazarl/goproxy/ca.pem
@@ -0,0 +1,34 @@
+-----BEGIN CERTIFICATE-----
+MIIF9DCCA9ygAwIBAgIJAODqYUwoVjJkMA0GCSqGSIb3DQEBCwUAMIGOMQswCQYD
+VQQGEwJJTDEPMA0GA1UECAwGQ2VudGVyMQwwCgYDVQQHDANMb2QxEDAOBgNVBAoM
+B0dvUHJveHkxEDAOBgNVBAsMB0dvUHJveHkxGjAYBgNVBAMMEWdvcHJveHkuZ2l0
+aHViLmlvMSAwHgYJKoZIhvcNAQkBFhFlbGF6YXJsQGdtYWlsLmNvbTAeFw0xNzA0
+MDUyMDAwMTBaFw0zNzAzMzEyMDAwMTBaMIGOMQswCQYDVQQGEwJJTDEPMA0GA1UE
+CAwGQ2VudGVyMQwwCgYDVQQHDANMb2QxEDAOBgNVBAoMB0dvUHJveHkxEDAOBgNV
+BAsMB0dvUHJveHkxGjAYBgNVBAMMEWdvcHJveHkuZ2l0aHViLmlvMSAwHgYJKoZI
+hvcNAQkBFhFlbGF6YXJsQGdtYWlsLmNvbTCCAiIwDQYJKoZIhvcNAQEBBQADggIP
+ADCCAgoCggIBAJ4Qy+H6hhoY1s0QRcvIhxrjSHaO/RbaFj3rwqcnpOgFq07gRdI9
+3c0TFKQJHpgv6feLRhEvX/YllFYu4J35lM9ZcYY4qlKFuStcX8Jm8fqpgtmAMBzP
+sqtqDi8M9RQGKENzU9IFOnCV7SAeh45scMuI3wz8wrjBcH7zquHkvqUSYZz035t9
+V6WTrHyTEvT4w+lFOVN2bA/6DAIxrjBiF6DhoJqnha0SZtDfv77XpwGG3EhA/qoh
+hiYrDruYK7zJdESQL44LwzMPupVigqalfv+YHfQjbhT951IVurW2NJgRyBE62dLr
+lHYdtT9tCTCrd+KJNMJ+jp9hAjdIu1Br/kifU4F4+4ZLMR9Ueji0GkkPKsYdyMnq
+j0p0PogyvP1l4qmboPImMYtaoFuYmMYlebgC9LN10bL91K4+jLt0I1YntEzrqgJo
+WsJztYDw543NzSy5W+/cq4XRYgtq1b0RWwuUiswezmMoeyHZ8BQJe2xMjAOllASD
+fqa8OK3WABHJpy4zUrnUBiMuPITzD/FuDx4C5IwwlC68gHAZblNqpBZCX0nFCtKj
+YOcI2So5HbQ2OC8QF+zGVuduHUSok4hSy2BBfZ1pfvziqBeetWJwFvapGB44nIHh
+WKNKvqOxLNIy7e+TGRiWOomrAWM18VSR9LZbBxpJK7PLSzWqYJYTRCZHAgMBAAGj
+UzBRMB0GA1UdDgQWBBR4uDD9Y6x7iUoHO+32ioOcw1ICZTAfBgNVHSMEGDAWgBR4
+uDD9Y6x7iUoHO+32ioOcw1ICZTAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEB
+CwUAA4ICAQAaCEupzGGqcdh+L7BzhX7zyd7yzAKUoLxFrxaZY34Xyj3lcx1XoK6F
+AqsH2JM25GixgadzhNt92JP7vzoWeHZtLfstrPS638Y1zZi6toy4E49viYjFk5J0
+C6ZcFC04VYWWx6z0HwJuAS08tZ37JuFXpJGfXJOjZCQyxse0Lg0tuKLMeXDCk2Y3
+Ba0noeuNyHRoWXXPyiUoeApkVCU5gIsyiJSWOjhJ5hpJG06rQNfNYexgKrrraEin
+o0jmEMtJMx5TtD83hSnLCnFGBBq5lkE7jgXME1KsbIE3lJZzRX1mQwUK8CJDYxye
+i6M/dzSvy0SsPvz8fTAlprXRtWWtJQmxgWENp3Dv+0Pmux/l+ilk7KA4sMXGhsfr
+bvTOeWl1/uoFTPYiWR/ww7QEPLq23yDFY04Q7Un0qjIk8ExvaY8lCkXMgc8i7sGY
+VfvOYb0zm67EfAQl3TW8Ky5fl5CcxpVCD360Bzi6hwjYixa3qEeBggOixFQBFWft
+8wrkKTHpOQXjn4sDPtet8imm9UYEtzWrFX6T9MFYkBR0/yye0FIh9+YPiTA6WB86
+NCNwK5Yl6HuvF97CIH5CdgO+5C7KifUtqTOL8pQKbNwy0S3sNYvB+njGvRpR7pKV
+BUnFpB/Atptqr4CUlTXrc5IPLAqAfmwk5IKcwy3EXUbruf9Dwz69YA==
+-----END CERTIFICATE-----
diff --git a/vendor/github.com/elazarl/goproxy/certs.go b/vendor/github.com/elazarl/goproxy/certs.go
new file mode 100644
index 000000000..4731971e7
--- /dev/null
+++ b/vendor/github.com/elazarl/goproxy/certs.go
@@ -0,0 +1,111 @@
+package goproxy
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+)
+
+func init() {
+ if goproxyCaErr != nil {
+ panic("Error parsing builtin CA " + goproxyCaErr.Error())
+ }
+ var err error
+ if GoproxyCa.Leaf, err = x509.ParseCertificate(GoproxyCa.Certificate[0]); err != nil {
+ panic("Error parsing builtin CA " + err.Error())
+ }
+}
+
+var tlsClientSkipVerify = &tls.Config{InsecureSkipVerify: true}
+
+var defaultTLSConfig = &tls.Config{
+ InsecureSkipVerify: true,
+}
+
+var CA_CERT = []byte(`-----BEGIN CERTIFICATE-----
+MIIF9DCCA9ygAwIBAgIJAODqYUwoVjJkMA0GCSqGSIb3DQEBCwUAMIGOMQswCQYD
+VQQGEwJJTDEPMA0GA1UECAwGQ2VudGVyMQwwCgYDVQQHDANMb2QxEDAOBgNVBAoM
+B0dvUHJveHkxEDAOBgNVBAsMB0dvUHJveHkxGjAYBgNVBAMMEWdvcHJveHkuZ2l0
+aHViLmlvMSAwHgYJKoZIhvcNAQkBFhFlbGF6YXJsQGdtYWlsLmNvbTAeFw0xNzA0
+MDUyMDAwMTBaFw0zNzAzMzEyMDAwMTBaMIGOMQswCQYDVQQGEwJJTDEPMA0GA1UE
+CAwGQ2VudGVyMQwwCgYDVQQHDANMb2QxEDAOBgNVBAoMB0dvUHJveHkxEDAOBgNV
+BAsMB0dvUHJveHkxGjAYBgNVBAMMEWdvcHJveHkuZ2l0aHViLmlvMSAwHgYJKoZI
+hvcNAQkBFhFlbGF6YXJsQGdtYWlsLmNvbTCCAiIwDQYJKoZIhvcNAQEBBQADggIP
+ADCCAgoCggIBAJ4Qy+H6hhoY1s0QRcvIhxrjSHaO/RbaFj3rwqcnpOgFq07gRdI9
+3c0TFKQJHpgv6feLRhEvX/YllFYu4J35lM9ZcYY4qlKFuStcX8Jm8fqpgtmAMBzP
+sqtqDi8M9RQGKENzU9IFOnCV7SAeh45scMuI3wz8wrjBcH7zquHkvqUSYZz035t9
+V6WTrHyTEvT4w+lFOVN2bA/6DAIxrjBiF6DhoJqnha0SZtDfv77XpwGG3EhA/qoh
+hiYrDruYK7zJdESQL44LwzMPupVigqalfv+YHfQjbhT951IVurW2NJgRyBE62dLr
+lHYdtT9tCTCrd+KJNMJ+jp9hAjdIu1Br/kifU4F4+4ZLMR9Ueji0GkkPKsYdyMnq
+j0p0PogyvP1l4qmboPImMYtaoFuYmMYlebgC9LN10bL91K4+jLt0I1YntEzrqgJo
+WsJztYDw543NzSy5W+/cq4XRYgtq1b0RWwuUiswezmMoeyHZ8BQJe2xMjAOllASD
+fqa8OK3WABHJpy4zUrnUBiMuPITzD/FuDx4C5IwwlC68gHAZblNqpBZCX0nFCtKj
+YOcI2So5HbQ2OC8QF+zGVuduHUSok4hSy2BBfZ1pfvziqBeetWJwFvapGB44nIHh
+WKNKvqOxLNIy7e+TGRiWOomrAWM18VSR9LZbBxpJK7PLSzWqYJYTRCZHAgMBAAGj
+UzBRMB0GA1UdDgQWBBR4uDD9Y6x7iUoHO+32ioOcw1ICZTAfBgNVHSMEGDAWgBR4
+uDD9Y6x7iUoHO+32ioOcw1ICZTAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEB
+CwUAA4ICAQAaCEupzGGqcdh+L7BzhX7zyd7yzAKUoLxFrxaZY34Xyj3lcx1XoK6F
+AqsH2JM25GixgadzhNt92JP7vzoWeHZtLfstrPS638Y1zZi6toy4E49viYjFk5J0
+C6ZcFC04VYWWx6z0HwJuAS08tZ37JuFXpJGfXJOjZCQyxse0Lg0tuKLMeXDCk2Y3
+Ba0noeuNyHRoWXXPyiUoeApkVCU5gIsyiJSWOjhJ5hpJG06rQNfNYexgKrrraEin
+o0jmEMtJMx5TtD83hSnLCnFGBBq5lkE7jgXME1KsbIE3lJZzRX1mQwUK8CJDYxye
+i6M/dzSvy0SsPvz8fTAlprXRtWWtJQmxgWENp3Dv+0Pmux/l+ilk7KA4sMXGhsfr
+bvTOeWl1/uoFTPYiWR/ww7QEPLq23yDFY04Q7Un0qjIk8ExvaY8lCkXMgc8i7sGY
+VfvOYb0zm67EfAQl3TW8Ky5fl5CcxpVCD360Bzi6hwjYixa3qEeBggOixFQBFWft
+8wrkKTHpOQXjn4sDPtet8imm9UYEtzWrFX6T9MFYkBR0/yye0FIh9+YPiTA6WB86
+NCNwK5Yl6HuvF97CIH5CdgO+5C7KifUtqTOL8pQKbNwy0S3sNYvB+njGvRpR7pKV
+BUnFpB/Atptqr4CUlTXrc5IPLAqAfmwk5IKcwy3EXUbruf9Dwz69YA==
+-----END CERTIFICATE-----`)
+
+var CA_KEY = []byte(`-----BEGIN RSA PRIVATE KEY-----
+MIIJKAIBAAKCAgEAnhDL4fqGGhjWzRBFy8iHGuNIdo79FtoWPevCpyek6AWrTuBF
+0j3dzRMUpAkemC/p94tGES9f9iWUVi7gnfmUz1lxhjiqUoW5K1xfwmbx+qmC2YAw
+HM+yq2oOLwz1FAYoQ3NT0gU6cJXtIB6Hjmxwy4jfDPzCuMFwfvOq4eS+pRJhnPTf
+m31XpZOsfJMS9PjD6UU5U3ZsD/oMAjGuMGIXoOGgmqeFrRJm0N+/vtenAYbcSED+
+qiGGJisOu5grvMl0RJAvjgvDMw+6lWKCpqV+/5gd9CNuFP3nUhW6tbY0mBHIETrZ
+0uuUdh21P20JMKt34ok0wn6On2ECN0i7UGv+SJ9TgXj7hksxH1R6OLQaSQ8qxh3I
+yeqPSnQ+iDK8/WXiqZug8iYxi1qgW5iYxiV5uAL0s3XRsv3Urj6Mu3QjVie0TOuq
+AmhawnO1gPDnjc3NLLlb79yrhdFiC2rVvRFbC5SKzB7OYyh7IdnwFAl7bEyMA6WU
+BIN+prw4rdYAEcmnLjNSudQGIy48hPMP8W4PHgLkjDCULryAcBluU2qkFkJfScUK
+0qNg5wjZKjkdtDY4LxAX7MZW524dRKiTiFLLYEF9nWl+/OKoF561YnAW9qkYHjic
+geFYo0q+o7Es0jLt75MZGJY6iasBYzXxVJH0tlsHGkkrs8tLNapglhNEJkcCAwEA
+AQKCAgAwSuNvxHHqUUJ3XoxkiXy1u1EtX9x1eeYnvvs2xMb+WJURQTYz2NEGUdkR
+kPO2/ZSXHAcpQvcnpi2e8y2PNmy/uQ0VPATVt6NuWweqxncR5W5j82U/uDlXY8y3
+lVbfak4s5XRri0tikHvlP06dNgZ0OPok5qi7d+Zd8yZ3Y8LXfjkykiIrSG1Z2jdt
+zCWTkNmSUKMGG/1CGFxI41Lb12xuq+C8v4f469Fb6bCUpyCQN9rffHQSGLH6wVb7
++68JO+d49zCATpmx5RFViMZwEcouXxRvvc9pPHXLP3ZPBD8nYu9kTD220mEGgWcZ
+3L9dDlZPcSocbjw295WMvHz2QjhrDrb8gXwdpoRyuyofqgCyNxSnEC5M13SjOxtf
+pjGzjTqh0kDlKXg2/eTkd9xIHjVhFYiHIEeITM/lHCfWwBCYxViuuF7pSRPzTe8U
+C440b62qZSPMjVoquaMg+qx0n9fKSo6n1FIKHypv3Kue2G0WhDeK6u0U288vQ1t4
+Ood3Qa13gZ+9hwDLbM/AoBfVBDlP/tpAwa7AIIU1ZRDNbZr7emFdctx9B6kLINv3
+4PDOGM2xrjOuACSGMq8Zcu7LBz35PpIZtviJOeKNwUd8/xHjWC6W0itgfJb5I1Nm
+V6Vj368pGlJx6Se26lvXwyyrc9pSw6jSAwARBeU4YkNWpi4i6QKCAQEA0T7u3P/9
+jZJSnDN1o2PXymDrJulE61yguhc/QSmLccEPZe7or06/DmEhhKuCbv+1MswKDeag
+/1JdFPGhL2+4G/f/9BK3BJPdcOZSz7K6Ty8AMMBf8AehKTcSBqwkJWcbEvpHpKJ6
+eDqn1B6brXTNKMT6fEEXCuZJGPBpNidyLv/xXDcN7kCOo3nGYKfB5OhFpNiL63tw
++LntU56WESZwEqr8Pf80uFvsyXQK3a5q5HhIQtxl6tqQuPlNjsDBvCqj0x72mmaJ
+ZVsVWlv7khUrCwAXz7Y8K7mKKBd2ekF5hSbryfJsxFyvEaWUPhnJpTKV85lAS+tt
+FQuIp9TvKYlRQwKCAQEAwWJN8jysapdhi67jO0HtYOEl9wwnF4w6XtiOYtllkMmC
+06/e9h7RsRyWPMdu3qRDPUYFaVDy6+dpUDSQ0+E2Ot6AHtVyvjeUTIL651mFIo/7
+OSUCEc+HRo3SfPXdPhSQ2thNTxl6y9XcFacuvbthgr70KXbvC4k6IEmdpf/0Kgs9
+7QTZCG26HDrEZ2q9yMRlRaL2SRD+7Y2xra7gB+cQGFj6yn0Wd/07er49RqMXidQf
+KR2oYfev2BDtHXoSZFfhFGHlOdLvWRh90D4qZf4vQ+g/EIMgcNSoxjvph1EShmKt
+sjhTHtoHuu+XmEQvIewk2oCI+JvofBkcnpFrVvUUrQKCAQAaTIufETmgCo0BfuJB
+N/JOSGIl0NnNryWwXe2gVgVltbsmt6FdL0uKFiEtWJUbOF5g1Q5Kcvs3O/XhBQGa
+QbNlKIVt+tAv7hm97+Tmn/MUsraWagdk1sCluns0hXxBizT27KgGhDlaVRz05yfv
+5CdJAYDuDwxDXXBAhy7iFJEgYSDH00+X61tCJrMNQOh4ycy/DEyBu1EWod+3S85W
+t3sMjZsIe8P3i+4137Th6eMbdha2+JaCrxfTd9oMoCN5b+6JQXIDM/H+4DTN15PF
+540yY7+aZrAnWrmHknNcqFAKsTqfdi2/fFqwoBwCtiEG91WreU6AfEWIiJuTZIru
+sIibAoIBAAqIwlo5t+KukF+9jR9DPh0S5rCIdvCvcNaN0WPNF91FPN0vLWQW1bFi
+L0TsUDvMkuUZlV3hTPpQxsnZszH3iK64RB5p3jBCcs+gKu7DT59MXJEGVRCHT4Um
+YJryAbVKBYIGWl++sZO8+JotWzx2op8uq7o+glMMjKAJoo7SXIiVyC/LHc95urOi
+9+PySphPKn0anXPpexmRqGYfqpCDo7rPzgmNutWac80B4/CfHb8iUPg6Z1u+1FNe
+yKvcZHgW2Wn00znNJcCitufLGyAnMofudND/c5rx2qfBx7zZS7sKUQ/uRYjes6EZ
+QBbJUA/2/yLv8YYpaAaqj4aLwV8hRpkCggEBAIh3e25tr3avCdGgtCxS7Y1blQ2c
+ue4erZKmFP1u8wTNHQ03T6sECZbnIfEywRD/esHpclfF3kYAKDRqIP4K905Rb0iH
+759ZWt2iCbqZznf50XTvptdmjm5KxvouJzScnQ52gIV6L+QrCKIPelLBEIqCJREh
+pmcjjocD/UCCSuHgbAYNNnO/JdhnSylz1tIg26I+2iLNyeTKIepSNlsBxnkLmqM1
+cj/azKBaT04IOMLaN8xfSqitJYSraWMVNgGJM5vfcVaivZnNh0lZBv+qu6YkdM88
+4/avCJ8IutT+FcMM+GbGazOm5ALWqUyhrnbLGc4CQMPfe7Il6NxwcrOxT8w=
+-----END RSA PRIVATE KEY-----`)
+
+var GoproxyCa, goproxyCaErr = tls.X509KeyPair(CA_CERT, CA_KEY)
diff --git a/vendor/github.com/elazarl/goproxy/chunked.go b/vendor/github.com/elazarl/goproxy/chunked.go
new file mode 100644
index 000000000..83654f658
--- /dev/null
+++ b/vendor/github.com/elazarl/goproxy/chunked.go
@@ -0,0 +1,59 @@
+// Taken from $GOROOT/src/pkg/net/http/chunked
+// needed to write https responses to client.
+package goproxy
+
+import (
+ "io"
+ "strconv"
+)
+
+// newChunkedWriter returns a new chunkedWriter that translates writes into HTTP
+// "chunked" format before writing them to w. Closing the returned chunkedWriter
+// sends the final 0-length chunk that marks the end of the stream.
+//
+// newChunkedWriter is not needed by normal applications. The http
+// package adds chunking automatically if handlers don't set a
+// Content-Length header. Using newChunkedWriter inside a handler
+// would result in double chunking or chunking with a Content-Length
+// length, both of which are wrong.
+func newChunkedWriter(w io.Writer) io.WriteCloser {
+ return &chunkedWriter{w}
+}
+
+// Writing to chunkedWriter translates to writing in HTTP chunked Transfer
+// Encoding wire format to the underlying Wire chunkedWriter.
+type chunkedWriter struct {
+ Wire io.Writer
+}
+
+// Write the contents of data as one chunk to Wire.
+// Note that the corresponding chunk-writing procedure in Conn.Write has
+// a bug since it does not check for success of io.WriteString
+func (cw *chunkedWriter) Write(data []byte) (n int, err error) {
+
+ // Don't send 0-length data. It looks like EOF for chunked encoding.
+ if len(data) == 0 {
+ return 0, nil
+ }
+
+ head := strconv.FormatInt(int64(len(data)), 16) + "\r\n"
+
+ if _, err = io.WriteString(cw.Wire, head); err != nil {
+ return 0, err
+ }
+ if n, err = cw.Wire.Write(data); err != nil {
+ return
+ }
+ if n != len(data) {
+ err = io.ErrShortWrite
+ return
+ }
+ _, err = io.WriteString(cw.Wire, "\r\n")
+
+ return
+}
+
+func (cw *chunkedWriter) Close() error {
+ _, err := io.WriteString(cw.Wire, "0\r\n")
+ return err
+}
diff --git a/vendor/github.com/elazarl/goproxy/counterecryptor.go b/vendor/github.com/elazarl/goproxy/counterecryptor.go
new file mode 100644
index 000000000..494e7a4fe
--- /dev/null
+++ b/vendor/github.com/elazarl/goproxy/counterecryptor.go
@@ -0,0 +1,68 @@
+package goproxy
+
+import (
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/rsa"
+ "crypto/sha256"
+ "crypto/x509"
+ "errors"
+)
+
+type CounterEncryptorRand struct {
+ cipher cipher.Block
+ counter []byte
+ rand []byte
+ ix int
+}
+
+func NewCounterEncryptorRandFromKey(key interface{}, seed []byte) (r CounterEncryptorRand, err error) {
+ var keyBytes []byte
+ switch key := key.(type) {
+ case *rsa.PrivateKey:
+ keyBytes = x509.MarshalPKCS1PrivateKey(key)
+ default:
+ err = errors.New("only RSA keys supported")
+ return
+ }
+ h := sha256.New()
+ if r.cipher, err = aes.NewCipher(h.Sum(keyBytes)[:aes.BlockSize]); err != nil {
+ return
+ }
+ r.counter = make([]byte, r.cipher.BlockSize())
+ if seed != nil {
+ copy(r.counter, h.Sum(seed)[:r.cipher.BlockSize()])
+ }
+ r.rand = make([]byte, r.cipher.BlockSize())
+ r.ix = len(r.rand)
+ return
+}
+
+func (c *CounterEncryptorRand) Seed(b []byte) {
+ if len(b) != len(c.counter) {
+ panic("SetCounter: wrong counter size")
+ }
+ copy(c.counter, b)
+}
+
+func (c *CounterEncryptorRand) refill() {
+ c.cipher.Encrypt(c.rand, c.counter)
+ for i := 0; i < len(c.counter); i++ {
+ if c.counter[i]++; c.counter[i] != 0 {
+ break
+ }
+ }
+ c.ix = 0
+}
+
+func (c *CounterEncryptorRand) Read(b []byte) (n int, err error) {
+ if c.ix == len(c.rand) {
+ c.refill()
+ }
+ if n = len(c.rand) - c.ix; n > len(b) {
+ n = len(b)
+ }
+ copy(b, c.rand[c.ix:c.ix+n])
+ c.ix += n
+ return
+}
diff --git a/vendor/github.com/elazarl/goproxy/ctx.go b/vendor/github.com/elazarl/goproxy/ctx.go
new file mode 100644
index 000000000..86162deb7
--- /dev/null
+++ b/vendor/github.com/elazarl/goproxy/ctx.go
@@ -0,0 +1,93 @@
+package goproxy
+
+import (
+ "crypto/tls"
+ "net/http"
+ "regexp"
+)
+
+// ProxyCtx is the Proxy context, contains useful information about every request. It is passed to
+// every user function. Also used as a logger.
+type ProxyCtx struct {
+ // Will contain the client request from the proxy
+ Req *http.Request
+ // Will contain the remote server's response (if available; nil if the request wasn't sent yet)
+ Resp *http.Response
+ RoundTripper RoundTripper
+ // will contain the most recent error that occurred while trying to send, receive, or parse traffic
+ Error error
+ // A handle for the user to keep data in the context, from the call of ReqHandler to the
+ // call of RespHandler
+ UserData interface{}
+ // Will connect a request to a response
+ Session int64
+ certStore CertStorage
+ proxy *ProxyHttpServer
+}
+
+type RoundTripper interface {
+ RoundTrip(req *http.Request, ctx *ProxyCtx) (*http.Response, error)
+}
+
+type CertStorage interface {
+ Fetch(hostname string, gen func() (*tls.Certificate, error)) (*tls.Certificate, error)
+}
+
+type RoundTripperFunc func(req *http.Request, ctx *ProxyCtx) (*http.Response, error)
+
+func (f RoundTripperFunc) RoundTrip(req *http.Request, ctx *ProxyCtx) (*http.Response, error) {
+ return f(req, ctx)
+}
+
+func (ctx *ProxyCtx) RoundTrip(req *http.Request) (*http.Response, error) {
+ if ctx.RoundTripper != nil {
+ return ctx.RoundTripper.RoundTrip(req, ctx)
+ }
+ return ctx.proxy.Tr.RoundTrip(req)
+}
+
+func (ctx *ProxyCtx) printf(msg string, argv ...interface{}) {
+ ctx.proxy.Logger.Printf("[%03d] "+msg+"\n", append([]interface{}{ctx.Session & 0xFF}, argv...)...)
+}
+
+// Logf prints a message to the proxy's log. Should be used in a ProxyHttpServer's filter
+// This message will be printed only if the Verbose field of the ProxyHttpServer is set to true
+//
+// proxy.OnRequest().DoFunc(func(r *http.Request,ctx *goproxy.ProxyCtx) (*http.Request, *http.Response){
+// nr := atomic.AddInt32(&counter,1)
+// ctx.Printf("So far %d requests",nr)
+// return r, nil
+// })
+func (ctx *ProxyCtx) Logf(msg string, argv ...interface{}) {
+ if ctx.proxy.Verbose {
+ ctx.printf("INFO: "+msg, argv...)
+ }
+}
+
+// Warnf prints a message to the proxy's log. Should be used in a ProxyHttpServer's filter
+// This message will always be printed.
+//
+// proxy.OnRequest().DoFunc(func(r *http.Request,ctx *goproxy.ProxyCtx) (*http.Request, *http.Response){
+// f,err := os.OpenFile(cachedContent)
+// if err != nil {
+// ctx.Warnf("error open file %v: %v",cachedContent,err)
+// return r, nil
+// }
+// return r, nil
+// })
+func (ctx *ProxyCtx) Warnf(msg string, argv ...interface{}) {
+ ctx.printf("WARN: "+msg, argv...)
+}
+
+var charsetFinder = regexp.MustCompile("charset=([^ ;]*)")
+
+// Will try to infer the character set of the response from its headers.
+// Returns the empty string if we don't know which character set it used.
+// Currently it will look for charset= in the Content-Type header of the response.
+func (ctx *ProxyCtx) Charset() string {
+ charsets := charsetFinder.FindStringSubmatch(ctx.Resp.Header.Get("Content-Type"))
+ if charsets == nil {
+ return ""
+ }
+ return charsets[1]
+}
diff --git a/vendor/github.com/elazarl/goproxy/dispatcher.go b/vendor/github.com/elazarl/goproxy/dispatcher.go
new file mode 100644
index 000000000..4e7c9cb9d
--- /dev/null
+++ b/vendor/github.com/elazarl/goproxy/dispatcher.go
@@ -0,0 +1,325 @@
+package goproxy
+
+import (
+ "bytes"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "regexp"
+ "strings"
+)
+
+// ReqCondition.HandleReq will decide whether or not to use the ReqHandler on an HTTP request
+// before sending it to the remote server
+type ReqCondition interface {
+ RespCondition
+ HandleReq(req *http.Request, ctx *ProxyCtx) bool
+}
+
+// RespCondition.HandleReq will decide whether or not to use the RespHandler on an HTTP response
+// before sending it to the proxy client. Note that resp might be nil, in case there was an
+// error sending the request.
+type RespCondition interface {
+ HandleResp(resp *http.Response, ctx *ProxyCtx) bool
+}
+
+// ReqConditionFunc.HandleReq(req,ctx) <=> ReqConditionFunc(req,ctx)
+type ReqConditionFunc func(req *http.Request, ctx *ProxyCtx) bool
+
+// RespConditionFunc.HandleResp(resp,ctx) <=> RespConditionFunc(resp,ctx)
+type RespConditionFunc func(resp *http.Response, ctx *ProxyCtx) bool
+
+func (c ReqConditionFunc) HandleReq(req *http.Request, ctx *ProxyCtx) bool {
+ return c(req, ctx)
+}
+
+// ReqConditionFunc cannot test responses. It only satisfies the RespCondition interface
+// so that it can be used as a RespCondition.
+func (c ReqConditionFunc) HandleResp(resp *http.Response, ctx *ProxyCtx) bool {
+ return c(ctx.Req, ctx)
+}
+
+func (c RespConditionFunc) HandleResp(resp *http.Response, ctx *ProxyCtx) bool {
+ return c(resp, ctx)
+}
+
+// UrlHasPrefix returns a ReqCondition checking whether the destination URL the proxy client has requested
+// has the given prefix, with or without the host.
+// For example UrlHasPrefix("host/x") will match requests of the form 'GET host/x', and will match
+// requests to url 'http://host/x'
+func UrlHasPrefix(prefix string) ReqConditionFunc {
+ return func(req *http.Request, ctx *ProxyCtx) bool {
+ return strings.HasPrefix(req.URL.Path, prefix) ||
+ strings.HasPrefix(req.URL.Host+req.URL.Path, prefix) ||
+ strings.HasPrefix(req.URL.Scheme+req.URL.Host+req.URL.Path, prefix)
+ }
+}
+
+// UrlIs returns a ReqCondition, testing whether or not the request URL is one of the given strings
+// with or without the host prefix.
+// UrlIs("google.com/","foo") will match requests 'GET /' to 'google.com', requests `'GET google.com/' to
+// any host, and requests of the form 'GET foo'.
+func UrlIs(urls ...string) ReqConditionFunc {
+ urlSet := make(map[string]bool)
+ for _, u := range urls {
+ urlSet[u] = true
+ }
+ return func(req *http.Request, ctx *ProxyCtx) bool {
+ _, pathOk := urlSet[req.URL.Path]
+ _, hostAndOk := urlSet[req.URL.Host+req.URL.Path]
+ return pathOk || hostAndOk
+ }
+}
+
+// ReqHostMatches returns a ReqCondition, testing whether the host to which the request was directed
+// matches any of the given regular expressions.
+func ReqHostMatches(regexps ...*regexp.Regexp) ReqConditionFunc {
+ return func(req *http.Request, ctx *ProxyCtx) bool {
+ for _, re := range regexps {
+ if re.MatchString(req.Host) {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+// ReqHostIs returns a ReqCondition, testing whether the host to which the request is directed
+// equals one of the given strings
+func ReqHostIs(hosts ...string) ReqConditionFunc {
+ hostSet := make(map[string]bool)
+ for _, h := range hosts {
+ hostSet[h] = true
+ }
+ return func(req *http.Request, ctx *ProxyCtx) bool {
+ _, ok := hostSet[req.URL.Host]
+ return ok
+ }
+}
+
+var localHostIpv4 = regexp.MustCompile(`127\.0\.0\.\d+`)
+
+// IsLocalHost checks whether the destination host is explicitly local host
+// (buggy, there can be IPv6 addresses it doesn't catch)
+var IsLocalHost ReqConditionFunc = func(req *http.Request, ctx *ProxyCtx) bool {
+ return req.URL.Host == "::1" ||
+ req.URL.Host == "0:0:0:0:0:0:0:1" ||
+ localHostIpv4.MatchString(req.URL.Host) ||
+ req.URL.Host == "localhost"
+}
+
+// UrlMatches returns a ReqCondition testing whether the destination URL
+// of the request matches the given regexp, with or without prefix
+func UrlMatches(re *regexp.Regexp) ReqConditionFunc {
+ return func(req *http.Request, ctx *ProxyCtx) bool {
+ return re.MatchString(req.URL.Path) ||
+ re.MatchString(req.URL.Host+req.URL.Path)
+ }
+}
+
+// DstHostIs returns a ReqCondition testing whether the host in the request URL is the given string
+func DstHostIs(host string) ReqConditionFunc {
+ return func(req *http.Request, ctx *ProxyCtx) bool {
+ return req.URL.Host == host
+ }
+}
+
+// SrcIpIs returns a ReqCondition testing whether the source IP of the request is one of the given strings
+func SrcIpIs(ips ...string) ReqCondition {
+ return ReqConditionFunc(func(req *http.Request, ctx *ProxyCtx) bool {
+ for _, ip := range ips {
+ if strings.HasPrefix(req.RemoteAddr, ip+":") {
+ return true
+ }
+ }
+ return false
+ })
+}
+
+// Not returns a ReqCondition negating the given ReqCondition
+func Not(r ReqCondition) ReqConditionFunc {
+ return func(req *http.Request, ctx *ProxyCtx) bool {
+ return !r.HandleReq(req, ctx)
+ }
+}
+
+// ContentTypeIs returns a RespCondition testing whether the HTTP response has Content-Type header equal
+// to one of the given strings.
+func ContentTypeIs(typ string, types ...string) RespCondition {
+ types = append(types, typ)
+ return RespConditionFunc(func(resp *http.Response, ctx *ProxyCtx) bool {
+ if resp == nil {
+ return false
+ }
+ contentType := resp.Header.Get("Content-Type")
+ for _, typ := range types {
+ if contentType == typ || strings.HasPrefix(contentType, typ+";") {
+ return true
+ }
+ }
+ return false
+ })
+}
+
+// ProxyHttpServer.OnRequest will return a temporary ReqProxyConds struct, aggregating the given conditions.
+// You will use the ReqProxyConds struct to register a ReqHandler, that would filter
+// the request, only if all the given ReqCondition matched.
+// Typical usage:
+// proxy.OnRequest(UrlIs("example.com/foo"),UrlMatches(regexp.MustCompile(`.*\.example\.com/.*`))).Do(...)
+func (proxy *ProxyHttpServer) OnRequest(conds ...ReqCondition) *ReqProxyConds {
+ return &ReqProxyConds{proxy, conds}
+}
+
+// ReqProxyConds aggregate ReqConditions for a ProxyHttpServer. Upon calling Do, it will register a ReqHandler that would
+// handle the request if all conditions on the HTTP request are met.
+type ReqProxyConds struct {
+ proxy *ProxyHttpServer
+ reqConds []ReqCondition
+}
+
+// DoFunc is equivalent to proxy.OnRequest().Do(FuncReqHandler(f))
+func (pcond *ReqProxyConds) DoFunc(f func(req *http.Request, ctx *ProxyCtx) (*http.Request, *http.Response)) {
+ pcond.Do(FuncReqHandler(f))
+}
+
+// ReqProxyConds.Do will register the ReqHandler on the proxy,
+// the ReqHandler will handle the HTTP request if all the conditions
+// aggregated in the ReqProxyConds are met. Typical usage:
+// proxy.OnRequest().Do(handler) // will call handler.Handle(req,ctx) on every request to the proxy
+// proxy.OnRequest(cond1,cond2).Do(handler)
+// // given request to the proxy, will test if cond1.HandleReq(req,ctx) && cond2.HandleReq(req,ctx) are true
+// // if they are, will call handler.Handle(req,ctx)
+func (pcond *ReqProxyConds) Do(h ReqHandler) {
+ pcond.proxy.reqHandlers = append(pcond.proxy.reqHandlers,
+ FuncReqHandler(func(r *http.Request, ctx *ProxyCtx) (*http.Request, *http.Response) {
+ for _, cond := range pcond.reqConds {
+ if !cond.HandleReq(r, ctx) {
+ return r, nil
+ }
+ }
+ return h.Handle(r, ctx)
+ }))
+}
+
+// HandleConnect is used when the proxy receives an HTTP CONNECT request;
+// it'll then use the HttpsHandler to determine what it should
+// do with this request. The handler returns a ConnectAction struct, the Action field in the ConnectAction
+// struct returned will determine what to do with this request. ConnectAccept will simply accept the request
+// forwarding all bytes from the client to the remote host, ConnectReject will close the connection with the
+// client, and ConnectMitm, will assume the underlying connection is an HTTPS connection, and will use Man
+// in the Middle attack to eavesdrop the connection. All regular handler will be active on this eavesdropped
+// connection.
+// The ConnectAction struct contains possible tlsConfig that will be used for eavesdropping. If nil, the proxy
+// will use the default tls configuration.
+// proxy.OnRequest().HandleConnect(goproxy.AlwaysReject) // rejects all CONNECT requests
+func (pcond *ReqProxyConds) HandleConnect(h HttpsHandler) {
+ pcond.proxy.httpsHandlers = append(pcond.proxy.httpsHandlers,
+ FuncHttpsHandler(func(host string, ctx *ProxyCtx) (*ConnectAction, string) {
+ for _, cond := range pcond.reqConds {
+ if !cond.HandleReq(ctx.Req, ctx) {
+ return nil, ""
+ }
+ }
+ return h.HandleConnect(host, ctx)
+ }))
+}
+
+// HandleConnectFunc is equivalent to HandleConnect,
+// for example, accepting CONNECT request if they contain a password in header
+// io.WriteString(h,password)
+// passHash := h.Sum(nil)
+// proxy.OnRequest().HandleConnectFunc(func(host string, ctx *ProxyCtx) (*ConnectAction, string) {
+// c := sha1.New()
+// io.WriteString(c,ctx.Req.Header.Get("X-GoProxy-Auth"))
+// if c.Sum(nil) == passHash {
+// return OkConnect, host
+// }
+// return RejectConnect, host
+// })
+func (pcond *ReqProxyConds) HandleConnectFunc(f func(host string, ctx *ProxyCtx) (*ConnectAction, string)) {
+ pcond.HandleConnect(FuncHttpsHandler(f))
+}
+
+func (pcond *ReqProxyConds) HijackConnect(f func(req *http.Request, client net.Conn, ctx *ProxyCtx)) {
+ pcond.proxy.httpsHandlers = append(pcond.proxy.httpsHandlers,
+ FuncHttpsHandler(func(host string, ctx *ProxyCtx) (*ConnectAction, string) {
+ for _, cond := range pcond.reqConds {
+ if !cond.HandleReq(ctx.Req, ctx) {
+ return nil, ""
+ }
+ }
+ return &ConnectAction{Action: ConnectHijack, Hijack: f}, host
+ }))
+}
+
+// ProxyConds is used to aggregate RespConditions for a ProxyHttpServer.
+// Upon calling ProxyConds.Do, it will register a RespHandler that would
+// handle the HTTP response from remote server if all conditions on the HTTP response are met.
+type ProxyConds struct {
+ proxy *ProxyHttpServer
+ reqConds []ReqCondition
+ respCond []RespCondition
+}
+
+// ProxyConds.DoFunc is equivalent to proxy.OnResponse().Do(FuncRespHandler(f))
+func (pcond *ProxyConds) DoFunc(f func(resp *http.Response, ctx *ProxyCtx) *http.Response) {
+ pcond.Do(FuncRespHandler(f))
+}
+
+// ProxyConds.Do will register the RespHandler on the proxy, h.Handle(resp,ctx) will be called on every
+// request that matches the conditions aggregated in pcond.
+func (pcond *ProxyConds) Do(h RespHandler) {
+ pcond.proxy.respHandlers = append(pcond.proxy.respHandlers,
+ FuncRespHandler(func(resp *http.Response, ctx *ProxyCtx) *http.Response {
+ for _, cond := range pcond.reqConds {
+ if !cond.HandleReq(ctx.Req, ctx) {
+ return resp
+ }
+ }
+ for _, cond := range pcond.respCond {
+ if !cond.HandleResp(resp, ctx) {
+ return resp
+ }
+ }
+ return h.Handle(resp, ctx)
+ }))
+}
+
+// OnResponse is used when adding a response-filter to the HTTP proxy, usual pattern is
+// proxy.OnResponse(cond1,cond2).Do(handler) // handler.Handle(resp,ctx) will be used
+// // if cond1.HandleResp(resp) && cond2.HandleResp(resp)
+func (proxy *ProxyHttpServer) OnResponse(conds ...RespCondition) *ProxyConds {
+ return &ProxyConds{proxy, make([]ReqCondition, 0), conds}
+}
+
+// AlwaysMitm is a HttpsHandler that always eavesdrop https connections, for example to
+// eavesdrop all https connections to www.google.com, we can use
+// proxy.OnRequest(goproxy.ReqHostIs("www.google.com")).HandleConnect(goproxy.AlwaysMitm)
+var AlwaysMitm FuncHttpsHandler = func(host string, ctx *ProxyCtx) (*ConnectAction, string) {
+ return MitmConnect, host
+}
+
+// AlwaysReject is a HttpsHandler that drops any CONNECT request, for example, this code will disallow
+// connections to hosts on any other port than 443
+// proxy.OnRequest(goproxy.Not(goproxy.ReqHostMatches(regexp.MustCompile(":443$"))).
+// HandleConnect(goproxy.AlwaysReject)
+var AlwaysReject FuncHttpsHandler = func(host string, ctx *ProxyCtx) (*ConnectAction, string) {
+ return RejectConnect, host
+}
+
+// HandleBytes will return a RespHandler that reads the entire body of the response
+// into a byte array in memory, runs the user-supplied f function on that byte array,
+// and replaces the body of the original response with the resulting byte array.
+func HandleBytes(f func(b []byte, ctx *ProxyCtx) []byte) RespHandler {
+ return FuncRespHandler(func(resp *http.Response, ctx *ProxyCtx) *http.Response {
+ b, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ ctx.Warnf("Cannot read response %s", err)
+ return resp
+ }
+ resp.Body.Close()
+
+ resp.Body = ioutil.NopCloser(bytes.NewBuffer(f(b, ctx)))
+ return resp
+ })
+}
diff --git a/vendor/github.com/elazarl/goproxy/doc.go b/vendor/github.com/elazarl/goproxy/doc.go
new file mode 100644
index 000000000..50aaa71f8
--- /dev/null
+++ b/vendor/github.com/elazarl/goproxy/doc.go
@@ -0,0 +1,100 @@
+/*
+Package goproxy provides a customizable HTTP proxy,
+supporting hijacking HTTPS connection.
+
+The intent of the proxy is to be usable with a reasonable amount of traffic,
+yet customizable and programmable.
+
+The proxy itself is simply a `net/http` handler.
+
+Typical usage is
+
+ proxy := goproxy.NewProxyHttpServer()
+ proxy.OnRequest(..conditions..).Do(..requesthandler..)
+ proxy.OnRequest(..conditions..).DoFunc(..requesthandlerFunction..)
+ proxy.OnResponse(..conditions..).Do(..responesHandler..)
+ proxy.OnResponse(..conditions..).DoFunc(..responesHandlerFunction..)
+ http.ListenAndServe(":8080", proxy)
+
+Adding a header to each request
+
+ proxy.OnRequest().DoFunc(func(r *http.Request,ctx *goproxy.ProxyCtx) (*http.Request, *http.Response){
+ r.Header.Set("X-GoProxy","1")
+ return r, nil
+ })
+
+Note that the function is called before the proxy sends the request to the server
+
+For printing the content type of all incoming responses
+
+ proxy.OnResponse().DoFunc(func(r *http.Response, ctx *goproxy.ProxyCtx)*http.Response{
+ println(ctx.Req.Host,"->",r.Header.Get("Content-Type"))
+ return r
+ })
+
+note that we used the ProxyCtx context variable here. It contains the request
+and the response (Req and Resp, Resp is nil if unavailable) of this specific client
+interaction with the proxy.
+
+To print the content type of all responses from a certain url, we'll add a
+ReqCondition to the OnResponse function:
+
+ proxy.OnResponse(goproxy.UrlIs("golang.org/pkg")).DoFunc(func(r *http.Response, ctx *goproxy.ProxyCtx)*http.Response{
+ println(ctx.Req.Host,"->",r.Header.Get("Content-Type"))
+ return r
+ })
+
+We can write the condition ourselves, conditions can be set on request and on response
+
+ var random = ReqConditionFunc(func(r *http.Request) bool {
+ return rand.Intn(1) == 0
+ })
+ var hasGoProxyHeader = RespConditionFunc(func(resp *http.Response,req *http.Request)bool {
+ return resp.Header.Get("X-GoProxy") != ""
+ })
+
+Caution! If you give a RespCondition to the OnRequest function, you'll get a run time panic! It doesn't
+make sense to read the response, if you still haven't got it!
+
+Finally, we have convenience function to throw a quick response
+
+ proxy.OnResponse(hasGoProxyHeader).DoFunc(func(r*http.Response,ctx *goproxy.ProxyCtx)*http.Response {
+ r.Body.Close()
+ return goproxy.ForbiddenTextResponse(ctx.Req,"Can't see response with X-GoProxy header!")
+ })
+
+we close the body of the original response, and return a new 403 response with a short message.
+
+Example use cases:
+
+1. https://github.com/elazarl/goproxy/tree/master/examples/goproxy-avgsize
+
+To measure the average size of the HTML served on your site. One can ask
+the whole QA team to access the website through a proxy, and the proxy will
+measure the average size of all text/html responses from your host.
+
+2. [not yet implemented]
+
+All requests to your web servers should be directed through the proxy;
+when the proxy detects HTML fragments sent as a response to an AJAX
+request, it'll send a warning email.
+
+3. https://github.com/elazarl/goproxy/blob/master/examples/goproxy-httpdump/
+
+Generate real traffic to your website from real users going through the
+proxy. Record the traffic, and replay it for more realistic load testing.
+
+4. https://github.com/elazarl/goproxy/tree/master/examples/goproxy-no-reddit-at-worktime
+
+Will allow browsing to reddit.com between 8:00am and 17:00pm
+
+5. https://github.com/elazarl/goproxy/tree/master/examples/goproxy-jquery-version
+
+Will warn if multiple versions of jquery are used in the same domain.
+
+6. https://github.com/elazarl/goproxy/blob/master/examples/goproxy-upside-down-ternet/
+
+Modifies image files in an HTTP response via goproxy's image extension found in ext/.
+
+*/
+package goproxy
diff --git a/vendor/github.com/elazarl/goproxy/https.go b/vendor/github.com/elazarl/goproxy/https.go
new file mode 100644
index 000000000..12de7511d
--- /dev/null
+++ b/vendor/github.com/elazarl/goproxy/https.go
@@ -0,0 +1,435 @@
+package goproxy
+
+import (
+ "bufio"
+ "crypto/tls"
+ "errors"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "regexp"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+)
+
+type ConnectActionLiteral int
+
+const (
+ ConnectAccept = iota
+ ConnectReject
+ ConnectMitm
+ ConnectHijack
+ ConnectHTTPMitm
+ ConnectProxyAuthHijack
+)
+
+var (
+ OkConnect = &ConnectAction{Action: ConnectAccept, TLSConfig: TLSConfigFromCA(&GoproxyCa)}
+ MitmConnect = &ConnectAction{Action: ConnectMitm, TLSConfig: TLSConfigFromCA(&GoproxyCa)}
+ HTTPMitmConnect = &ConnectAction{Action: ConnectHTTPMitm, TLSConfig: TLSConfigFromCA(&GoproxyCa)}
+ RejectConnect = &ConnectAction{Action: ConnectReject, TLSConfig: TLSConfigFromCA(&GoproxyCa)}
+ httpsRegexp = regexp.MustCompile(`^https:\/\/`)
+)
+
+type ConnectAction struct {
+ Action ConnectActionLiteral
+ Hijack func(req *http.Request, client net.Conn, ctx *ProxyCtx)
+ TLSConfig func(host string, ctx *ProxyCtx) (*tls.Config, error)
+}
+
+func stripPort(s string) string {
+ ix := strings.IndexRune(s, ':')
+ if ix == -1 {
+ return s
+ }
+ return s[:ix]
+}
+
+func (proxy *ProxyHttpServer) dial(network, addr string) (c net.Conn, err error) {
+ if proxy.Tr.Dial != nil {
+ return proxy.Tr.Dial(network, addr)
+ }
+ return net.Dial(network, addr)
+}
+
+func (proxy *ProxyHttpServer) connectDial(network, addr string) (c net.Conn, err error) {
+ if proxy.ConnectDial == nil {
+ return proxy.dial(network, addr)
+ }
+ return proxy.ConnectDial(network, addr)
+}
+
+func (proxy *ProxyHttpServer) handleHttps(w http.ResponseWriter, r *http.Request) {
+ ctx := &ProxyCtx{Req: r, Session: atomic.AddInt64(&proxy.sess, 1), proxy: proxy, certStore: proxy.CertStore}
+
+ hij, ok := w.(http.Hijacker)
+ if !ok {
+ panic("httpserver does not support hijacking")
+ }
+
+ proxyClient, _, e := hij.Hijack()
+ if e != nil {
+ panic("Cannot hijack connection " + e.Error())
+ }
+
+ ctx.Logf("Running %d CONNECT handlers", len(proxy.httpsHandlers))
+ todo, host := OkConnect, r.URL.Host
+ for i, h := range proxy.httpsHandlers {
+ newtodo, newhost := h.HandleConnect(host, ctx)
+
+ // If found a result, break the loop immediately
+ if newtodo != nil {
+ todo, host = newtodo, newhost
+ ctx.Logf("on %dth handler: %v %s", i, todo, host)
+ break
+ }
+ }
+ switch todo.Action {
+ case ConnectAccept:
+ if !hasPort.MatchString(host) {
+ host += ":80"
+ }
+ targetSiteCon, err := proxy.connectDial("tcp", host)
+ if err != nil {
+ httpError(proxyClient, ctx, err)
+ return
+ }
+ ctx.Logf("Accepting CONNECT to %s", host)
+ proxyClient.Write([]byte("HTTP/1.0 200 OK\r\n\r\n"))
+
+ targetTCP, targetOK := targetSiteCon.(*net.TCPConn)
+ proxyClientTCP, clientOK := proxyClient.(*net.TCPConn)
+ if targetOK && clientOK {
+ go copyAndClose(ctx, targetTCP, proxyClientTCP)
+ go copyAndClose(ctx, proxyClientTCP, targetTCP)
+ } else {
+ go func() {
+ var wg sync.WaitGroup
+ wg.Add(2)
+ go copyOrWarn(ctx, targetSiteCon, proxyClient, &wg)
+ go copyOrWarn(ctx, proxyClient, targetSiteCon, &wg)
+ wg.Wait()
+ proxyClient.Close()
+ targetSiteCon.Close()
+
+ }()
+ }
+
+ case ConnectHijack:
+ ctx.Logf("Hijacking CONNECT to %s", host)
+ proxyClient.Write([]byte("HTTP/1.0 200 OK\r\n\r\n"))
+ todo.Hijack(r, proxyClient, ctx)
+ case ConnectHTTPMitm:
+ proxyClient.Write([]byte("HTTP/1.0 200 OK\r\n\r\n"))
+ ctx.Logf("Assuming CONNECT is plain HTTP tunneling, mitm proxying it")
+ targetSiteCon, err := proxy.connectDial("tcp", host)
+ if err != nil {
+ ctx.Warnf("Error dialing to %s: %s", host, err.Error())
+ return
+ }
+ for {
+ client := bufio.NewReader(proxyClient)
+ remote := bufio.NewReader(targetSiteCon)
+ req, err := http.ReadRequest(client)
+ if err != nil && err != io.EOF {
+ ctx.Warnf("cannot read request of MITM HTTP client: %+#v", err)
+ }
+ if err != nil {
+ return
+ }
+ req, resp := proxy.filterRequest(req, ctx)
+ if resp == nil {
+ if err := req.Write(targetSiteCon); err != nil {
+ httpError(proxyClient, ctx, err)
+ return
+ }
+ resp, err = http.ReadResponse(remote, req)
+ if err != nil {
+ httpError(proxyClient, ctx, err)
+ return
+ }
+ defer resp.Body.Close()
+ }
+ resp = proxy.filterResponse(resp, ctx)
+ if err := resp.Write(proxyClient); err != nil {
+ httpError(proxyClient, ctx, err)
+ return
+ }
+ }
+ case ConnectMitm:
+ proxyClient.Write([]byte("HTTP/1.0 200 OK\r\n\r\n"))
+ ctx.Logf("Assuming CONNECT is TLS, mitm proxying it")
+		// This runs in a separate goroutine so that the net/http server won't think we're
+		// still handling the request even after hijacking the connection. These HTTP CONNECT
+		// requests can take forever, and the server would otherwise be stuck when "closed".
+ // TODO: Allow Server.Close() mechanism to shut down this connection as nicely as possible
+ tlsConfig := defaultTLSConfig
+ if todo.TLSConfig != nil {
+ var err error
+ tlsConfig, err = todo.TLSConfig(host, ctx)
+ if err != nil {
+ httpError(proxyClient, ctx, err)
+ return
+ }
+ }
+ go func() {
+ //TODO: cache connections to the remote website
+ rawClientTls := tls.Server(proxyClient, tlsConfig)
+ if err := rawClientTls.Handshake(); err != nil {
+ ctx.Warnf("Cannot handshake client %v %v", r.Host, err)
+ return
+ }
+ defer rawClientTls.Close()
+ clientTlsReader := bufio.NewReader(rawClientTls)
+ for !isEof(clientTlsReader) {
+ req, err := http.ReadRequest(clientTlsReader)
+ var ctx = &ProxyCtx{Req: req, Session: atomic.AddInt64(&proxy.sess, 1), proxy: proxy, UserData: ctx.UserData}
+ if err != nil && err != io.EOF {
+ return
+ }
+ if err != nil {
+ ctx.Warnf("Cannot read TLS request from mitm'd client %v %v", r.Host, err)
+ return
+ }
+ req.RemoteAddr = r.RemoteAddr // since we're converting the request, need to carry over the original connecting IP as well
+ ctx.Logf("req %v", r.Host)
+
+ if !httpsRegexp.MatchString(req.URL.String()) {
+ req.URL, err = url.Parse("https://" + r.Host + req.URL.String())
+ }
+
+				// Bug fix: goproxy fails to provide the request URL in the
+				// context when doing HTTPS MITM, so set it here explicitly.
+ ctx.Req = req
+
+ req, resp := proxy.filterRequest(req, ctx)
+ if resp == nil {
+ if err != nil {
+ ctx.Warnf("Illegal URL %s", "https://"+r.Host+req.URL.Path)
+ return
+ }
+ removeProxyHeaders(ctx, req)
+ resp, err = ctx.RoundTrip(req)
+ if err != nil {
+ ctx.Warnf("Cannot read TLS response from mitm'd server %v", err)
+ return
+ }
+ ctx.Logf("resp %v", resp.Status)
+ }
+ resp = proxy.filterResponse(resp, ctx)
+ defer resp.Body.Close()
+
+ text := resp.Status
+ statusCode := strconv.Itoa(resp.StatusCode) + " "
+ if strings.HasPrefix(text, statusCode) {
+ text = text[len(statusCode):]
+ }
+ // always use 1.1 to support chunked encoding
+ if _, err := io.WriteString(rawClientTls, "HTTP/1.1"+" "+statusCode+text+"\r\n"); err != nil {
+ ctx.Warnf("Cannot write TLS response HTTP status from mitm'd client: %v", err)
+ return
+ }
+ // Since we don't know the length of resp, return chunked encoded response
+ // TODO: use a more reasonable scheme
+ resp.Header.Del("Content-Length")
+ resp.Header.Set("Transfer-Encoding", "chunked")
+				// Force connection close, otherwise Chrome will keep the CONNECT tunnel open forever.
+ resp.Header.Set("Connection", "close")
+ if err := resp.Header.Write(rawClientTls); err != nil {
+ ctx.Warnf("Cannot write TLS response header from mitm'd client: %v", err)
+ return
+ }
+ if _, err = io.WriteString(rawClientTls, "\r\n"); err != nil {
+ ctx.Warnf("Cannot write TLS response header end from mitm'd client: %v", err)
+ return
+ }
+ chunked := newChunkedWriter(rawClientTls)
+ if _, err := io.Copy(chunked, resp.Body); err != nil {
+ ctx.Warnf("Cannot write TLS response body from mitm'd client: %v", err)
+ return
+ }
+ if err := chunked.Close(); err != nil {
+ ctx.Warnf("Cannot write TLS chunked EOF from mitm'd client: %v", err)
+ return
+ }
+ if _, err = io.WriteString(rawClientTls, "\r\n"); err != nil {
+ ctx.Warnf("Cannot write TLS response chunked trailer from mitm'd client: %v", err)
+ return
+ }
+ }
+ ctx.Logf("Exiting on EOF")
+ }()
+ case ConnectProxyAuthHijack:
+ proxyClient.Write([]byte("HTTP/1.1 407 Proxy Authentication Required\r\n"))
+ todo.Hijack(r, proxyClient, ctx)
+ case ConnectReject:
+ if ctx.Resp != nil {
+ if err := ctx.Resp.Write(proxyClient); err != nil {
+				ctx.Warnf("Cannot write response that rejects http CONNECT: %v", err)
+ }
+ }
+ proxyClient.Close()
+ }
+}
+
+func httpError(w io.WriteCloser, ctx *ProxyCtx, err error) {
+ if _, err := io.WriteString(w, "HTTP/1.1 502 Bad Gateway\r\n\r\n"); err != nil {
+ ctx.Warnf("Error responding to client: %s", err)
+ }
+ if err := w.Close(); err != nil {
+ ctx.Warnf("Error closing client connection: %s", err)
+ }
+}
+
+func copyOrWarn(ctx *ProxyCtx, dst io.Writer, src io.Reader, wg *sync.WaitGroup) {
+ if _, err := io.Copy(dst, src); err != nil {
+ ctx.Warnf("Error copying to client: %s", err)
+ }
+ wg.Done()
+}
+
+func copyAndClose(ctx *ProxyCtx, dst, src *net.TCPConn) {
+ if _, err := io.Copy(dst, src); err != nil {
+ ctx.Warnf("Error copying to client: %s", err)
+ }
+
+ dst.CloseWrite()
+ src.CloseRead()
+}
+
+func dialerFromEnv(proxy *ProxyHttpServer) func(network, addr string) (net.Conn, error) {
+ https_proxy := os.Getenv("HTTPS_PROXY")
+ if https_proxy == "" {
+ https_proxy = os.Getenv("https_proxy")
+ }
+ if https_proxy == "" {
+ return nil
+ }
+ return proxy.NewConnectDialToProxy(https_proxy)
+}
+
+func (proxy *ProxyHttpServer) NewConnectDialToProxy(https_proxy string) func(network, addr string) (net.Conn, error) {
+ return proxy.NewConnectDialToProxyWithHandler(https_proxy, nil)
+}
+
+func (proxy *ProxyHttpServer) NewConnectDialToProxyWithHandler(https_proxy string, connectReqHandler func(req *http.Request)) func(network, addr string) (net.Conn, error) {
+ u, err := url.Parse(https_proxy)
+ if err != nil {
+ return nil
+ }
+ if u.Scheme == "" || u.Scheme == "http" {
+ if strings.IndexRune(u.Host, ':') == -1 {
+ u.Host += ":80"
+ }
+ return func(network, addr string) (net.Conn, error) {
+ connectReq := &http.Request{
+ Method: "CONNECT",
+ URL: &url.URL{Opaque: addr},
+ Host: addr,
+ Header: make(http.Header),
+ }
+ if connectReqHandler != nil {
+ connectReqHandler(connectReq)
+ }
+ c, err := proxy.dial(network, u.Host)
+ if err != nil {
+ return nil, err
+ }
+ connectReq.Write(c)
+ // Read response.
+ // Okay to use and discard buffered reader here, because
+ // TLS server will not speak until spoken to.
+ br := bufio.NewReader(c)
+ resp, err := http.ReadResponse(br, connectReq)
+ if err != nil {
+ c.Close()
+ return nil, err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != 200 {
+ resp, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+ c.Close()
+				return nil, errors.New("proxy refused connection " + string(resp))
+ }
+ return c, nil
+ }
+ }
+ if u.Scheme == "https" {
+ if strings.IndexRune(u.Host, ':') == -1 {
+ u.Host += ":443"
+ }
+ return func(network, addr string) (net.Conn, error) {
+ c, err := proxy.dial(network, u.Host)
+ if err != nil {
+ return nil, err
+ }
+ c = tls.Client(c, proxy.Tr.TLSClientConfig)
+ connectReq := &http.Request{
+ Method: "CONNECT",
+ URL: &url.URL{Opaque: addr},
+ Host: addr,
+ Header: make(http.Header),
+ }
+ if connectReqHandler != nil {
+ connectReqHandler(connectReq)
+ }
+ connectReq.Write(c)
+ // Read response.
+ // Okay to use and discard buffered reader here, because
+ // TLS server will not speak until spoken to.
+ br := bufio.NewReader(c)
+ resp, err := http.ReadResponse(br, connectReq)
+ if err != nil {
+ c.Close()
+ return nil, err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != 200 {
+ body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 500))
+ if err != nil {
+ return nil, err
+ }
+ c.Close()
+				return nil, errors.New("proxy refused connection " + string(body))
+ }
+ return c, nil
+ }
+ }
+ return nil
+}
+
+func TLSConfigFromCA(ca *tls.Certificate) func(host string, ctx *ProxyCtx) (*tls.Config, error) {
+ return func(host string, ctx *ProxyCtx) (*tls.Config, error) {
+ var err error
+ var cert *tls.Certificate
+
+ hostname := stripPort(host)
+ config := *defaultTLSConfig
+ ctx.Logf("signing for %s", stripPort(host))
+
+ genCert := func() (*tls.Certificate, error) {
+ return signHost(*ca, []string{hostname})
+ }
+ if ctx.certStore != nil {
+ cert, err = ctx.certStore.Fetch(hostname, genCert)
+ } else {
+ cert, err = genCert()
+ }
+
+ if err != nil {
+ ctx.Warnf("Cannot sign host certificate with provided CA: %s", err)
+ return nil, err
+ }
+
+ config.Certificates = append(config.Certificates, *cert)
+ return &config, nil
+ }
+}
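
Not part of the diff itself, but for orientation: the `TLSConfigFromCA` helper above is what turns a CONNECT tunnel into a TLS MITM session, signing a per-host leaf certificate with the supplied CA. A minimal sketch of wiring it up with a custom CA might look like the following; the `OnRequest`/`HandleConnectFunc` helpers come from goproxy's dispatch code (vendored elsewhere in this change), and the `ca.pem`/`ca-key.pem` paths are placeholders.

```go
package main

import (
	"crypto/tls"
	"log"
	"net/http"

	"github.com/elazarl/goproxy"
)

func main() {
	// Load a custom CA instead of the bundled goproxy CA (ca.pem/key.pem in this package).
	ca, err := tls.LoadX509KeyPair("ca.pem", "ca-key.pem")
	if err != nil {
		log.Fatal(err)
	}

	proxy := goproxy.NewProxyHttpServer()

	// Sign a leaf certificate per host with the custom CA whenever a CONNECT is MITM'd.
	mitm := &goproxy.ConnectAction{
		Action:    goproxy.ConnectMitm,
		TLSConfig: goproxy.TLSConfigFromCA(&ca),
	}
	proxy.OnRequest().HandleConnectFunc(func(host string, ctx *goproxy.ProxyCtx) (*goproxy.ConnectAction, string) {
		return mitm, host
	})

	log.Fatal(http.ListenAndServe(":8080", proxy))
}
```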
diff --git a/vendor/github.com/elazarl/goproxy/key.pem b/vendor/github.com/elazarl/goproxy/key.pem
new file mode 100644
index 000000000..2ea1dca4e
--- /dev/null
+++ b/vendor/github.com/elazarl/goproxy/key.pem
@@ -0,0 +1,51 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIJKAIBAAKCAgEAnhDL4fqGGhjWzRBFy8iHGuNIdo79FtoWPevCpyek6AWrTuBF
+0j3dzRMUpAkemC/p94tGES9f9iWUVi7gnfmUz1lxhjiqUoW5K1xfwmbx+qmC2YAw
+HM+yq2oOLwz1FAYoQ3NT0gU6cJXtIB6Hjmxwy4jfDPzCuMFwfvOq4eS+pRJhnPTf
+m31XpZOsfJMS9PjD6UU5U3ZsD/oMAjGuMGIXoOGgmqeFrRJm0N+/vtenAYbcSED+
+qiGGJisOu5grvMl0RJAvjgvDMw+6lWKCpqV+/5gd9CNuFP3nUhW6tbY0mBHIETrZ
+0uuUdh21P20JMKt34ok0wn6On2ECN0i7UGv+SJ9TgXj7hksxH1R6OLQaSQ8qxh3I
+yeqPSnQ+iDK8/WXiqZug8iYxi1qgW5iYxiV5uAL0s3XRsv3Urj6Mu3QjVie0TOuq
+AmhawnO1gPDnjc3NLLlb79yrhdFiC2rVvRFbC5SKzB7OYyh7IdnwFAl7bEyMA6WU
+BIN+prw4rdYAEcmnLjNSudQGIy48hPMP8W4PHgLkjDCULryAcBluU2qkFkJfScUK
+0qNg5wjZKjkdtDY4LxAX7MZW524dRKiTiFLLYEF9nWl+/OKoF561YnAW9qkYHjic
+geFYo0q+o7Es0jLt75MZGJY6iasBYzXxVJH0tlsHGkkrs8tLNapglhNEJkcCAwEA
+AQKCAgAwSuNvxHHqUUJ3XoxkiXy1u1EtX9x1eeYnvvs2xMb+WJURQTYz2NEGUdkR
+kPO2/ZSXHAcpQvcnpi2e8y2PNmy/uQ0VPATVt6NuWweqxncR5W5j82U/uDlXY8y3
+lVbfak4s5XRri0tikHvlP06dNgZ0OPok5qi7d+Zd8yZ3Y8LXfjkykiIrSG1Z2jdt
+zCWTkNmSUKMGG/1CGFxI41Lb12xuq+C8v4f469Fb6bCUpyCQN9rffHQSGLH6wVb7
++68JO+d49zCATpmx5RFViMZwEcouXxRvvc9pPHXLP3ZPBD8nYu9kTD220mEGgWcZ
+3L9dDlZPcSocbjw295WMvHz2QjhrDrb8gXwdpoRyuyofqgCyNxSnEC5M13SjOxtf
+pjGzjTqh0kDlKXg2/eTkd9xIHjVhFYiHIEeITM/lHCfWwBCYxViuuF7pSRPzTe8U
+C440b62qZSPMjVoquaMg+qx0n9fKSo6n1FIKHypv3Kue2G0WhDeK6u0U288vQ1t4
+Ood3Qa13gZ+9hwDLbM/AoBfVBDlP/tpAwa7AIIU1ZRDNbZr7emFdctx9B6kLINv3
+4PDOGM2xrjOuACSGMq8Zcu7LBz35PpIZtviJOeKNwUd8/xHjWC6W0itgfJb5I1Nm
+V6Vj368pGlJx6Se26lvXwyyrc9pSw6jSAwARBeU4YkNWpi4i6QKCAQEA0T7u3P/9
+jZJSnDN1o2PXymDrJulE61yguhc/QSmLccEPZe7or06/DmEhhKuCbv+1MswKDeag
+/1JdFPGhL2+4G/f/9BK3BJPdcOZSz7K6Ty8AMMBf8AehKTcSBqwkJWcbEvpHpKJ6
+eDqn1B6brXTNKMT6fEEXCuZJGPBpNidyLv/xXDcN7kCOo3nGYKfB5OhFpNiL63tw
++LntU56WESZwEqr8Pf80uFvsyXQK3a5q5HhIQtxl6tqQuPlNjsDBvCqj0x72mmaJ
+ZVsVWlv7khUrCwAXz7Y8K7mKKBd2ekF5hSbryfJsxFyvEaWUPhnJpTKV85lAS+tt
+FQuIp9TvKYlRQwKCAQEAwWJN8jysapdhi67jO0HtYOEl9wwnF4w6XtiOYtllkMmC
+06/e9h7RsRyWPMdu3qRDPUYFaVDy6+dpUDSQ0+E2Ot6AHtVyvjeUTIL651mFIo/7
+OSUCEc+HRo3SfPXdPhSQ2thNTxl6y9XcFacuvbthgr70KXbvC4k6IEmdpf/0Kgs9
+7QTZCG26HDrEZ2q9yMRlRaL2SRD+7Y2xra7gB+cQGFj6yn0Wd/07er49RqMXidQf
+KR2oYfev2BDtHXoSZFfhFGHlOdLvWRh90D4qZf4vQ+g/EIMgcNSoxjvph1EShmKt
+sjhTHtoHuu+XmEQvIewk2oCI+JvofBkcnpFrVvUUrQKCAQAaTIufETmgCo0BfuJB
+N/JOSGIl0NnNryWwXe2gVgVltbsmt6FdL0uKFiEtWJUbOF5g1Q5Kcvs3O/XhBQGa
+QbNlKIVt+tAv7hm97+Tmn/MUsraWagdk1sCluns0hXxBizT27KgGhDlaVRz05yfv
+5CdJAYDuDwxDXXBAhy7iFJEgYSDH00+X61tCJrMNQOh4ycy/DEyBu1EWod+3S85W
+t3sMjZsIe8P3i+4137Th6eMbdha2+JaCrxfTd9oMoCN5b+6JQXIDM/H+4DTN15PF
+540yY7+aZrAnWrmHknNcqFAKsTqfdi2/fFqwoBwCtiEG91WreU6AfEWIiJuTZIru
+sIibAoIBAAqIwlo5t+KukF+9jR9DPh0S5rCIdvCvcNaN0WPNF91FPN0vLWQW1bFi
+L0TsUDvMkuUZlV3hTPpQxsnZszH3iK64RB5p3jBCcs+gKu7DT59MXJEGVRCHT4Um
+YJryAbVKBYIGWl++sZO8+JotWzx2op8uq7o+glMMjKAJoo7SXIiVyC/LHc95urOi
+9+PySphPKn0anXPpexmRqGYfqpCDo7rPzgmNutWac80B4/CfHb8iUPg6Z1u+1FNe
+yKvcZHgW2Wn00znNJcCitufLGyAnMofudND/c5rx2qfBx7zZS7sKUQ/uRYjes6EZ
+QBbJUA/2/yLv8YYpaAaqj4aLwV8hRpkCggEBAIh3e25tr3avCdGgtCxS7Y1blQ2c
+ue4erZKmFP1u8wTNHQ03T6sECZbnIfEywRD/esHpclfF3kYAKDRqIP4K905Rb0iH
+759ZWt2iCbqZznf50XTvptdmjm5KxvouJzScnQ52gIV6L+QrCKIPelLBEIqCJREh
+pmcjjocD/UCCSuHgbAYNNnO/JdhnSylz1tIg26I+2iLNyeTKIepSNlsBxnkLmqM1
+cj/azKBaT04IOMLaN8xfSqitJYSraWMVNgGJM5vfcVaivZnNh0lZBv+qu6YkdM88
+4/avCJ8IutT+FcMM+GbGazOm5ALWqUyhrnbLGc4CQMPfe7Il6NxwcrOxT8w=
+-----END RSA PRIVATE KEY-----
diff --git a/vendor/github.com/elazarl/goproxy/logger.go b/vendor/github.com/elazarl/goproxy/logger.go
new file mode 100644
index 000000000..939cf69ed
--- /dev/null
+++ b/vendor/github.com/elazarl/goproxy/logger.go
@@ -0,0 +1,5 @@
+package goproxy
+
+type Logger interface {
+ Printf(format string, v ...interface{})
+}
diff --git a/vendor/github.com/elazarl/goproxy/proxy.go b/vendor/github.com/elazarl/goproxy/proxy.go
new file mode 100644
index 000000000..175851615
--- /dev/null
+++ b/vendor/github.com/elazarl/goproxy/proxy.go
@@ -0,0 +1,167 @@
+package goproxy
+
+import (
+ "bufio"
+ "io"
+ "log"
+ "net"
+ "net/http"
+ "os"
+ "regexp"
+ "sync/atomic"
+)
+
+// The basic proxy type. Implements http.Handler.
+type ProxyHttpServer struct {
+ // session variable must be aligned in i386
+ // see http://golang.org/src/pkg/sync/atomic/doc.go#L41
+ sess int64
+ // KeepDestinationHeaders indicates the proxy should retain any headers present in the http.Response before proxying
+ KeepDestinationHeaders bool
+ // setting Verbose to true will log information on each request sent to the proxy
+ Verbose bool
+ Logger Logger
+ NonproxyHandler http.Handler
+ reqHandlers []ReqHandler
+ respHandlers []RespHandler
+ httpsHandlers []HttpsHandler
+ Tr *http.Transport
+	// ConnectDial will be used to create TCP connections for CONNECT requests;
+	// if nil, Tr.Dial will be used.
+ ConnectDial func(network string, addr string) (net.Conn, error)
+ CertStore CertStorage
+}
+
+var hasPort = regexp.MustCompile(`:\d+$`)
+
+func copyHeaders(dst, src http.Header, keepDestHeaders bool) {
+ if !keepDestHeaders {
+ for k := range dst {
+ dst.Del(k)
+ }
+ }
+ for k, vs := range src {
+ for _, v := range vs {
+ dst.Add(k, v)
+ }
+ }
+}
+
+func isEof(r *bufio.Reader) bool {
+ _, err := r.Peek(1)
+ if err == io.EOF {
+ return true
+ }
+ return false
+}
+
+func (proxy *ProxyHttpServer) filterRequest(r *http.Request, ctx *ProxyCtx) (req *http.Request, resp *http.Response) {
+ req = r
+ for _, h := range proxy.reqHandlers {
+ req, resp = h.Handle(r, ctx)
+		// A non-nil resp means the handler decided to skip sending the request
+		// and return a canned response instead.
+ if resp != nil {
+ break
+ }
+ }
+ return
+}
+func (proxy *ProxyHttpServer) filterResponse(respOrig *http.Response, ctx *ProxyCtx) (resp *http.Response) {
+ resp = respOrig
+ for _, h := range proxy.respHandlers {
+ ctx.Resp = resp
+ resp = h.Handle(resp, ctx)
+ }
+ return
+}
+
+func removeProxyHeaders(ctx *ProxyCtx, r *http.Request) {
+ r.RequestURI = "" // this must be reset when serving a request with the client
+ ctx.Logf("Sending request %v %v", r.Method, r.URL.String())
+ // If no Accept-Encoding header exists, Transport will add the headers it can accept
+ // and would wrap the response body with the relevant reader.
+ r.Header.Del("Accept-Encoding")
+ // curl can add that, see
+ // https://jdebp.eu./FGA/web-proxy-connection-header.html
+ r.Header.Del("Proxy-Connection")
+ r.Header.Del("Proxy-Authenticate")
+ r.Header.Del("Proxy-Authorization")
+	// Connection, Authenticate and Authorization are single-hop headers:
+ // http://www.w3.org/Protocols/rfc2616/rfc2616.txt
+ // 14.10 Connection
+ // The Connection general-header field allows the sender to specify
+ // options that are desired for that particular connection and MUST NOT
+ // be communicated by proxies over further connections.
+ r.Header.Del("Connection")
+}
+
+// Standard net/http function. Shouldn't be used directly; http.Serve will use it.
+func (proxy *ProxyHttpServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ //r.Header["X-Forwarded-For"] = w.RemoteAddr()
+ if r.Method == "CONNECT" {
+ proxy.handleHttps(w, r)
+ } else {
+ ctx := &ProxyCtx{Req: r, Session: atomic.AddInt64(&proxy.sess, 1), proxy: proxy}
+
+ var err error
+ ctx.Logf("Got request %v %v %v %v", r.URL.Path, r.Host, r.Method, r.URL.String())
+ if !r.URL.IsAbs() {
+ proxy.NonproxyHandler.ServeHTTP(w, r)
+ return
+ }
+ r, resp := proxy.filterRequest(r, ctx)
+
+ if resp == nil {
+ removeProxyHeaders(ctx, r)
+ resp, err = ctx.RoundTrip(r)
+ if err != nil {
+ ctx.Error = err
+ resp = proxy.filterResponse(nil, ctx)
+ if resp == nil {
+					ctx.Logf("error reading response %v: %v", r.URL.Host, err.Error())
+ http.Error(w, err.Error(), 500)
+ return
+ }
+ }
+ ctx.Logf("Received response %v", resp.Status)
+ }
+ origBody := resp.Body
+ resp = proxy.filterResponse(resp, ctx)
+ defer origBody.Close()
+ ctx.Logf("Copying response to client %v [%d]", resp.Status, resp.StatusCode)
+		// http.ResponseWriter will take care of filling in the correct response length.
+		// Setting it now might impose a wrong value, contradicting the new body the
+		// user returned.
+		// We keep the original body so that we remove the header only if the body changed.
+		// This prevents problems with HEAD requests, where there is no body, yet the
+		// Content-Length header should still be set.
+ if origBody != resp.Body {
+ resp.Header.Del("Content-Length")
+ }
+ copyHeaders(w.Header(), resp.Header, proxy.KeepDestinationHeaders)
+ w.WriteHeader(resp.StatusCode)
+ nr, err := io.Copy(w, resp.Body)
+ if err := resp.Body.Close(); err != nil {
+ ctx.Warnf("Can't close response body %v", err)
+ }
+ ctx.Logf("Copied %v bytes to client error=%v", nr, err)
+ }
+}
+
+// NewProxyHttpServer creates and returns a proxy server, logging to stderr by default
+func NewProxyHttpServer() *ProxyHttpServer {
+ proxy := ProxyHttpServer{
+ Logger: log.New(os.Stderr, "", log.LstdFlags),
+ reqHandlers: []ReqHandler{},
+ respHandlers: []RespHandler{},
+ httpsHandlers: []HttpsHandler{},
+ NonproxyHandler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ http.Error(w, "This is a proxy server. Does not respond to non-proxy requests.", 500)
+ }),
+ Tr: &http.Transport{TLSClientConfig: tlsClientSkipVerify, Proxy: http.ProxyFromEnvironment},
+ }
+ proxy.ConnectDial = dialerFromEnv(&proxy)
+
+ return &proxy
+}
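
As a usage note (not part of the diff): `ProxyHttpServer` is a plain `http.Handler`, so the simplest deployment is `http.ListenAndServe` with it, and `NewConnectDialToProxy` from `https.go` above lets CONNECT tunnels be chained through an upstream proxy. A minimal sketch, with the upstream address as a placeholder:

```go
package main

import (
	"log"
	"net/http"

	"github.com/elazarl/goproxy"
)

func main() {
	proxy := goproxy.NewProxyHttpServer()
	proxy.Verbose = true // log each request handled by the proxy

	// Chain CONNECT tunnels through an upstream proxy instead of dialing the
	// target directly; "http://upstream-proxy:3128" is a placeholder address.
	proxy.ConnectDial = proxy.NewConnectDialToProxy("http://upstream-proxy:3128")

	log.Fatal(http.ListenAndServe(":8080", proxy))
}
```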
diff --git a/vendor/github.com/elazarl/goproxy/responses.go b/vendor/github.com/elazarl/goproxy/responses.go
new file mode 100644
index 000000000..e1bf28fc2
--- /dev/null
+++ b/vendor/github.com/elazarl/goproxy/responses.go
@@ -0,0 +1,39 @@
+package goproxy
+
+import (
+ "bytes"
+ "io/ioutil"
+ "net/http"
+)
+
+// NewResponse generates a valid http response to the given request; the response
+// will have the given contentType and http status.
+// Typical usage, refuse to process requests to local addresses:
+//
+// proxy.OnRequest(IsLocalHost()).DoFunc(func(r *http.Request, ctx *goproxy.ProxyCtx) (*http.Request,*http.Response) {
+// return nil,NewResponse(r,goproxy.ContentTypeHtml,http.StatusUnauthorized,
+// `Can't use proxy for local addresses`)
+// })
+func NewResponse(r *http.Request, contentType string, status int, body string) *http.Response {
+ resp := &http.Response{}
+ resp.Request = r
+ resp.TransferEncoding = r.TransferEncoding
+ resp.Header = make(http.Header)
+ resp.Header.Add("Content-Type", contentType)
+ resp.StatusCode = status
+ resp.Status = http.StatusText(status)
+ buf := bytes.NewBufferString(body)
+ resp.ContentLength = int64(buf.Len())
+ resp.Body = ioutil.NopCloser(buf)
+ return resp
+}
+
+const (
+ ContentTypeText = "text/plain"
+ ContentTypeHtml = "text/html"
+)
+
+// Alias for NewResponse(r,ContentTypeText,http.StatusAccepted,text)
+func TextResponse(r *http.Request, text string) *http.Response {
+ return NewResponse(r, ContentTypeText, http.StatusAccepted, text)
+}
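
The doc comment above already sketches `NewResponse` in a request hook; purely as an illustrative variant (the `OnRequest`/`DstHostIs`/`DoFunc` helpers are assumed from goproxy's dispatch code, and the blocked host name is hypothetical), a handler that short-circuits matching requests with a canned 403 could look like:

```go
package main

import (
	"log"
	"net/http"

	"github.com/elazarl/goproxy"
)

func main() {
	proxy := goproxy.NewProxyHttpServer()

	// Short-circuit requests to a hypothetical blocked host with a canned reply
	// built by NewResponse instead of forwarding them upstream.
	proxy.OnRequest(goproxy.DstHostIs("blocked.example.com")).DoFunc(
		func(req *http.Request, ctx *goproxy.ProxyCtx) (*http.Request, *http.Response) {
			return req, goproxy.NewResponse(req, goproxy.ContentTypeText,
				http.StatusForbidden, "access to this host is blocked by policy")
		})

	log.Fatal(http.ListenAndServe(":8080", proxy))
}
```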
diff --git a/vendor/github.com/elazarl/goproxy/signer.go b/vendor/github.com/elazarl/goproxy/signer.go
new file mode 100644
index 000000000..11a98de45
--- /dev/null
+++ b/vendor/github.com/elazarl/goproxy/signer.go
@@ -0,0 +1,88 @@
+package goproxy
+
+import (
+ "crypto/rsa"
+ "crypto/sha1"
+ "crypto/tls"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "math/big"
+ "net"
+ "runtime"
+ "sort"
+ "time"
+)
+
+func hashSorted(lst []string) []byte {
+ c := make([]string, len(lst))
+ copy(c, lst)
+ sort.Strings(c)
+ h := sha1.New()
+ for _, s := range c {
+ h.Write([]byte(s + ","))
+ }
+ return h.Sum(nil)
+}
+
+func hashSortedBigInt(lst []string) *big.Int {
+ rv := new(big.Int)
+ rv.SetBytes(hashSorted(lst))
+ return rv
+}
+
+var goproxySignerVersion = ":goroxy1"
+
+func signHost(ca tls.Certificate, hosts []string) (cert *tls.Certificate, err error) {
+ var x509ca *x509.Certificate
+
+ // Use the provided ca and not the global GoproxyCa for certificate generation.
+ if x509ca, err = x509.ParseCertificate(ca.Certificate[0]); err != nil {
+ return
+ }
+ start := time.Unix(0, 0)
+ end, err := time.Parse("2006-01-02", "2049-12-31")
+ if err != nil {
+ panic(err)
+ }
+ hash := hashSorted(append(hosts, goproxySignerVersion, ":"+runtime.Version()))
+ serial := new(big.Int)
+ serial.SetBytes(hash)
+ template := x509.Certificate{
+ // TODO(elazar): instead of this ugly hack, just encode the certificate and hash the binary form.
+ SerialNumber: serial,
+ Issuer: x509ca.Subject,
+ Subject: pkix.Name{
+ Organization: []string{"GoProxy untrusted MITM proxy Inc"},
+ },
+ NotBefore: start,
+ NotAfter: end,
+
+ KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
+ ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
+ BasicConstraintsValid: true,
+ }
+ for _, h := range hosts {
+ if ip := net.ParseIP(h); ip != nil {
+ template.IPAddresses = append(template.IPAddresses, ip)
+ } else {
+ template.DNSNames = append(template.DNSNames, h)
+ template.Subject.CommonName = h
+ }
+ }
+ var csprng CounterEncryptorRand
+ if csprng, err = NewCounterEncryptorRandFromKey(ca.PrivateKey, hash); err != nil {
+ return
+ }
+ var certpriv *rsa.PrivateKey
+ if certpriv, err = rsa.GenerateKey(&csprng, 2048); err != nil {
+ return
+ }
+ var derBytes []byte
+ if derBytes, err = x509.CreateCertificate(&csprng, &template, x509ca, &certpriv.PublicKey, ca.PrivateKey); err != nil {
+ return
+ }
+ return &tls.Certificate{
+ Certificate: [][]byte{derBytes, ca.Certificate[0]},
+ PrivateKey: certpriv,
+ }, nil
+}
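
`signHost` generates a fresh leaf certificate (and RSA key) for every MITM'd host, which is relatively expensive; `TLSConfigFromCA` therefore consults `ctx.certStore.Fetch` first when a `CertStore` is set on the proxy. The `CertStorage` interface itself is defined elsewhere in the vendored package; assuming the `Fetch(hostname, gen)` shape used above, a naive in-memory cache could look like this sketch:

```go
package main

import (
	"crypto/tls"
	"log"
	"net/http"
	"sync"

	"github.com/elazarl/goproxy"
)

// memCertStore is a naive in-memory certificate cache, assuming the
// CertStorage shape used by TLSConfigFromCA: Fetch returns a cached
// certificate for the hostname or generates and stores a new one.
type memCertStore struct {
	mu    sync.Mutex
	certs map[string]*tls.Certificate
}

func newMemCertStore() *memCertStore {
	return &memCertStore{certs: make(map[string]*tls.Certificate)}
}

func (s *memCertStore) Fetch(hostname string, gen func() (*tls.Certificate, error)) (*tls.Certificate, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if cert, ok := s.certs[hostname]; ok {
		return cert, nil
	}
	cert, err := gen() // falls back to signHost via the genCert closure
	if err != nil {
		return nil, err
	}
	s.certs[hostname] = cert
	return cert, nil
}

func main() {
	proxy := goproxy.NewProxyHttpServer()
	// Avoid re-signing a certificate for every MITM'd CONNECT to the same host.
	proxy.CertStore = newMemCertStore()
	log.Fatal(http.ListenAndServe(":8080", proxy))
}
```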
diff --git a/vendor/github.com/moby/term/doc.go b/vendor/github.com/moby/term/doc.go
new file mode 100644
index 000000000..c9bc03244
--- /dev/null
+++ b/vendor/github.com/moby/term/doc.go
@@ -0,0 +1,3 @@
+// Package term provides structures and helper functions to work with
+// terminal (state, sizes).
+package term
diff --git a/vendor/github.com/moby/term/tc.go b/vendor/github.com/moby/term/tc.go
deleted file mode 100644
index 8a5e09f58..000000000
--- a/vendor/github.com/moby/term/tc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-//go:build !windows
-// +build !windows
-
-package term
-
-import (
- "golang.org/x/sys/unix"
-)
-
-func tcget(fd uintptr) (*Termios, error) {
- p, err := unix.IoctlGetTermios(int(fd), getTermios)
- if err != nil {
- return nil, err
- }
- return p, nil
-}
-
-func tcset(fd uintptr, p *Termios) error {
- return unix.IoctlSetTermios(int(fd), setTermios, p)
-}
diff --git a/vendor/github.com/moby/term/term.go b/vendor/github.com/moby/term/term.go
index 2dd3d090d..f9d8988ef 100644
--- a/vendor/github.com/moby/term/term.go
+++ b/vendor/github.com/moby/term/term.go
@@ -1,100 +1,85 @@
-//go:build !windows
-// +build !windows
-
-// Package term provides structures and helper functions to work with
-// terminal (state, sizes).
package term
-import (
- "errors"
- "io"
- "os"
-
- "golang.org/x/sys/unix"
-)
-
-// ErrInvalidState is returned if the state of the terminal is invalid.
-var ErrInvalidState = errors.New("Invalid terminal state")
+import "io"
-// State represents the state of the terminal.
-type State struct {
- termios Termios
-}
+// State holds the platform-specific state / console mode for the terminal.
+type State terminalState
// Winsize represents the size of the terminal window.
type Winsize struct {
Height uint16
Width uint16
- x uint16
- y uint16
+
+ // Only used on Unix
+ x uint16
+ y uint16
}
// StdStreams returns the standard streams (stdin, stdout, stderr).
+//
+// On Windows, it attempts to turn on VT handling on all std handles if
+// supported, or falls back to terminal emulation. On Unix, this returns
+// the standard [os.Stdin], [os.Stdout] and [os.Stderr].
func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
- return os.Stdin, os.Stdout, os.Stderr
+ return stdStreams()
}
// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal.
-func GetFdInfo(in interface{}) (uintptr, bool) {
- var inFd uintptr
- var isTerminalIn bool
- if file, ok := in.(*os.File); ok {
- inFd = file.Fd()
- isTerminalIn = IsTerminal(inFd)
- }
- return inFd, isTerminalIn
+func GetFdInfo(in interface{}) (fd uintptr, isTerminal bool) {
+ return getFdInfo(in)
+}
+
+// GetWinsize returns the window size based on the specified file descriptor.
+func GetWinsize(fd uintptr) (*Winsize, error) {
+ return getWinsize(fd)
+}
+
+// SetWinsize tries to set the specified window size for the specified file
+// descriptor. It is only implemented on Unix, and returns an error on Windows.
+func SetWinsize(fd uintptr, ws *Winsize) error {
+ return setWinsize(fd, ws)
}
// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal(fd uintptr) bool {
- _, err := tcget(fd)
- return err == nil
+ return isTerminal(fd)
}
// RestoreTerminal restores the terminal connected to the given file descriptor
// to a previous state.
func RestoreTerminal(fd uintptr, state *State) error {
- if state == nil {
- return ErrInvalidState
- }
- return tcset(fd, &state.termios)
+ return restoreTerminal(fd, state)
}
// SaveState saves the state of the terminal connected to the given file descriptor.
func SaveState(fd uintptr) (*State, error) {
- termios, err := tcget(fd)
- if err != nil {
- return nil, err
- }
- return &State{termios: *termios}, nil
+ return saveState(fd)
}
// DisableEcho applies the specified state to the terminal connected to the file
// descriptor, with echo disabled.
func DisableEcho(fd uintptr, state *State) error {
- newState := state.termios
- newState.Lflag &^= unix.ECHO
-
- if err := tcset(fd, &newState); err != nil {
- return err
- }
- return nil
+ return disableEcho(fd, state)
}
// SetRawTerminal puts the terminal connected to the given file descriptor into
-// raw mode and returns the previous state. On UNIX, this puts both the input
-// and output into raw mode. On Windows, it only puts the input into raw mode.
-func SetRawTerminal(fd uintptr) (*State, error) {
- oldState, err := MakeRaw(fd)
- if err != nil {
- return nil, err
- }
- return oldState, err
+// raw mode and returns the previous state. On UNIX, this is the equivalent of
+// [MakeRaw], and puts both the input and output into raw mode. On Windows, it
+// only puts the input into raw mode.
+func SetRawTerminal(fd uintptr) (previousState *State, err error) {
+ return setRawTerminal(fd)
}
// SetRawTerminalOutput puts the output of terminal connected to the given file
// descriptor into raw mode. On UNIX, this does nothing and returns nil for the
// state. On Windows, it disables LF -> CRLF translation.
-func SetRawTerminalOutput(fd uintptr) (*State, error) {
- return nil, nil
+func SetRawTerminalOutput(fd uintptr) (previousState *State, err error) {
+ return setRawTerminalOutput(fd)
+}
+
+// MakeRaw puts the terminal (Windows Console) connected to the
+// given file descriptor into raw mode and returns the previous state of
+// the terminal so that it can be restored.
+func MakeRaw(fd uintptr) (previousState *State, err error) {
+ return makeRaw(fd)
}
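
The rewritten `term.go` is now a thin façade over per-platform implementations (`term_unix.go`/`term_windows.go` below), but the exported API keeps its shape. A minimal caller, shown only as a sketch, stays platform-agnostic:

```go
package main

import (
	"fmt"
	"os"

	"github.com/moby/term"
)

func main() {
	fd, isTerm := term.GetFdInfo(os.Stdin)
	if !isTerm {
		fmt.Fprintln(os.Stderr, "stdin is not a terminal")
		return
	}

	// Put the terminal into raw mode and always restore the previous state;
	// the platform-specific work happens in makeRaw/restoreTerminal.
	oldState, err := term.MakeRaw(fd)
	if err != nil {
		fmt.Fprintln(os.Stderr, "make raw:", err)
		return
	}
	defer term.RestoreTerminal(fd, oldState)

	if ws, err := term.GetWinsize(fd); err == nil {
		fmt.Printf("terminal size: %dx%d\r\n", ws.Width, ws.Height)
	}
}
```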
diff --git a/vendor/github.com/moby/term/term_unix.go b/vendor/github.com/moby/term/term_unix.go
new file mode 100644
index 000000000..2ec7706a1
--- /dev/null
+++ b/vendor/github.com/moby/term/term_unix.go
@@ -0,0 +1,98 @@
+//go:build !windows
+// +build !windows
+
+package term
+
+import (
+ "errors"
+ "io"
+ "os"
+
+ "golang.org/x/sys/unix"
+)
+
+// ErrInvalidState is returned if the state of the terminal is invalid.
+//
+// Deprecated: ErrInvalidState is no longer used.
+var ErrInvalidState = errors.New("Invalid terminal state")
+
+// terminalState holds the platform-specific state / console mode for the terminal.
+type terminalState struct {
+ termios unix.Termios
+}
+
+func stdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
+ return os.Stdin, os.Stdout, os.Stderr
+}
+
+func getFdInfo(in interface{}) (uintptr, bool) {
+ var inFd uintptr
+ var isTerminalIn bool
+ if file, ok := in.(*os.File); ok {
+ inFd = file.Fd()
+ isTerminalIn = isTerminal(inFd)
+ }
+ return inFd, isTerminalIn
+}
+
+func getWinsize(fd uintptr) (*Winsize, error) {
+ uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ)
+ ws := &Winsize{Height: uws.Row, Width: uws.Col, x: uws.Xpixel, y: uws.Ypixel}
+ return ws, err
+}
+
+func setWinsize(fd uintptr, ws *Winsize) error {
+ return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, &unix.Winsize{
+ Row: ws.Height,
+ Col: ws.Width,
+ Xpixel: ws.x,
+ Ypixel: ws.y,
+ })
+}
+
+func isTerminal(fd uintptr) bool {
+ _, err := tcget(fd)
+ return err == nil
+}
+
+func restoreTerminal(fd uintptr, state *State) error {
+ if state == nil {
+ return errors.New("invalid terminal state")
+ }
+ return tcset(fd, &state.termios)
+}
+
+func saveState(fd uintptr) (*State, error) {
+ termios, err := tcget(fd)
+ if err != nil {
+ return nil, err
+ }
+ return &State{termios: *termios}, nil
+}
+
+func disableEcho(fd uintptr, state *State) error {
+ newState := state.termios
+ newState.Lflag &^= unix.ECHO
+
+ return tcset(fd, &newState)
+}
+
+func setRawTerminal(fd uintptr) (*State, error) {
+ return makeRaw(fd)
+}
+
+func setRawTerminalOutput(fd uintptr) (*State, error) {
+ return nil, nil
+}
+
+func tcget(fd uintptr) (*unix.Termios, error) {
+ p, err := unix.IoctlGetTermios(int(fd), getTermios)
+ if err != nil {
+ return nil, err
+ }
+ return p, nil
+}
+
+func tcset(fd uintptr, p *unix.Termios) error {
+ return unix.IoctlSetTermios(int(fd), setTermios, p)
+}
diff --git a/vendor/github.com/moby/term/term_windows.go b/vendor/github.com/moby/term/term_windows.go
index 3cdc8edbd..81ccff042 100644
--- a/vendor/github.com/moby/term/term_windows.go
+++ b/vendor/github.com/moby/term/term_windows.go
@@ -1,6 +1,7 @@
package term
import (
+ "fmt"
"io"
"os"
"os/signal"
@@ -9,22 +10,15 @@ import (
"golang.org/x/sys/windows"
)
-// State holds the console mode for the terminal.
-type State struct {
+// terminalState holds the platform-specific state / console mode for the terminal.
+type terminalState struct {
mode uint32
}
-// Winsize is used for window size.
-type Winsize struct {
- Height uint16
- Width uint16
-}
-
// vtInputSupported is true if winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported by the console
var vtInputSupported bool
-// StdStreams returns the standard streams (stdin, stdout, stderr).
-func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
+func stdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
// Turn on VT handling on all std handles, if possible. This might
// fail, in which case we will fall back to terminal emulation.
var (
@@ -87,16 +81,14 @@ func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
stdErr = os.Stderr
}
- return
+ return stdIn, stdOut, stdErr
}
-// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal.
-func GetFdInfo(in interface{}) (uintptr, bool) {
+func getFdInfo(in interface{}) (uintptr, bool) {
return windowsconsole.GetHandleInfo(in)
}
-// GetWinsize returns the window size based on the specified file descriptor.
-func GetWinsize(fd uintptr) (*Winsize, error) {
+func getWinsize(fd uintptr) (*Winsize, error) {
var info windows.ConsoleScreenBufferInfo
if err := windows.GetConsoleScreenBufferInfo(windows.Handle(fd), &info); err != nil {
return nil, err
@@ -110,21 +102,21 @@ func GetWinsize(fd uintptr) (*Winsize, error) {
return winsize, nil
}
-// IsTerminal returns true if the given file descriptor is a terminal.
-func IsTerminal(fd uintptr) bool {
+func setWinsize(fd uintptr, ws *Winsize) error {
+ return fmt.Errorf("not implemented on Windows")
+}
+
+func isTerminal(fd uintptr) bool {
var mode uint32
err := windows.GetConsoleMode(windows.Handle(fd), &mode)
return err == nil
}
-// RestoreTerminal restores the terminal connected to the given file descriptor
-// to a previous state.
-func RestoreTerminal(fd uintptr, state *State) error {
+func restoreTerminal(fd uintptr, state *State) error {
return windows.SetConsoleMode(windows.Handle(fd), state.mode)
}
-// SaveState saves the state of the terminal connected to the given file descriptor.
-func SaveState(fd uintptr) (*State, error) {
+func saveState(fd uintptr) (*State, error) {
var mode uint32
if err := windows.GetConsoleMode(windows.Handle(fd), &mode); err != nil {
@@ -134,9 +126,8 @@ func SaveState(fd uintptr) (*State, error) {
return &State{mode: mode}, nil
}
-// DisableEcho disables echo for the terminal connected to the given file descriptor.
-// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
-func DisableEcho(fd uintptr, state *State) error {
+func disableEcho(fd uintptr, state *State) error {
+ // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
mode := state.mode
mode &^= windows.ENABLE_ECHO_INPUT
mode |= windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT
@@ -150,69 +141,27 @@ func DisableEcho(fd uintptr, state *State) error {
return nil
}
-// SetRawTerminal puts the terminal connected to the given file descriptor into
-// raw mode and returns the previous state. On UNIX, this puts both the input
-// and output into raw mode. On Windows, it only puts the input into raw mode.
-func SetRawTerminal(fd uintptr) (*State, error) {
- state, err := MakeRaw(fd)
+func setRawTerminal(fd uintptr) (*State, error) {
+ oldState, err := MakeRaw(fd)
if err != nil {
return nil, err
}
// Register an interrupt handler to catch and restore prior state
- restoreAtInterrupt(fd, state)
- return state, err
+ restoreAtInterrupt(fd, oldState)
+ return oldState, err
}
-// SetRawTerminalOutput puts the output of terminal connected to the given file
-// descriptor into raw mode. On UNIX, this does nothing and returns nil for the
-// state. On Windows, it disables LF -> CRLF translation.
-func SetRawTerminalOutput(fd uintptr) (*State, error) {
- state, err := SaveState(fd)
+func setRawTerminalOutput(fd uintptr) (*State, error) {
+ oldState, err := saveState(fd)
if err != nil {
return nil, err
}
// Ignore failures, since winterm.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this
// version of Windows.
- _ = windows.SetConsoleMode(windows.Handle(fd), state.mode|windows.DISABLE_NEWLINE_AUTO_RETURN)
- return state, err
-}
-
-// MakeRaw puts the terminal (Windows Console) connected to the given file descriptor into raw
-// mode and returns the previous state of the terminal so that it can be restored.
-func MakeRaw(fd uintptr) (*State, error) {
- state, err := SaveState(fd)
- if err != nil {
- return nil, err
- }
-
- mode := state.mode
-
- // See
- // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
- // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
-
- // Disable these modes
- mode &^= windows.ENABLE_ECHO_INPUT
- mode &^= windows.ENABLE_LINE_INPUT
- mode &^= windows.ENABLE_MOUSE_INPUT
- mode &^= windows.ENABLE_WINDOW_INPUT
- mode &^= windows.ENABLE_PROCESSED_INPUT
-
- // Enable these modes
- mode |= windows.ENABLE_EXTENDED_FLAGS
- mode |= windows.ENABLE_INSERT_MODE
- mode |= windows.ENABLE_QUICK_EDIT_MODE
- if vtInputSupported {
- mode |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT
- }
-
- err = windows.SetConsoleMode(windows.Handle(fd), mode)
- if err != nil {
- return nil, err
- }
- return state, nil
+ _ = windows.SetConsoleMode(windows.Handle(fd), oldState.mode|windows.DISABLE_NEWLINE_AUTO_RETURN)
+ return oldState, err
}
func restoreAtInterrupt(fd uintptr, state *State) {
diff --git a/vendor/github.com/moby/term/termios.go b/vendor/github.com/moby/term/termios_unix.go
similarity index 50%
rename from vendor/github.com/moby/term/termios.go
rename to vendor/github.com/moby/term/termios_unix.go
index 99c0f7de6..60c823783 100644
--- a/vendor/github.com/moby/term/termios.go
+++ b/vendor/github.com/moby/term/termios_unix.go
@@ -8,12 +8,11 @@ import (
)
// Termios is the Unix API for terminal I/O.
+//
+// Deprecated: use [unix.Termios].
type Termios = unix.Termios
-// MakeRaw puts the terminal connected to the given file descriptor into raw
-// mode and returns the previous state of the terminal so that it can be
-// restored.
-func MakeRaw(fd uintptr) (*State, error) {
+func makeRaw(fd uintptr) (*State, error) {
termios, err := tcget(fd)
if err != nil {
return nil, err
@@ -21,10 +20,10 @@ func MakeRaw(fd uintptr) (*State, error) {
oldState := State{termios: *termios}
- termios.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON)
+ termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON
termios.Oflag &^= unix.OPOST
- termios.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN)
- termios.Cflag &^= (unix.CSIZE | unix.PARENB)
+ termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN
+ termios.Cflag &^= unix.CSIZE | unix.PARENB
termios.Cflag |= unix.CS8
termios.Cc[unix.VMIN] = 1
termios.Cc[unix.VTIME] = 0
diff --git a/vendor/github.com/moby/term/termios_windows.go b/vendor/github.com/moby/term/termios_windows.go
new file mode 100644
index 000000000..5be4e7601
--- /dev/null
+++ b/vendor/github.com/moby/term/termios_windows.go
@@ -0,0 +1,37 @@
+package term
+
+import "golang.org/x/sys/windows"
+
+func makeRaw(fd uintptr) (*State, error) {
+ state, err := SaveState(fd)
+ if err != nil {
+ return nil, err
+ }
+
+ mode := state.mode
+
+ // See
+ // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
+ // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
+
+ // Disable these modes
+ mode &^= windows.ENABLE_ECHO_INPUT
+ mode &^= windows.ENABLE_LINE_INPUT
+ mode &^= windows.ENABLE_MOUSE_INPUT
+ mode &^= windows.ENABLE_WINDOW_INPUT
+ mode &^= windows.ENABLE_PROCESSED_INPUT
+
+ // Enable these modes
+ mode |= windows.ENABLE_EXTENDED_FLAGS
+ mode |= windows.ENABLE_INSERT_MODE
+ mode |= windows.ENABLE_QUICK_EDIT_MODE
+ if vtInputSupported {
+ mode |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT
+ }
+
+ err = windows.SetConsoleMode(windows.Handle(fd), mode)
+ if err != nil {
+ return nil, err
+ }
+ return state, nil
+}
diff --git a/vendor/github.com/moby/term/windows/ansi_reader.go b/vendor/github.com/moby/term/windows/ansi_reader.go
index f32aa537e..fb34c547a 100644
--- a/vendor/github.com/moby/term/windows/ansi_reader.go
+++ b/vendor/github.com/moby/term/windows/ansi_reader.go
@@ -195,10 +195,10 @@ func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) stri
// +Key generates ESC N Key
if !control && alt {
- return ansiterm.KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar))
+ return ansiterm.KEY_ESC_N + strings.ToLower(string(rune(keyEvent.UnicodeChar)))
}
- return string(keyEvent.UnicodeChar)
+ return string(rune(keyEvent.UnicodeChar))
}
// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string.
diff --git a/vendor/github.com/moby/term/windows/console.go b/vendor/github.com/moby/term/windows/console.go
index 116b74e8f..21e57bd52 100644
--- a/vendor/github.com/moby/term/windows/console.go
+++ b/vendor/github.com/moby/term/windows/console.go
@@ -30,8 +30,11 @@ func GetHandleInfo(in interface{}) (uintptr, bool) {
// IsConsole returns true if the given file descriptor is a Windows Console.
// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console.
-// Deprecated: use golang.org/x/sys/windows.GetConsoleMode() or golang.org/x/term.IsTerminal()
-var IsConsole = isConsole
+//
+// Deprecated: use [windows.GetConsoleMode] or [golang.org/x/term.IsTerminal].
+func IsConsole(fd uintptr) bool {
+ return isConsole(fd)
+}
func isConsole(fd uintptr) bool {
var mode uint32
diff --git a/vendor/github.com/moby/term/winsize.go b/vendor/github.com/moby/term/winsize.go
deleted file mode 100644
index bea8d4595..000000000
--- a/vendor/github.com/moby/term/winsize.go
+++ /dev/null
@@ -1,21 +0,0 @@
-//go:build !windows
-// +build !windows
-
-package term
-
-import (
- "golang.org/x/sys/unix"
-)
-
-// GetWinsize returns the window size based on the specified file descriptor.
-func GetWinsize(fd uintptr) (*Winsize, error) {
- uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ)
- ws := &Winsize{Height: uws.Row, Width: uws.Col, x: uws.Xpixel, y: uws.Ypixel}
- return ws, err
-}
-
-// SetWinsize tries to set the specified window size for the specified file descriptor.
-func SetWinsize(fd uintptr, ws *Winsize) error {
- uws := &unix.Winsize{Row: ws.Height, Col: ws.Width, Xpixel: ws.x, Ypixel: ws.y}
- return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, uws)
-}
diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md
index 608aa6e1a..0854d298e 100644
--- a/vendor/google.golang.org/grpc/CONTRIBUTING.md
+++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md
@@ -66,7 +66,7 @@ How to get your contributions merged smoothly and quickly.
- **All tests need to be passing** before your change can be merged. We
recommend you **run tests locally** before creating your PR to catch breakages
early on.
- - `VET_SKIP_PROTO=1 ./vet.sh` to catch vet errors
+ - `./scripts/vet.sh` to catch vet errors
- `go test -cpu 1,4 -timeout 7m ./...` to run the tests
- `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode
diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md
index c6672c0a3..6a8a07781 100644
--- a/vendor/google.golang.org/grpc/MAINTAINERS.md
+++ b/vendor/google.golang.org/grpc/MAINTAINERS.md
@@ -9,6 +9,7 @@ for general contribution guidelines.
## Maintainers (in alphabetical order)
+- [atollena](https://github.com/atollena), Datadog, Inc.
- [cesarghali](https://github.com/cesarghali), Google LLC
- [dfawley](https://github.com/dfawley), Google LLC
- [easwars](https://github.com/easwars), Google LLC
diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile
index 1f8960922..be38384ff 100644
--- a/vendor/google.golang.org/grpc/Makefile
+++ b/vendor/google.golang.org/grpc/Makefile
@@ -30,17 +30,20 @@ testdeps:
GO111MODULE=on go get -d -v -t google.golang.org/grpc/...
vet: vetdeps
- ./vet.sh
+ ./scripts/vet.sh
vetdeps:
- ./vet.sh -install
+ ./scripts/vet.sh -install
.PHONY: \
all \
build \
clean \
+ deps \
proto \
test \
+ testsubmodule \
testrace \
+ testdeps \
vet \
vetdeps
diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
index 856c75dd4..1afb1e84a 100644
--- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
+++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
@@ -18,7 +18,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.32.0
+// protoc-gen-go v1.33.0
// protoc v4.25.2
// source: grpc/binlog/v1/binarylog.proto
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index c7f260711..2359f94b8 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -37,7 +37,6 @@ import (
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/internal/idle"
- "google.golang.org/grpc/internal/pretty"
iresolver "google.golang.org/grpc/internal/resolver"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
@@ -121,8 +120,9 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires
// https://github.com/grpc/grpc/blob/master/doc/naming.md. e.g. to use dns
// resolver, a "dns:///" prefix should be applied to the target.
//
-// The DialOptions returned by WithBlock, WithTimeout, and
-// WithReturnConnectionError are ignored by this function.
+// The DialOptions returned by WithBlock, WithTimeout,
+// WithReturnConnectionError, and FailOnNonTempDialError are ignored by this
+// function.
func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) {
cc := &ClientConn{
target: target,
@@ -196,6 +196,8 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error)
}
// Dial calls DialContext(context.Background(), target, opts...).
+//
+// Deprecated: use NewClient instead. Will be supported throughout 1.x.
func Dial(target string, opts ...DialOption) (*ClientConn, error) {
return DialContext(context.Background(), target, opts...)
}
@@ -209,6 +211,8 @@ func Dial(target string, opts ...DialOption) (*ClientConn, error) {
// "passthrough" for backward compatibility. This distinction should not matter
// to most users, but could matter to legacy users that specify a custom dialer
// and expect it to receive the target string directly.
+//
+// Deprecated: use NewClient instead. Will be supported throughout 1.x.
func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) {
// At the end of this method, we kick the channel out of idle, rather than
// waiting for the first rpc.
@@ -838,6 +842,9 @@ func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.
stateChan: make(chan struct{}),
}
ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
+ // Start with our address set to the first address; this may be updated if
+ // we connect to different addresses.
+ ac.channelz.ChannelMetrics.Target.Store(&addrs[0].Addr)
channelz.AddTraceEvent(logger, ac.channelz, 0, &channelz.TraceEvent{
Desc: "Subchannel created",
@@ -929,10 +936,14 @@ func equalAddresses(a, b []resolver.Address) bool {
// updateAddrs updates ac.addrs with the new addresses list and handles active
// connections or connection attempts.
func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
- ac.mu.Lock()
- channelz.Infof(logger, ac.channelz, "addrConn: updateAddrs curAddr: %v, addrs: %v", pretty.ToJSON(ac.curAddr), pretty.ToJSON(addrs))
-
addrs = copyAddressesWithoutBalancerAttributes(addrs)
+ limit := len(addrs)
+ if limit > 5 {
+ limit = 5
+ }
+ channelz.Infof(logger, ac.channelz, "addrConn: updateAddrs addrs (%d of %d): %v", limit, len(addrs), addrs[:limit])
+
+ ac.mu.Lock()
if equalAddresses(ac.addrs, addrs) {
ac.mu.Unlock()
return
@@ -1167,6 +1178,10 @@ type addrConn struct {
// is received, transport is closed, ac has been torn down).
transport transport.ClientTransport // The current transport.
+ // This mutex is used on the RPC path, so its usage should be minimized as
+ // much as possible.
+ // TODO: Find a lock-free way to retrieve the transport and state from the
+ // addrConn.
mu sync.Mutex
curAddr resolver.Address // The current address.
addrs []resolver.Address // All addresses that the resolver resolved to.
@@ -1292,6 +1307,7 @@ func (ac *addrConn) resetTransport() {
func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error {
var firstConnErr error
for _, addr := range addrs {
+ ac.channelz.ChannelMetrics.Target.Store(&addr.Addr)
if ctx.Err() != nil {
return errConnClosing
}
@@ -1739,7 +1755,7 @@ func encodeAuthority(authority string) string {
return false
case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // Subdelim characters
return false
- case ':', '[', ']', '@': // Authority related delimeters
+ case ':', '[', ']', '@': // Authority related delimiters
return false
}
// Everything else must be escaped.
diff --git a/vendor/google.golang.org/grpc/codegen.sh b/vendor/google.golang.org/grpc/codegen.sh
deleted file mode 100644
index 4cdc6ba7c..000000000
--- a/vendor/google.golang.org/grpc/codegen.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/usr/bin/env bash
-
-# This script serves as an example to demonstrate how to generate the gRPC-Go
-# interface and the related messages from .proto file.
-#
-# It assumes the installation of i) Google proto buffer compiler at
-# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen
-# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have
-# not, please install them first.
-#
-# We recommend running this script at $GOPATH/src.
-#
-# If this is not what you need, feel free to make your own scripts. Again, this
-# script is for demonstration purpose.
-#
-proto=$1
-protoc --go_out=plugins=grpc:. $proto
diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go
index 08476ad1f..0b42c302b 100644
--- a/vendor/google.golang.org/grpc/codes/codes.go
+++ b/vendor/google.golang.org/grpc/codes/codes.go
@@ -235,7 +235,7 @@ func (c *Code) UnmarshalJSON(b []byte) error {
if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil {
if ci >= _maxCode {
- return fmt.Errorf("invalid code: %q", ci)
+ return fmt.Errorf("invalid code: %d", ci)
}
*c = Code(ci)
diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go
index f6b55c68b..665e790bb 100644
--- a/vendor/google.golang.org/grpc/credentials/credentials.go
+++ b/vendor/google.golang.org/grpc/credentials/credentials.go
@@ -30,7 +30,7 @@ import (
"google.golang.org/grpc/attributes"
icredentials "google.golang.org/grpc/internal/credentials"
- "google.golang.org/protobuf/protoadapt"
+ "google.golang.org/protobuf/proto"
)
// PerRPCCredentials defines the common interface for the credentials which need to
@@ -237,7 +237,7 @@ func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo {
}
// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one.
-// It returns success if 1) the condition is satisified or 2) AuthInfo struct does not implement GetCommonAuthInfo() method
+// It returns success if 1) the condition is satisfied or 2) AuthInfo struct does not implement GetCommonAuthInfo() method
// or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility.
//
// This API is experimental.
@@ -287,5 +287,5 @@ type ChannelzSecurityValue interface {
type OtherChannelzSecurityValue struct {
ChannelzSecurityValue
Name string
- Value protoadapt.MessageV1
+ Value proto.Message
}
diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go
index 402493224..00273702b 100644
--- a/vendor/google.golang.org/grpc/dialoptions.go
+++ b/vendor/google.golang.org/grpc/dialoptions.go
@@ -300,6 +300,9 @@ func withBackoff(bs internalbackoff.Strategy) DialOption {
//
// Use of this feature is not recommended. For more information, please see:
// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
+//
+// Deprecated: this DialOption is not supported by NewClient.
+// Will be supported throughout 1.x.
func WithBlock() DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.block = true
@@ -314,10 +317,8 @@ func WithBlock() DialOption {
// Use of this feature is not recommended. For more information, please see:
// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
+// Deprecated: this DialOption is not supported by NewClient.
+// Will be supported throughout 1.x.
func WithReturnConnectionError() DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.block = true
@@ -387,8 +388,8 @@ func WithCredentialsBundle(b credentials.Bundle) DialOption {
// WithTimeout returns a DialOption that configures a timeout for dialing a
// ClientConn initially. This is valid if and only if WithBlock() is present.
//
-// Deprecated: use DialContext instead of Dial and context.WithTimeout
-// instead. Will be supported throughout 1.x.
+// Deprecated: this DialOption is not supported by NewClient.
+// Will be supported throughout 1.x.
func WithTimeout(d time.Duration) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.timeout = d
@@ -470,9 +471,8 @@ func withBinaryLogger(bl binarylog.Logger) DialOption {
// Use of this feature is not recommended. For more information, please see:
// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// Deprecated: this DialOption is not supported by NewClient.
+// This API may be changed or removed in a
// later release.
func FailOnNonTempDialError(f bool) DialOption {
return newFuncDialOption(func(o *dialOptions) {
@@ -601,12 +601,22 @@ func WithDisableRetry() DialOption {
})
}
+// MaxHeaderListSizeDialOption is a DialOption that specifies the maximum
+// (uncompressed) size of header list that the client is prepared to accept.
+type MaxHeaderListSizeDialOption struct {
+ MaxHeaderListSize uint32
+}
+
+func (o MaxHeaderListSizeDialOption) apply(do *dialOptions) {
+ do.copts.MaxHeaderListSize = &o.MaxHeaderListSize
+}
+
// WithMaxHeaderListSize returns a DialOption that specifies the maximum
// (uncompressed) size of header list that the client is prepared to accept.
func WithMaxHeaderListSize(s uint32) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.MaxHeaderListSize = &s
- })
+ return MaxHeaderListSizeDialOption{
+ MaxHeaderListSize: s,
+ }
}
// WithDisableHealthCheck disables the LB channel health checking for all
@@ -648,7 +658,7 @@ func defaultDialOptions() dialOptions {
}
}
-// withGetMinConnectDeadline specifies the function that clientconn uses to
+// withMinConnectDeadline specifies the function that clientconn uses to
// get minConnectDeadline. This can be used to make connection attempts happen
// faster/slower.
//
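
The gRPC changes in this vendor bump deprecate `Dial`/`DialContext` and the blocking dial options in favor of `NewClient`, which connects lazily. A hedged migration sketch (target address and credentials are placeholders, not anything this diff configures):

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Prefer NewClient over the deprecated Dial/DialContext. Blocking options
	// such as WithBlock/WithTimeout/WithReturnConnectionError are ignored here.
	conn, err := grpc.NewClient("dns:///greeter.example.com:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithMaxHeaderListSize(1<<20), // now carried by MaxHeaderListSizeDialOption
	)
	if err != nil {
		log.Fatalf("grpc.NewClient: %v", err)
	}
	defer conn.Close()

	// Connection establishment is lazy; trigger it explicitly if needed.
	conn.Connect()
}
```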
diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
index 5bf880d41..6a93475a7 100644
--- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
+++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
@@ -17,7 +17,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.32.0
+// protoc-gen-go v1.33.0
// protoc v4.25.2
// source: grpc/health/v1/health.proto
diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
index 4c46c098d..8f793e6e8 100644
--- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
+++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
@@ -32,8 +32,8 @@ import (
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
-// Requires gRPC-Go v1.32.0 or later.
-const _ = grpc.SupportPackageIsVersion7
+// Requires gRPC-Go v1.62.0 or later.
+const _ = grpc.SupportPackageIsVersion8
const (
Health_Check_FullMethodName = "/grpc.health.v1.Health/Check"
@@ -81,8 +81,9 @@ func NewHealthClient(cc grpc.ClientConnInterface) HealthClient {
}
func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(HealthCheckResponse)
- err := c.cc.Invoke(ctx, Health_Check_FullMethodName, in, out, opts...)
+ err := c.cc.Invoke(ctx, Health_Check_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -90,11 +91,12 @@ func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts .
}
func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) {
- stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, opts...)
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+ stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
- x := &healthWatchClient{stream}
+ x := &healthWatchClient{ClientStream: stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
@@ -198,7 +200,7 @@ func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error {
if err := stream.RecvMsg(m); err != nil {
return err
}
- return srv.(HealthServer).Watch(m, &healthWatchServer{stream})
+ return srv.(HealthServer).Watch(m, &healthWatchServer{ServerStream: stream})
}
type Health_WatchServer interface {
diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go
index 6bf7f8739..13821a926 100644
--- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go
+++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go
@@ -75,7 +75,6 @@ func ParseConfig(cfg json.RawMessage) (serviceconfig.LoadBalancingConfig, error)
if err != nil {
return nil, fmt.Errorf("error parsing config for policy %q: %v", name, err)
}
-
return &lbConfig{childBuilder: builder, childConfig: cfg}, nil
}
diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
index 45d5e50ea..73bb4c4ee 100644
--- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
+++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
@@ -169,7 +169,6 @@ func (gsb *Balancer) latestBalancer() *balancerWrapper {
func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error {
// The resolver data is only relevant to the most recent LB Policy.
balToUpdate := gsb.latestBalancer()
-
gsbCfg, ok := state.BalancerConfig.(*lbConfig)
if ok {
// Switch to the child in the config unless it is already active.
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
index e8456a77c..aa4505a87 100644
--- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
+++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
@@ -65,7 +65,7 @@ type TruncatingMethodLogger struct {
callID uint64
idWithinCallGen *callIDGenerator
- sink Sink // TODO(blog): make this plugable.
+ sink Sink // TODO(blog): make this pluggable.
}
// NewTruncatingMethodLogger returns a new truncating method logger.
@@ -80,7 +80,7 @@ func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger {
callID: idGen.next(),
idWithinCallGen: &callIDGenerator{},
- sink: DefaultSink, // TODO(blog): make it plugable.
+ sink: DefaultSink, // TODO(blog): make it pluggable.
}
}
@@ -397,7 +397,7 @@ func metadataKeyOmit(key string) bool {
switch key {
case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te":
return true
- case "grpc-trace-bin": // grpc-trace-bin is special because it's visiable to users.
+ case "grpc-trace-bin": // grpc-trace-bin is special because it's visible to users.
return false
}
return strings.HasPrefix(key, "grpc-")
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
index 685a3cb41..9c915d9e4 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
@@ -28,9 +28,6 @@ import (
var (
// TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true)
- // AdvertiseCompressors is set if registered compressor should be advertised
- // ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false").
- AdvertiseCompressors = boolFromEnv("GRPC_GO_ADVERTISE_COMPRESSORS", true)
// RingHashCap indicates the maximum ring size which defaults to 4096
// entries but may be overridden by setting the environment variable
// "GRPC_RING_HASH_CAP". This does not override the default bounds
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go
index 9f4090967..e8d866984 100644
--- a/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go
@@ -20,8 +20,6 @@ package grpcutil
import (
"strings"
-
- "google.golang.org/grpc/internal/envconfig"
)
// RegisteredCompressorNames holds names of the registered compressors.
@@ -40,8 +38,5 @@ func IsCompressorNameRegistered(name string) bool {
// RegisteredCompressors returns a string of registered compressor names
// separated by comma.
func RegisteredCompressors() string {
- if !envconfig.AdvertiseCompressors {
- return ""
- }
return strings.Join(RegisteredCompressorNames, ",")
}
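
With the GRPC_GO_ADVERTISE_COMPRESSORS escape hatch removed, RegisteredCompressors unconditionally returns whatever has been registered, so every registered compressor is advertised via grpc-accept-encoding. A minimal sketch of the registration side, using the in-tree gzip compressor:

    package main

    import (
        "fmt"

        "google.golang.org/grpc/encoding"
        _ "google.golang.org/grpc/encoding/gzip" // registering a compressor is enough to advertise it
    )

    func main() {
        // Registered compressors are now always listed in grpc-accept-encoding;
        // there is no longer an environment variable to suppress this.
        fmt.Println("gzip registered:", encoding.GetCompressor("gzip") != nil)
    }
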
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
index abab35e25..f3f52a59a 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
@@ -41,18 +41,24 @@ import (
"google.golang.org/grpc/serviceconfig"
)
-// EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB
-// addresses from SRV records. Must not be changed after init time.
-var EnableSRVLookups = false
-
-// ResolvingTimeout specifies the maximum duration for a DNS resolution request.
-// If the timeout expires before a response is received, the request will be canceled.
-//
-// It is recommended to set this value at application startup. Avoid modifying this variable
-// after initialization as it's not thread-safe for concurrent modification.
-var ResolvingTimeout = 30 * time.Second
-
-var logger = grpclog.Component("dns")
+var (
+ // EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB
+ // addresses from SRV records. Must not be changed after init time.
+ EnableSRVLookups = false
+
+ // MinResolutionInterval is the minimum interval at which re-resolutions are
+ // allowed. This helps to prevent excessive re-resolution.
+ MinResolutionInterval = 30 * time.Second
+
+ // ResolvingTimeout specifies the maximum duration for a DNS resolution request.
+ // If the timeout expires before a response is received, the request will be canceled.
+ //
+ // It is recommended to set this value at application startup. Avoid modifying this variable
+ // after initialization as it's not thread-safe for concurrent modification.
+ ResolvingTimeout = 30 * time.Second
+
+ logger = grpclog.Component("dns")
+)
func init() {
resolver.Register(NewBuilder())
@@ -208,7 +214,7 @@ func (d *dnsResolver) watcher() {
// Success resolving, wait for the next ResolveNow. However, also wait 30
// seconds at the very least to prevent constantly re-resolving.
backoffIndex = 1
- waitTime = internal.MinResolutionRate
+ waitTime = MinResolutionInterval
select {
case <-d.ctx.Done():
return
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go
index c7fc557d0..a7ecaf8d5 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go
@@ -28,7 +28,7 @@ import (
// NetResolver groups the methods on net.Resolver that are used by the DNS
// resolver implementation. This allows the default net.Resolver instance to be
-// overidden from tests.
+// overridden from tests.
type NetResolver interface {
LookupHost(ctx context.Context, host string) (addrs []string, err error)
LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error)
@@ -50,10 +50,6 @@ var (
// The following vars are overridden from tests.
var (
- // MinResolutionRate is the minimum rate at which re-resolutions are
- // allowed. This helps to prevent excessive re-resolution.
- MinResolutionRate = 30 * time.Second
-
// TimeAfterFunc is used by the DNS resolver to wait for the given duration
// to elapse. In non-test code, this is implemented by time.After. In test
// code, this can be used to control the amount of time the resolver is
diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
index 83c382982..3deadfb4a 100644
--- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
+++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
@@ -193,7 +193,7 @@ type goAway struct {
code http2.ErrCode
debugData []byte
headsUp bool
- closeConn error // if set, loopyWriter will exit, resulting in conn closure
+ closeConn error // if set, loopyWriter will exit with this error
}
func (*goAway) isTransportResponseFrame() bool { return false }
@@ -336,7 +336,7 @@ func (c *controlBuffer) put(it cbItem) error {
return err
}
-func (c *controlBuffer) executeAndPut(f func(it any) bool, it cbItem) (bool, error) {
+func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) {
var wakeUp bool
c.mu.Lock()
if c.err != nil {
@@ -344,7 +344,7 @@ func (c *controlBuffer) executeAndPut(f func(it any) bool, it cbItem) (bool, err
return false, c.err
}
if f != nil {
- if !f(it) { // f wasn't successful
+ if !f() { // f wasn't successful
c.mu.Unlock()
return false, nil
}
@@ -495,21 +495,22 @@ type loopyWriter struct {
ssGoAwayHandler func(*goAway) (bool, error)
}
-func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger) *loopyWriter {
+func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error)) *loopyWriter {
var buf bytes.Buffer
l := &loopyWriter{
- side: s,
- cbuf: cbuf,
- sendQuota: defaultWindowSize,
- oiws: defaultWindowSize,
- estdStreams: make(map[uint32]*outStream),
- activeStreams: newOutStreamList(),
- framer: fr,
- hBuf: &buf,
- hEnc: hpack.NewEncoder(&buf),
- bdpEst: bdpEst,
- conn: conn,
- logger: logger,
+ side: s,
+ cbuf: cbuf,
+ sendQuota: defaultWindowSize,
+ oiws: defaultWindowSize,
+ estdStreams: make(map[uint32]*outStream),
+ activeStreams: newOutStreamList(),
+ framer: fr,
+ hBuf: &buf,
+ hEnc: hpack.NewEncoder(&buf),
+ bdpEst: bdpEst,
+ conn: conn,
+ logger: logger,
+ ssGoAwayHandler: goAwayHandler,
}
return l
}
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
index deba0c4d9..3c63c7069 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -114,11 +114,11 @@ type http2Client struct {
streamQuota int64
streamsQuotaAvailable chan struct{}
waitingStreams uint32
- nextID uint32
registeredCompressors string
// Do not access controlBuf with mu held.
mu sync.Mutex // guard the following variables
+ nextID uint32
state transportState
activeStreams map[uint32]*Stream
// prevGoAway ID records the Last-Stream-ID in the previous GOAway frame.
@@ -408,10 +408,10 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
readerErrCh := make(chan error, 1)
go t.reader(readerErrCh)
defer func() {
- if err == nil {
- err = <-readerErrCh
- }
if err != nil {
+ // writerDone should be closed since the loopy goroutine
+ // wouldn't have started in the case this function returns an error.
+ close(t.writerDone)
t.Close(err)
}
}()
@@ -458,8 +458,12 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
if err := t.framer.writer.Flush(); err != nil {
return nil, err
}
+ // Block until the server preface is received successfully or an error occurs.
+ if err = <-readerErrCh; err != nil {
+ return nil, err
+ }
go func() {
- t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger)
+ t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler)
if err := t.loopy.run(); !isIOError(err) {
// Immediately close the connection, as the loopy writer returns
// when there are no more active streams and we were draining (the
@@ -517,6 +521,17 @@ func (t *http2Client) getPeer() *peer.Peer {
}
}
+// outgoingGoAwayHandler writes a GOAWAY to the connection. Always returns (false, err) as we want the GoAway
+// to be the last frame loopy writes to the transport.
+func (t *http2Client) outgoingGoAwayHandler(g *goAway) (bool, error) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ if err := t.framer.fr.WriteGoAway(t.nextID-2, http2.ErrCodeNo, g.debugData); err != nil {
+ return false, err
+ }
+ return false, g.closeConn
+}
+
func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) {
aud := t.createAudience(callHdr)
ri := credentials.RequestInfo{
@@ -781,7 +796,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
firstTry := true
var ch chan struct{}
transportDrainRequired := false
- checkForStreamQuota := func(it any) bool {
+ checkForStreamQuota := func() bool {
if t.streamQuota <= 0 { // Can go negative if server decreases it.
if firstTry {
t.waitingStreams++
@@ -793,23 +808,24 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
t.waitingStreams--
}
t.streamQuota--
- h := it.(*headerFrame)
- h.streamID = t.nextID
- t.nextID += 2
-
- // Drain client transport if nextID > MaxStreamID which signals gRPC that
- // the connection is closed and a new one must be created for subsequent RPCs.
- transportDrainRequired = t.nextID > MaxStreamID
- s.id = h.streamID
- s.fc = &inFlow{limit: uint32(t.initialWindowSize)}
t.mu.Lock()
if t.state == draining || t.activeStreams == nil { // Can be niled from Close().
t.mu.Unlock()
return false // Don't create a stream if the transport is already closed.
}
+
+ hdr.streamID = t.nextID
+ t.nextID += 2
+ // Drain client transport if nextID > MaxStreamID which signals gRPC that
+ // the connection is closed and a new one must be created for subsequent RPCs.
+ transportDrainRequired = t.nextID > MaxStreamID
+
+ s.id = hdr.streamID
+ s.fc = &inFlow{limit: uint32(t.initialWindowSize)}
t.activeStreams[s.id] = s
t.mu.Unlock()
+
if t.streamQuota > 0 && t.waitingStreams > 0 {
select {
case t.streamsQuotaAvailable <- struct{}{}:
@@ -819,13 +835,12 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
return true
}
var hdrListSizeErr error
- checkForHeaderListSize := func(it any) bool {
+ checkForHeaderListSize := func() bool {
if t.maxSendHeaderListSize == nil {
return true
}
- hdrFrame := it.(*headerFrame)
var sz int64
- for _, f := range hdrFrame.hf {
+ for _, f := range hdr.hf {
if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize)
return false
@@ -834,8 +849,8 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
return true
}
for {
- success, err := t.controlBuf.executeAndPut(func(it any) bool {
- return checkForHeaderListSize(it) && checkForStreamQuota(it)
+ success, err := t.controlBuf.executeAndPut(func() bool {
+ return checkForHeaderListSize() && checkForStreamQuota()
}, hdr)
if err != nil {
// Connection closed.
@@ -946,7 +961,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.
rst: rst,
rstCode: rstCode,
}
- addBackStreamQuota := func(any) bool {
+ addBackStreamQuota := func() bool {
t.streamQuota++
if t.streamQuota > 0 && t.waitingStreams > 0 {
select {
@@ -966,7 +981,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.
// Close kicks off the shutdown process of the transport. This should be called
// only once on a transport. Once it is called, the transport should not be
-// accessed any more.
+// accessed anymore.
func (t *http2Client) Close(err error) {
t.mu.Lock()
// Make sure we only close once.
@@ -991,7 +1006,10 @@ func (t *http2Client) Close(err error) {
t.kpDormancyCond.Signal()
}
t.mu.Unlock()
- t.controlBuf.finish()
+ // Per HTTP/2 spec, a GOAWAY frame must be sent before closing the
+ // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY.
+ t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte("client transport shutdown"), closeConn: err})
+ <-t.writerDone
t.cancel()
t.conn.Close()
channelz.RemoveEntry(t.channelz.ID)
@@ -1099,7 +1117,7 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) {
// for the transport and the stream based on the current bdp
// estimation.
func (t *http2Client) updateFlowControl(n uint32) {
- updateIWS := func(any) bool {
+ updateIWS := func() bool {
t.initialWindowSize = int32(n)
t.mu.Lock()
for _, s := range t.activeStreams {
@@ -1252,7 +1270,7 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) {
}
updateFuncs = append(updateFuncs, updateStreamQuota)
}
- t.controlBuf.executeAndPut(func(any) bool {
+ t.controlBuf.executeAndPut(func() bool {
for _, f := range updateFuncs {
f()
}
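
Two behavioural notes on the client transport changes above: connection setup now waits for the server's HTTP/2 preface before starting the loopy writer (surfacing handshake errors earlier), and Close sends a GOAWAY ("client transport shutdown") and waits for the writer to drain before closing the socket, as RFC 7540 asks. None of this changes the public API; a sketch of the caller-visible side, assuming a hypothetical local backend:

    package main

    import (
        "log"

        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
    )

    func main() {
        conn, err := grpc.NewClient("localhost:50051",
            grpc.WithTransportCredentials(insecure.NewCredentials()))
        if err != nil {
            log.Fatalf("create client: %v", err)
        }
        // With this release, closing the ClientConn makes an established
        // transport emit a GOAWAY and drain the loopy writer before the TCP
        // connection is torn down.
        defer conn.Close()
    }
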
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
index d582e0471..cab0e2d3d 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -330,8 +330,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
t.handleSettings(sf)
go func() {
- t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger)
- t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler
+ t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler)
err := t.loopy.run()
close(t.loopyWriterDone)
if !isIOError(err) {
@@ -860,7 +859,7 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
}
return nil
})
- t.controlBuf.executeAndPut(func(any) bool {
+ t.controlBuf.executeAndPut(func() bool {
for _, f := range updateFuncs {
f()
}
@@ -1014,12 +1013,13 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error {
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
}
headerFields = appendHeaderFieldsFromMD(headerFields, s.header)
- success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{
+ hf := &headerFrame{
streamID: s.id,
hf: headerFields,
endStream: false,
onWrite: t.setResetPingStrikes,
- })
+ }
+ success, err := t.controlBuf.executeAndPut(func() bool { return t.checkForHeaderListSize(hf) }, hf)
if !success {
if err != nil {
return err
@@ -1208,7 +1208,7 @@ func (t *http2Server) keepalive() {
continue
}
if outstandingPing && kpTimeoutLeft <= 0 {
- t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Time))
+ t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Timeout))
return
}
if !outstandingPing {
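
Besides passing the GOAWAY handler through the constructor, the server keepalive error now quotes kp.Timeout instead of kp.Time, i.e. the value that actually expired. A short sketch of where those two values come from, with illustrative durations:

    package main

    import (
        "log"
        "net"
        "time"

        "google.golang.org/grpc"
        "google.golang.org/grpc/keepalive"
    )

    func main() {
        lis, err := net.Listen("tcp", ":50051")
        if err != nil {
            log.Fatalf("listen: %v", err)
        }
        // Time is how long a connection may stay idle before the server pings
        // the client; Timeout is how long the server waits for the ping ack.
        // The corrected log line now reports Timeout.
        srv := grpc.NewServer(grpc.KeepaliveParams(keepalive.ServerParameters{
            Time:    2 * time.Hour,
            Timeout: 20 * time.Second,
        }))
        if err := srv.Serve(lis); err != nil {
            log.Fatalf("serve: %v", err)
        }
    }
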
diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
index 0d2a6e47f..4b39c0ade 100644
--- a/vendor/google.golang.org/grpc/internal/transport/transport.go
+++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
@@ -304,7 +304,7 @@ func (s *Stream) isHeaderSent() bool {
}
// updateHeaderSent updates headerSent and returns true
-// if it was alreay set. It is valid only on server-side.
+// if it was already set. It is valid only on server-side.
func (s *Stream) updateHeaderSent() bool {
return atomic.SwapUint32(&s.headerSent, 1) == 1
}
diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go
index a821ff9b2..499a49c8c 100644
--- a/vendor/google.golang.org/grpc/peer/peer.go
+++ b/vendor/google.golang.org/grpc/peer/peer.go
@@ -22,7 +22,9 @@ package peer
import (
"context"
+ "fmt"
"net"
+ "strings"
"google.golang.org/grpc/credentials"
)
@@ -39,6 +41,34 @@ type Peer struct {
AuthInfo credentials.AuthInfo
}
+// String ensures the Peer type implements the Stringer interface so that a
+// context carrying a peerKey value can be printed effectively.
+func (p *Peer) String() string {
+ if p == nil {
+ return "Peer"
+ }
+ sb := &strings.Builder{}
+ sb.WriteString("Peer{")
+ if p.Addr != nil {
+ fmt.Fprintf(sb, "Addr: '%s', ", p.Addr.String())
+ } else {
+ fmt.Fprintf(sb, "Addr: , ")
+ }
+ if p.LocalAddr != nil {
+ fmt.Fprintf(sb, "LocalAddr: '%s', ", p.LocalAddr.String())
+ } else {
+ fmt.Fprintf(sb, "LocalAddr: , ")
+ }
+ if p.AuthInfo != nil {
+ fmt.Fprintf(sb, "AuthInfo: '%s'", p.AuthInfo.AuthType())
+ } else {
+ fmt.Fprintf(sb, "AuthInfo: ")
+ }
+ sb.WriteString("}")
+
+ return sb.String()
+}
+
type peerKey struct{}
// NewContext creates a new context with peer information attached.
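
Peer gaining a String method mostly pays off in logging. A small sketch of a unary server interceptor that takes advantage of it (the interceptor name is illustrative):

    package main

    import (
        "context"
        "log"

        "google.golang.org/grpc"
        "google.golang.org/grpc/peer"
    )

    // loggingInterceptor logs the remote peer of every unary call; Peer now
    // implements fmt.Stringer, so %s/%v formatting yields a readable summary.
    func loggingInterceptor(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
        if p, ok := peer.FromContext(ctx); ok {
            log.Printf("handling %s from %s", info.FullMethod, p)
        }
        return handler(ctx, req)
    }

    func main() {
        // Wire the interceptor into a server; service registration is omitted.
        _ = grpc.NewServer(grpc.UnaryInterceptor(loggingInterceptor))
    }
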
diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go
index bf56faa76..56e8aba78 100644
--- a/vendor/google.golang.org/grpc/picker_wrapper.go
+++ b/vendor/google.golang.org/grpc/picker_wrapper.go
@@ -20,6 +20,7 @@ package grpc
import (
"context"
+ "fmt"
"io"
"sync"
@@ -117,7 +118,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
if lastPickErr != nil {
errStr = "latest balancer error: " + lastPickErr.Error()
} else {
- errStr = ctx.Err().Error()
+ errStr = fmt.Sprintf("received context error while waiting for new LB policy update: %s", ctx.Err().Error())
}
switch ctx.Err() {
case context.DeadlineExceeded:
diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go
index e3ea42ba9..885362661 100644
--- a/vendor/google.golang.org/grpc/pickfirst.go
+++ b/vendor/google.golang.org/grpc/pickfirst.go
@@ -54,7 +54,7 @@ type pfConfig struct {
serviceconfig.LoadBalancingConfig `json:"-"`
// If set to true, instructs the LB policy to shuffle the order of the list
- // of addresses received from the name resolver before attempting to
+ // of endpoints received from the name resolver before attempting to
// connect to them.
ShuffleAddressList bool `json:"shuffleAddressList"`
}
@@ -94,8 +94,7 @@ func (b *pickfirstBalancer) ResolverError(err error) {
}
func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
- addrs := state.ResolverState.Addresses
- if len(addrs) == 0 {
+ if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 {
// The resolver reported an empty address list. Treat it like an error by
// calling b.ResolverError.
if b.subConn != nil {
@@ -107,22 +106,49 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
b.ResolverError(errors.New("produced zero addresses"))
return balancer.ErrBadResolverState
}
-
// We don't have to guard this block with the env var because ParseConfig
// already does so.
cfg, ok := state.BalancerConfig.(pfConfig)
if state.BalancerConfig != nil && !ok {
return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig)
}
- if cfg.ShuffleAddressList {
- addrs = append([]resolver.Address{}, addrs...)
- grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] })
- }
if b.logger.V(2) {
b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState))
}
+ var addrs []resolver.Address
+ if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 {
+ // Perform the optional shuffling described in gRFC A62. The shuffling will
+ // change the order of endpoints but not touch the order of the addresses
+ // within each endpoint. - A61
+ if cfg.ShuffleAddressList {
+ endpoints = append([]resolver.Endpoint{}, endpoints...)
+ grpcrand.Shuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
+ }
+
+ // "Flatten the list by concatenating the ordered list of addresses for each
+ // of the endpoints, in order." - A61
+ for _, endpoint := range endpoints {
+ // "In the flattened list, interleave addresses from the two address
+ // families, as per RFC-8304 section 4." - A61
+ // TODO: support the above language.
+ addrs = append(addrs, endpoint.Addresses...)
+ }
+ } else {
+ // Endpoints not set, process addresses until we migrate resolver
+ // emissions fully to Endpoints. The top channel does wrap emitted
+ // addresses with endpoints, however some balancers such as weighted
+ // target do not forward the corresponding correct endpoints down/split
+ // endpoints properly. Once all balancers correctly forward endpoints
+ // down, can delete this else conditional.
+ addrs = state.ResolverState.Addresses
+ if cfg.ShuffleAddressList {
+ addrs = append([]resolver.Address{}, addrs...)
+ grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] })
+ }
+ }
+
if b.subConn != nil {
b.cc.UpdateAddresses(b.subConn, addrs)
return nil
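
pick_first now consumes resolver Endpoints and, per gRFC A62, shuffles the endpoint list (not the addresses within an endpoint) when shuffleAddressList is set. One way to set that knob is a default service config; a sketch assuming a hypothetical DNS target, and noting that pick_first config parsing has been gated behind GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG in some releases:

    package main

    import (
        "log"

        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
    )

    func main() {
        // Ask pick_first to shuffle the endpoint order it receives from the
        // resolver; addresses inside each endpoint keep their relative order.
        const svcCfg = `{"loadBalancingConfig": [{"pick_first": {"shuffleAddressList": true}}]}`

        conn, err := grpc.NewClient("dns:///example.internal:443", // hypothetical multi-host target
            grpc.WithTransportCredentials(insecure.NewCredentials()),
            grpc.WithDefaultServiceConfig(svcCfg),
        )
        if err != nil {
            log.Fatalf("create client: %v", err)
        }
        defer conn.Close()
    }
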
diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh
index a6f26c8ab..3edca296c 100644
--- a/vendor/google.golang.org/grpc/regenerate.sh
+++ b/vendor/google.golang.org/grpc/regenerate.sh
@@ -63,7 +63,7 @@ LEGACY_SOURCES=(
# Generates only the new gRPC Service symbols
SOURCES=(
- $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^\(profiling/proto/service.proto\|reflection/grpc_reflection_v1alpha/reflection.proto\)$')
+ $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^profiling/proto/service.proto$')
${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto
${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto
${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto
@@ -93,7 +93,7 @@ Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing
for src in ${SOURCES[@]}; do
echo "protoc ${src}"
- protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS}:${WORKDIR}/out \
+ protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},use_generic_streams_experimental=true:${WORKDIR}/out \
-I"." \
-I${WORKDIR}/grpc-proto \
-I${WORKDIR}/googleapis \
@@ -118,6 +118,6 @@ mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/
# grpc_testing_not_regenerate/*.pb.go are not re-generated,
# see grpc_testing_not_regenerate/README.md for details.
-rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go
+rm ${WORKDIR}/out/google.golang.org/grpc/reflection/test/grpc_testing_not_regenerate/*.pb.go
cp -R ${WORKDIR}/out/google.golang.org/grpc/* .
diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
index b54a3a322..ef3d6ed6c 100644
--- a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
+++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
@@ -18,9 +18,6 @@
// Package dns implements a dns resolver to be installed as the default resolver
// in grpc.
-//
-// Deprecated: this package is imported by grpc and should not need to be
-// imported directly by users.
package dns
import (
@@ -52,3 +49,12 @@ func SetResolvingTimeout(timeout time.Duration) {
func NewBuilder() resolver.Builder {
return dns.NewBuilder()
}
+
+// SetMinResolutionInterval sets the default minimum interval at which DNS
+// re-resolutions are allowed. This helps to prevent excessive re-resolution.
+//
+// It must be called only at application startup, before any gRPC calls are
+// made. Modifying this value after initialization is not thread-safe.
+func SetMinResolutionInterval(d time.Duration) {
+ dns.MinResolutionInterval = d
+}
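
The new SetMinResolutionInterval hook exposes the formerly internal MinResolutionRate knob (renamed MinResolutionInterval), and the package-level deprecation notice is gone. A minimal sketch of setting it at startup:

    package main

    import (
        "time"

        "google.golang.org/grpc/resolver/dns"
    )

    func main() {
        // Call once at startup, before any gRPC activity; the setting is not
        // safe for concurrent modification. Here re-resolution is allowed at
        // most once per minute instead of the 30s default.
        dns.SetMinResolutionInterval(time.Minute)

        // ... build clients as usual ...
    }
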
diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go
index 998e251dd..fdd49e6e9 100644
--- a/vendor/google.golang.org/grpc/rpc_util.go
+++ b/vendor/google.golang.org/grpc/rpc_util.go
@@ -964,7 +964,7 @@ func setCallInfoCodec(c *callInfo) error {
// The SupportPackageIsVersion variables are referenced from generated protocol
// buffer files to ensure compatibility with the gRPC version used. The latest
-// support package version is 7.
+// support package version is 9.
//
// Older versions are kept for compatibility.
//
@@ -976,6 +976,7 @@ const (
SupportPackageIsVersion6 = true
SupportPackageIsVersion7 = true
SupportPackageIsVersion8 = true
+ SupportPackageIsVersion9 = true
)
const grpcUA = "grpc-go/" + Version
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
index fd4558daa..89f8e4792 100644
--- a/vendor/google.golang.org/grpc/server.go
+++ b/vendor/google.golang.org/grpc/server.go
@@ -527,12 +527,22 @@ func ConnectionTimeout(d time.Duration) ServerOption {
})
}
+// MaxHeaderListSizeServerOption is a ServerOption that sets the max
+// (uncompressed) size of header list that the server is prepared to accept.
+type MaxHeaderListSizeServerOption struct {
+ MaxHeaderListSize uint32
+}
+
+func (o MaxHeaderListSizeServerOption) apply(so *serverOptions) {
+ so.maxHeaderListSize = &o.MaxHeaderListSize
+}
+
// MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size
// of header list that the server is prepared to accept.
func MaxHeaderListSize(s uint32) ServerOption {
- return newFuncServerOption(func(o *serverOptions) {
- o.maxHeaderListSize = &s
- })
+ return MaxHeaderListSizeServerOption{
+ MaxHeaderListSize: s,
+ }
}
// HeaderTableSize returns a ServerOption that sets the size of dynamic
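
Mirroring the dial-option change earlier in this patch, MaxHeaderListSize on the server is now backed by an exported MaxHeaderListSizeServerOption struct; call sites are untouched. A short server-side sketch with an illustrative 1 MiB limit:

    package main

    import (
        "log"
        "net"

        "google.golang.org/grpc"
    )

    func main() {
        lis, err := net.Listen("tcp", ":50051")
        if err != nil {
            log.Fatalf("listen: %v", err)
        }
        // As on the client side, only the option's concrete type changed; the
        // limit still applies to the uncompressed header list of incoming RPCs.
        srv := grpc.NewServer(grpc.MaxHeaderListSize(1 << 20)) // 1 MiB
        if err := srv.Serve(lis); err != nil {
            log.Fatalf("serve: %v", err)
        }
    }
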
diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go
index 2b35c5d21..9da8fc802 100644
--- a/vendor/google.golang.org/grpc/service_config.go
+++ b/vendor/google.golang.org/grpc/service_config.go
@@ -172,7 +172,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
var rsc jsonSC
err := json.Unmarshal([]byte(js), &rsc)
if err != nil {
- logger.Warningf("grpc: unmarshaling service config %s: %v", js, err)
+ logger.Warningf("grpc: unmarshalling service config %s: %v", js, err)
return &serviceconfig.ParseResult{Err: err}
}
sc := ServiceConfig{
@@ -219,7 +219,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
Timeout: (*time.Duration)(m.Timeout),
}
if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
- logger.Warningf("grpc: unmarshaling service config %s: %v", js, err)
+ logger.Warningf("grpc: unmarshalling service config %s: %v", js, err)
return &serviceconfig.ParseResult{Err: err}
}
if m.MaxRequestMessageBytes != nil {
@@ -239,13 +239,13 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
for i, n := range *m.Name {
path, err := n.generatePath()
if err != nil {
- logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err)
+ logger.Warningf("grpc: error unmarshalling service config %s due to methodConfig[%d]: %v", js, i, err)
return &serviceconfig.ParseResult{Err: err}
}
if _, ok := paths[path]; ok {
err = errDuplicatedName
- logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err)
+ logger.Warningf("grpc: error unmarshalling service config %s due to methodConfig[%d]: %v", js, i, err)
return &serviceconfig.ParseResult{Err: err}
}
paths[path] = struct{}{}
diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go
index 4ab70e2d4..fdb0bd651 100644
--- a/vendor/google.golang.org/grpc/stats/stats.go
+++ b/vendor/google.golang.org/grpc/stats/stats.go
@@ -73,9 +73,12 @@ func (*PickerUpdated) isRPCStats() {}
type InPayload struct {
// Client is true if this InPayload is from client side.
Client bool
- // Payload is the payload with original type.
+ // Payload is the payload with original type. This may be modified after
+ // the call to HandleRPC which provides the InPayload returns and must be
+ // copied if needed later.
Payload any
// Data is the serialized message payload.
+ // Deprecated: Data will be removed in the next release.
Data []byte
// Length is the size of the uncompressed payload data. Does not include any
@@ -143,9 +146,12 @@ func (s *InTrailer) isRPCStats() {}
type OutPayload struct {
// Client is true if this OutPayload is from client side.
Client bool
- // Payload is the payload with original type.
+ // Payload is the payload with original type. This may be modified after
+ // the call to HandleRPC which provides the OutPayload returns and must be
+ // copied if needed later.
Payload any
// Data is the serialized message payload.
+ // Deprecated: Data will be removed in the next release.
Data []byte
// Length is the size of the uncompressed payload data. Does not include any
// framing (gRPC or HTTP/2).
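
The new comments warn that Payload may be reused after HandleRPC returns and that Data is deprecated. A sketch of a stats handler that stays within those rules by reading only the Length fields (the handler name is illustrative):

    package main

    import (
        "context"
        "log"

        "google.golang.org/grpc"
        "google.golang.org/grpc/stats"
    )

    // sizeStatsHandler never retains Payload or the deprecated Data slice
    // beyond the HandleRPC call; it only records uncompressed sizes.
    type sizeStatsHandler struct{}

    func (sizeStatsHandler) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context   { return ctx }
    func (sizeStatsHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
    func (sizeStatsHandler) HandleConn(context.Context, stats.ConnStats)                        {}

    func (sizeStatsHandler) HandleRPC(_ context.Context, s stats.RPCStats) {
        switch p := s.(type) {
        case *stats.InPayload:
            log.Printf("received payload: %d bytes (uncompressed)", p.Length)
        case *stats.OutPayload:
            log.Printf("sent payload: %d bytes (uncompressed)", p.Length)
        }
    }

    func main() {
        _ = grpc.NewServer(grpc.StatsHandler(sizeStatsHandler{}))
    }
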
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
index d939ffc63..b54563e81 100644
--- a/vendor/google.golang.org/grpc/stream.go
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -516,6 +516,7 @@ func (a *csAttempt) newStream() error {
return toRPCErr(nse.Err)
}
a.s = s
+ a.ctx = s.Context()
a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool}
return nil
}
diff --git a/vendor/google.golang.org/grpc/stream_interfaces.go b/vendor/google.golang.org/grpc/stream_interfaces.go
new file mode 100644
index 000000000..8b813529c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/stream_interfaces.go
@@ -0,0 +1,152 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+// ServerStreamingClient represents the client side of a server-streaming (one
+// request, many responses) RPC. It is generic over the type of the response
+// message. It is used in generated code.
+type ServerStreamingClient[Res any] interface {
+ Recv() (*Res, error)
+ ClientStream
+}
+
+// ServerStreamingServer represents the server side of a server-streaming (one
+// request, many responses) RPC. It is generic over the type of the response
+// message. It is used in generated code.
+type ServerStreamingServer[Res any] interface {
+ Send(*Res) error
+ ServerStream
+}
+
+// ClientStreamingClient represents the client side of a client-streaming (many
+// requests, one response) RPC. It is generic over both the type of the request
+// message stream and the type of the unary response message. It is used in
+// generated code.
+type ClientStreamingClient[Req any, Res any] interface {
+ Send(*Req) error
+ CloseAndRecv() (*Res, error)
+ ClientStream
+}
+
+// ClientStreamingServer represents the server side of a client-streaming (many
+// requests, one response) RPC. It is generic over both the type of the request
+// message stream and the type of the unary response message. It is used in
+// generated code.
+type ClientStreamingServer[Req any, Res any] interface {
+ Recv() (*Req, error)
+ SendAndClose(*Res) error
+ ServerStream
+}
+
+// BidiStreamingClient represents the client side of a bidirectional-streaming
+// (many requests, many responses) RPC. It is generic over both the type of the
+// request message stream and the type of the response message stream. It is
+// used in generated code.
+type BidiStreamingClient[Req any, Res any] interface {
+ Send(*Req) error
+ Recv() (*Res, error)
+ ClientStream
+}
+
+// BidiStreamingServer represents the server side of a bidirectional-streaming
+// (many requests, many responses) RPC. It is generic over both the type of the
+// request message stream and the type of the response message stream. It is
+// used in generated code.
+type BidiStreamingServer[Req any, Res any] interface {
+ Recv() (*Req, error)
+ Send(*Res) error
+ ServerStream
+}
+
+// GenericClientStream implements the ServerStreamingClient, ClientStreamingClient,
+// and BidiStreamingClient interfaces. It is used in generated code.
+type GenericClientStream[Req any, Res any] struct {
+ ClientStream
+}
+
+var _ ServerStreamingClient[string] = (*GenericClientStream[int, string])(nil)
+var _ ClientStreamingClient[int, string] = (*GenericClientStream[int, string])(nil)
+var _ BidiStreamingClient[int, string] = (*GenericClientStream[int, string])(nil)
+
+// Send pushes one message into the stream of requests to be consumed by the
+// server. The type of message which can be sent is determined by the Req type
+// parameter of the GenericClientStream receiver.
+func (x *GenericClientStream[Req, Res]) Send(m *Req) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+// Recv reads one message from the stream of responses generated by the server.
+// The type of the message returned is determined by the Res type parameter
+// of the GenericClientStream receiver.
+func (x *GenericClientStream[Req, Res]) Recv() (*Res, error) {
+ m := new(Res)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+// CloseAndRecv closes the sending side of the stream, then receives the unary
+// response from the server. The type of message which it returns is determined
+// by the Res type parameter of the GenericClientStream receiver.
+func (x *GenericClientStream[Req, Res]) CloseAndRecv() (*Res, error) {
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ m := new(Res)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+// GenericServerStream implements the ServerStreamingServer, ClientStreamingServer,
+// and BidiStreamingServer interfaces. It is used in generated code.
+type GenericServerStream[Req any, Res any] struct {
+ ServerStream
+}
+
+var _ ServerStreamingServer[string] = (*GenericServerStream[int, string])(nil)
+var _ ClientStreamingServer[int, string] = (*GenericServerStream[int, string])(nil)
+var _ BidiStreamingServer[int, string] = (*GenericServerStream[int, string])(nil)
+
+// Send pushes one message into the stream of responses to be consumed by the
+// client. The type of message which can be sent is determined by the Res
+// type parameter of the serverStreamServer receiver.
+func (x *GenericServerStream[Req, Res]) Send(m *Res) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+// SendAndClose pushes the unary response to the client. The type of message
+// which can be sent is determined by the Res type parameter of the
+// clientStreamServer receiver.
+func (x *GenericServerStream[Req, Res]) SendAndClose(m *Res) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+// Recv reads one message from the stream of requests generated by the client.
+// The type of the message returned is determined by the Req type parameter
+// of the clientStreamServer receiver.
+func (x *GenericServerStream[Req, Res]) Recv() (*Req, error) {
+ m := new(Req)
+ if err := x.ServerStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
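
stream_interfaces.go underpins the use_generic_streams_experimental=true flag added to regenerate.sh: newer protoc-gen-go-grpc output can type streams as ServerStreamingClient[T] and friends instead of per-method interfaces. Even without regenerating, hand-written helpers can already target the generic interfaces, since the older generated types satisfy them. A sketch against the health Watch stream, assuming a hypothetical backend on localhost:50051:

    package main

    import (
        "context"
        "log"

        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
        healthpb "google.golang.org/grpc/health/grpc_health_v1"
    )

    // drainWatch accepts any server-streaming client whose response type is
    // HealthCheckResponse, rather than the per-method Health_WatchClient type.
    func drainWatch(stream grpc.ServerStreamingClient[healthpb.HealthCheckResponse]) error {
        for {
            resp, err := stream.Recv()
            if err != nil {
                return err
            }
            log.Printf("status update: %v", resp.GetStatus())
        }
    }

    func main() {
        conn, err := grpc.NewClient("localhost:50051",
            grpc.WithTransportCredentials(insecure.NewCredentials()))
        if err != nil {
            log.Fatalf("create client: %v", err)
        }
        defer conn.Close()

        stream, err := healthpb.NewHealthClient(conn).Watch(context.Background(), &healthpb.HealthCheckRequest{})
        if err != nil {
            log.Fatalf("watch: %v", err)
        }
        // The existing Health_WatchClient already satisfies the generic
        // interface, so it can be passed straight through.
        log.Printf("watch ended: %v", drainWatch(stream))
    }
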
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index 2556f7583..a0b782890 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
-const Version = "1.63.2"
+const Version = "1.64.1"
diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh
deleted file mode 100644
index 7e6b92e49..000000000
--- a/vendor/google.golang.org/grpc/vet.sh
+++ /dev/null
@@ -1,195 +0,0 @@
-#!/bin/bash
-
-set -ex # Exit on error; debugging enabled.
-set -o pipefail # Fail a pipe if any sub-command fails.
-
-# not makes sure the command passed to it does not exit with a return code of 0.
-not() {
- # This is required instead of the earlier (! $COMMAND) because subshells and
- # pipefail don't work the same on Darwin as in Linux.
- ! "$@"
-}
-
-die() {
- echo "$@" >&2
- exit 1
-}
-
-fail_on_output() {
- tee /dev/stderr | not read
-}
-
-# Check to make sure it's safe to modify the user's git repo.
-git status --porcelain | fail_on_output
-
-# Undo any edits made by this script.
-cleanup() {
- git reset --hard HEAD
-}
-trap cleanup EXIT
-
-PATH="${HOME}/go/bin:${GOROOT}/bin:${PATH}"
-go version
-
-if [[ "$1" = "-install" ]]; then
- # Install the pinned versions as defined in module tools.
- pushd ./test/tools
- go install \
- golang.org/x/tools/cmd/goimports \
- honnef.co/go/tools/cmd/staticcheck \
- github.com/client9/misspell/cmd/misspell
- popd
- if [[ -z "${VET_SKIP_PROTO}" ]]; then
- if [[ "${GITHUB_ACTIONS}" = "true" ]]; then
- PROTOBUF_VERSION=25.2 # a.k.a. v4.22.0 in pb.go files.
- PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip
- pushd /home/runner/go
- wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME}
- unzip ${PROTOC_FILENAME}
- bin/protoc --version
- popd
- elif not which protoc > /dev/null; then
- die "Please install protoc into your path"
- fi
- fi
- exit 0
-elif [[ "$#" -ne 0 ]]; then
- die "Unknown argument(s): $*"
-fi
-
-# - Check that generated proto files are up to date.
-if [[ -z "${VET_SKIP_PROTO}" ]]; then
- make proto && git status --porcelain 2>&1 | fail_on_output || \
- (git status; git --no-pager diff; exit 1)
-fi
-
-if [[ -n "${VET_ONLY_PROTO}" ]]; then
- exit 0
-fi
-
-# - Ensure all source files contain a copyright message.
-# (Done in two parts because Darwin "git grep" has broken support for compound
-# exclusion matches.)
-(grep -L "DO NOT EDIT" $(git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)" -- '*.go') || true) | fail_on_output
-
-# - Make sure all tests in grpc and grpc/test use leakcheck via Teardown.
-not grep 'func Test[^(]' *_test.go
-not grep 'func Test[^(]' test/*.go
-
-# - Check for typos in test function names
-git grep 'func (s) ' -- "*_test.go" | not grep -v 'func (s) Test'
-git grep 'func [A-Z]' -- "*_test.go" | not grep -v 'func Test\|Benchmark\|Example'
-
-# - Do not import x/net/context.
-not git grep -l 'x/net/context' -- "*.go"
-
-# - Do not use time.After except in tests. It has the potential to leak the
-# timer since there is no way to stop it early.
-git grep -l 'time.After(' -- "*.go" | not grep -v '_test.go\|test_utils\|testutils'
-
-# - Do not import math/rand for real library code. Use internal/grpcrand for
-# thread safety.
-git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^interop/stress\|grpcrand\|^benchmark\|wrr_test'
-
-# - Do not use "interface{}"; use "any" instead.
-git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc\|grpc_testing_not_regenerate'
-
-# - Do not call grpclog directly. Use grpclog.Component instead.
-git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go'
-
-# - Ensure all ptypes proto packages are renamed when importing.
-not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go"
-
-# - Ensure all usages of grpc_testing package are renamed when importing.
-not git grep "\(import \|^\s*\)\"google.golang.org/grpc/interop/grpc_testing" -- "*.go"
-
-# - Ensure all xds proto imports are renamed to *pb or *grpc.
-git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "'
-
-misspell -error .
-
-# - gofmt, goimports, go vet, go mod tidy.
-# Perform these checks on each module inside gRPC.
-for MOD_FILE in $(find . -name 'go.mod'); do
- MOD_DIR=$(dirname ${MOD_FILE})
- pushd ${MOD_DIR}
- go vet -all ./... | fail_on_output
- gofmt -s -d -l . 2>&1 | fail_on_output
- goimports -l . 2>&1 | not grep -vE "\.pb\.go"
-
- go mod tidy -compat=1.19
- git status --porcelain 2>&1 | fail_on_output || \
- (git status; git --no-pager diff; exit 1)
- popd
-done
-
-# - Collection of static analysis checks
-SC_OUT="$(mktemp)"
-staticcheck -go 1.19 -checks 'all' ./... > "${SC_OUT}" || true
-
-# Error for anything other than checks that need exclusions.
-grep -v "(ST1000)" "${SC_OUT}" | grep -v "(SA1019)" | grep -v "(ST1003)" | not grep -v "(ST1019)\|\(other import of\)"
-
-# Exclude underscore checks for generated code.
-grep "(ST1003)" "${SC_OUT}" | not grep -v '\(.pb.go:\)\|\(code_string_test.go:\)\|\(grpc_testing_not_regenerate\)'
-
-# Error for duplicate imports not including grpc protos.
-grep "(ST1019)\|\(other import of\)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused
-channelz/grpc_channelz_v1"
-go-control-plane/envoy
-grpclb/grpc_lb_v1"
-health/grpc_health_v1"
-interop/grpc_testing"
-orca/v3"
-proto/grpc_gcp"
-proto/grpc_lookup_v1"
-reflection/grpc_reflection_v1"
-reflection/grpc_reflection_v1alpha"
-XXXXX PleaseIgnoreUnused'
-
-# Error for any package comments not in generated code.
-grep "(ST1000)" "${SC_OUT}" | not grep -v "\.pb\.go:"
-
-# Only ignore the following deprecated types/fields/functions and exclude
-# generated code.
-grep "(SA1019)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused
-XXXXX Protobuf related deprecation errors:
-"github.com/golang/protobuf
-.pb.go:
-grpc_testing_not_regenerate
-: ptypes.
-proto.RegisterType
-XXXXX gRPC internal usage deprecation errors:
-"google.golang.org/grpc
-: grpc.
-: v1alpha.
-: v1alphareflectionpb.
-BalancerAttributes is deprecated:
-CredsBundle is deprecated:
-Metadata is deprecated: use Attributes instead.
-NewSubConn is deprecated:
-OverrideServerName is deprecated:
-RemoveSubConn is deprecated:
-SecurityVersion is deprecated:
-Target is deprecated: Use the Target field in the BuildOptions instead.
-UpdateAddresses is deprecated:
-UpdateSubConnState is deprecated:
-balancer.ErrTransientFailure is deprecated:
-grpc/reflection/v1alpha/reflection.proto
-SwitchTo is deprecated:
-XXXXX xDS deprecated fields we support
-.ExactMatch
-.PrefixMatch
-.SafeRegexMatch
-.SuffixMatch
-GetContainsMatch
-GetExactMatch
-GetMatchSubjectAltNames
-GetPrefixMatch
-GetSafeRegexMatch
-GetSuffixMatch
-GetTlsCertificateCertificateProviderInstance
-GetValidationContextCertificateProviderInstance
-XXXXX PleaseIgnoreUnused'
-
-echo SUCCESS
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 17aca5bcc..7291ede95 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -33,6 +33,11 @@ github.com/cespare/xxhash/v2
# github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
## explicit
github.com/davecgh/go-spew/spew
+# github.com/elazarl/goproxy v0.0.0-20190421051319-9d40249d3c2f
+## explicit
+github.com/elazarl/goproxy
+# github.com/elazarl/goproxy/ext v0.0.0-20240909085733-6741dbfc16a1
+## explicit
# github.com/emicklei/go-restful/v3 v3.11.0
## explicit; go 1.13
github.com/emicklei/go-restful/v3
@@ -172,7 +177,7 @@ github.com/mailru/easyjson/jwriter
## explicit; go 1.13
github.com/moby/spdystream
github.com/moby/spdystream/spdy
-# github.com/moby/term v0.0.0-20221205130635-1aeaba878587
+# github.com/moby/term v0.5.0
## explicit; go 1.18
github.com/moby/term
github.com/moby/term/windows
@@ -397,14 +402,14 @@ gomodules.xyz/jsonpatch/v2
# google.golang.org/api v0.172.0
## explicit; go 1.19
google.golang.org/api/support/bundler
-# google.golang.org/genproto/googleapis/api v0.0.0-20240311173647-c811ad7063a7
+# google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237
## explicit; go 1.19
google.golang.org/genproto/googleapis/api/expr/v1alpha1
google.golang.org/genproto/googleapis/api/httpbody
# google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda
## explicit; go 1.19
google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.63.2
+# google.golang.org/grpc v1.64.1
## explicit; go 1.19
google.golang.org/grpc
google.golang.org/grpc/attributes