Skip to content

Commit

Permalink
Install Tekton Logs Collector using Kustomize generator
Browse files Browse the repository at this point in the history
Argo CD's Application/ApplicationSet CRs aren't available in every cluster,
so we switched to a Kustomize Helm chart generator for the Vector installation.
  • Loading branch information
khrm committed Dec 18, 2024
1 parent 88e5bc6 commit 60a93f0
Show file tree
Hide file tree
Showing 26 changed files with 647 additions and 808 deletions.
1 change: 1 addition & 0 deletions .github/workflows/kube-linter.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ jobs:

- name: Run kustomize build
run: |
rm -rf components/pipeline-service/base/log-collector/charts && \
find argo-cd-apps components -name 'kustomization.yaml' \
! -path '*/k-components/*' \
! -path 'components/repository-validator/staging/*' \
Expand Down
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -4,3 +4,4 @@ cosign.pub
.tmp/
tmp
.idea/*
components/pipeline-service/base/log-collector/charts/*
Original file line number Diff line number Diff line change
Expand Up @@ -29,5 +29,6 @@ resources:
- konflux-ui
- konflux-rbac
- konflux-info
- vector-tekton-logs-collector
components:
- ../../../k-components/inject-infra-deployments-repo-details
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
# Kustomization that installs the Vector-based Tekton logs collector on
# member clusters via the merge-generator k-component.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - vector-tekton-logs-collector.yaml
components:
  - ../../../../k-components/deploy-to-member-cluster-merge-generator
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
# Argo CD ApplicationSet deploying the Vector Tekton logs collector to member
# clusters. The merge generator combines the `clusters` generator (one entry
# per registered cluster) with an initially empty `list` generator keyed on
# `nameNormalized`, so per-cluster overrides can be added later without
# restructuring.
# NOTE(review): indentation reconstructed from a flattened scrape — verify
# against the committed file.
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: vector-tekton-logs-collector
spec:
  generators:
    - merge:
        mergeKeys:
          - nameNormalized
        generators:
          - clusters:
              values:
                sourceRoot: components/vector-tekton-logs-collector
                environment: staging
                clusterDir: ""
          # Placeholder for per-cluster overrides, merged by nameNormalized.
          - list:
              elements: []
  template:
    metadata:
      # Quoted: a templated plain scalar is safer wrapped in single quotes.
      name: 'vector-tekton-logs-collector-{{nameNormalized}}'
    spec:
      project: default
      source:
        path: '{{values.sourceRoot}}/{{values.environment}}/{{values.clusterDir}}'
        repoURL: https://github.com/redhat-appstudio/infra-deployments.git
        targetRevision: main
      destination:
        namespace: tekton-logging
        server: '{{server}}'
      syncPolicy:
        automated:
          prune: true
          selfHeal: true
        syncOptions:
          - CreateNamespace=true
        retry:
          # -1 retries forever (Argo CD semantics for a negative limit).
          limit: -1
          backoff:
            duration: 10s
            factor: 2
            maxDuration: 3m
5 changes: 5 additions & 0 deletions argo-cd-apps/overlays/development/kustomization.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -184,3 +184,8 @@ patches:
kind: ApplicationSet
version: v1alpha1
name: crossplane-control-plane
- path: development-overlay-patch.yaml
target:
kind: ApplicationSet
version: v1alpha1
name: vector-tekton-logs-collector
Original file line number Diff line number Diff line change
Expand Up @@ -201,3 +201,8 @@ patches:
kind: ApplicationSet
version: v1alpha1
name: konflux-info
- path: production-overlay-patch.yaml
target:
kind: ApplicationSet
version: v1alpha1
name: vector-tekton-logs-collector
Original file line number Diff line number Diff line change
Expand Up @@ -201,3 +201,8 @@ patches:
kind: ApplicationSet
version: v1alpha1
name: konflux-info
- path: production-overlay-patch.yaml
target:
kind: ApplicationSet
version: v1alpha1
name: vector-tekton-logs-collector
Original file line number Diff line number Diff line change
Expand Up @@ -1767,196 +1767,6 @@ spec:
- name: AUTOINSTALL_COMPONENTS
value: "false"
---
# Argo CD Application (removed by this commit in favor of the ApplicationSet)
# that installed the upstream Vector Helm chart, pinned to a specific chart
# repo revision, with inline values configuring a Tekton log-shipping agent.
# NOTE(review): indentation reconstructed from a flattened scrape — verify
# against the original file before reuse.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: vectors-tekton-logs-collector
  namespace: openshift-gitops
spec:
  destination:
    namespace: tekton-logging
    server: https://kubernetes.default.svc
  project: default
  source:
    path: charts/vector
    repoURL: 'https://github.com/vectordotdev/helm-charts'
    # Pinned chart-repo commit; quoted so the SHA is never retyped.
    targetRevision: "08506fdc01c7cc3fcf2dd83102add7b44980ee23"
    helm:
      valueFiles:
        - values.yaml
      # Inline chart values: Vector runs as a per-node Agent, tails Tekton
      # pod logs, tags them with taskRun/pipelineRun metadata, and ships
      # them to S3.
      values: |-
        role: Agent
        customConfig:
          data_dir: /vector-data-dir
          api:
            enabled: true
            address: 127.0.0.1:8686
            playground: false
          sources:
            kubernetes_logs:
              type: kubernetes_logs
              rotate_wait_secs: 5
              glob_minimum_cooldown_ms: 15000
              auto_partial_merge: true
              extra_label_selector: "app.kubernetes.io/managed-by in (tekton-pipelines,pipelinesascode.tekton.dev)"
            internal_metrics:
              type: internal_metrics
          transforms:
            remap_app_logs:
              type: remap
              inputs: [kubernetes_logs]
              source: |-
                .log_type = "application"
                .kubernetes_namespace_name = .kubernetes.pod_namespace
                if exists(.kubernetes.pod_labels."tekton.dev/taskRunUID") {
                  .taskRunUID = del(.kubernetes.pod_labels."tekton.dev/taskRunUID")
                } else {
                  .taskRunUID = "none"
                }
                if exists(.kubernetes.pod_labels."tekton.dev/pipelineRunUID") {
                  .pipelineRunUID = del(.kubernetes.pod_labels."tekton.dev/pipelineRunUID")
                  .result = .pipelineRunUID
                } else {
                  .result = .taskRunUID
                }
                if exists(.kubernetes.pod_labels."tekton.dev/task") {
                  .task = del(.kubernetes.pod_labels."tekton.dev/task")
                } else {
                  .task = "none"
                }
                if exists(.kubernetes.pod_namespace) {
                  .namespace = del(.kubernetes.pod_namespace)
                } else {
                  .namespace = "unlabeled"
                }
                .pod = .kubernetes.pod_name
                .container = .kubernetes.container_name
          sinks:
            aws_s3:
              type: "aws_s3"
              bucket: ${BUCKET}
              buffer:
                type: "disk"
                max_size: 1073741824
              inputs: ["remap_app_logs"]
              compression: "none"
              endpoint: ${ENDPOINT}
              encoding:
                codec: "text"
              key_prefix: "/logs/{{ `{{ .namespace }}` }}/{{`{{ .result }}`}}/{{`{{ .taskRunUID }}`}}/{{`{{ .container }}`}}"
              filename_time_format: ""
              filename_append_uuid: false
        env:
          - name: AWS_ACCESS_KEY_ID
            valueFrom:
              secretKeyRef:
                name: tekton-results-s3
                key: aws_access_key_id
          - name: AWS_SECRET_ACCESS_KEY
            valueFrom:
              secretKeyRef:
                name: tekton-results-s3
                key: aws_secret_access_key
          - name: AWS_DEFAULT_REGION
            valueFrom:
              secretKeyRef:
                name: tekton-results-s3
                key: aws_region
          - name: BUCKET
            valueFrom:
              secretKeyRef:
                name: tekton-results-s3
                key: bucket
          - name: ENDPOINT
            valueFrom:
              secretKeyRef:
                name: tekton-results-s3
                key: endpoint
        tolerations:
          - effect: NoSchedule
            key: konflux-ci.dev/workload
            operator: Exists
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            drop:
              - CHOWN
              - DAC_OVERRIDE
              - FOWNER
              - FSETID
              - KILL
              - NET_BIND_SERVICE
              - SETGID
              - SETPCAP
              - SETUID
          readOnlyRootFilesystem: true
          seLinuxOptions:
            type: spc_t
          seccompProfile:
            type: RuntimeDefault
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    retry:
      backoff:
        duration: 10s
        factor: 2
        maxDuration: 3m
      # -1 retries forever (Argo CD semantics for a negative limit).
      limit: -1
    syncOptions:
      - CreateNamespace=true
      - Validate=false
---
# OpenShift SecurityContextConstraints for the Vector log-collector service
# account: host-path volume access is allowed (needed to tail node pod logs),
# everything else is locked down.
# NOTE(review): indentation reconstructed from a flattened scrape; SCCs are
# cluster-scoped, so metadata.namespace is ignored by the API server.
allowHostDirVolumePlugin: true
allowHostIPC: false
allowHostNetwork: false
allowHostPID: false
allowHostPorts: false
allowPrivilegeEscalation: false
allowPrivilegedContainer: false
allowedCapabilities: null
apiVersion: security.openshift.io/v1
defaultAddCapabilities: null
defaultAllowPrivilegeEscalation: false
forbiddenSysctls:
  # '*' must be quoted: a bare * is the YAML alias sigil.
  - '*'
fsGroup:
  type: RunAsAny
groups: []
kind: SecurityContextConstraints
metadata:
  name: logging-scc
  namespace: tekton-logging
priority: null
readOnlyRootFilesystem: true
requiredDropCapabilities:
  - CHOWN
  - DAC_OVERRIDE
  - FSETID
  - FOWNER
  - SETGID
  - SETUID
  - SETPCAP
  - NET_BIND_SERVICE
  - KILL
runAsUser:
  type: RunAsAny
seLinuxContext:
  type: RunAsAny
seccompProfiles:
  - runtime/default
supplementalGroups:
  type: RunAsAny
users:
  - system:serviceaccount:tekton-logging:vectors-tekton-logs-collector
volumes:
  - configMap
  - emptyDir
  - hostPath
  - projected
  - secret
---
apiVersion: route.openshift.io/v1
kind: Route
metadata:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ kind: Kustomization
commonAnnotations:
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true


resources:
- main-pipeline-service-configuration.yaml
- pipelines-as-code-secret.yaml
Expand Down
Loading

0 comments on commit 60a93f0

Please sign in to comment.