-
Notifications
You must be signed in to change notification settings - Fork 1
/
values.yaml
242 lines (208 loc) · 5.98 KB
/
values.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
---
# Default values for deploying Celestia
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
##
global:
  ## Celestia node chain-id
  ##
  chainId: mocha
  name: celestia
  ## Deploy pods on a specific node-pool if enabled
  ##
  nodePool:
    enabled: false
    ## If enabled, provide a kv pair for node-pool affinity
    # key: "env"
    # value: "prod"
  ## Override the deployment namespace
  ##
  namespaceOverride: ""
  ## Common labels for chart resources
  ##
  labels:
    app.kubernetes.io/instance: "celestia"
    app.kubernetes.io/name: "celestia"
    app.kubernetes.io/component: "celestia"
    app.kubernetes.io/managed-by: "helm"
  ## Init container script for Celestia App
  ##
  appInitContainer:
    configmapName: init-celestia-app
    scriptName: init-celestia-app.sh
  ## Init container script for Celestia Node
  ##
  nodeInitContainer:
    configmapName: init-celestia-node
    scriptName: init-celestia-node.sh
## Celestia App node configuration
##
celestiaApp:
  ## Celestia App docker image
  ##
  image:
    registry: registry.hub.docker.com
    repository: jeremymc99
    name: celestia-app
    tag: v0.11.1
    pullPolicy: IfNotPresent
  ## Celestia App home directory
  ##
  home:
    ## Path to Celestia home directory
    ##
    path: "/{{ .Values.global.name }}/.celestia-appd"
    ## Celestia App home directory Persistent Volume configuration
    ##
    volume:
      name: "{{ .Values.global.name }}-{{ .Values.global.chainId }}-app-pvc-01"
      accessMode: ReadWriteOnce
      storageClass: standard
      storage: 100Gi
  ## Deploy a Load Balancer for Celestia App RPC endpoint
  ##
  rpcLoadBalancer: false
  ## Celestia App node ports - will open them in the Statefulset and create a dedicated service for each one
  #
  # All Services will be deployed as ClusterIP, except P2P which will be LoadBalancer, and RPC if .Values.celestiaApp.rpcLoadBalancer is set to true
  #
  ##
  ports:
    grpc:
      port: 9090
      name: "grpc"
    p2p:
      port: 26656
      name: "p2p"
    rpc:
      port: 26657
      name: "rpc"
    metrics:
      port: 26660
      name: "metrics"
  ## Configuration files for the Celestia node
  ##
  files:
    ## Files to store to the /data folder (.celestia-appd/data/)
    ##
    data:
      - name: validator-state-json
        fileName: priv_validator_state.json
    ## Files to store to the /config folder (.celestia-appd/config/)
    ##
    config:
      - name: app-toml
        fileName: app.toml
      - name: config-toml
        fileName: config.toml
      - name: client-toml
        fileName: client.toml
      ## You can provide the address-book directly
      ##
      - name: address-book
        fileName: addrbook.json
  ## You can provide your celestia-app node_key.json file using a secret, if not one will be generated at start
  ##
  nodeKey:
    existingSecret: ""
  ## You can provide your celestia-app priv_validator_key.json file using a secret
  # /!\ warning: not recommended for validators, use a remote signer instead (see Horcrux or TMKMS)
  ##
  privateValidatorKey:
    existingSecret: ""
  ## Enable quick sync (it will download a <24h snapshot in an initContainer before starting the node)
  ##
  snapshotQuickSync:
    enabled: false
  ## Define resources requests and limits for single Pods.
  ##
  resources: {}
  # requests:
  #   cpu: 200m
  #   memory: 10Gi
  # limits:
  #   memory: 10Gi
## Celestia Node configuration
##
celestiaNode:
  enabled: true
  ## There are 3 different types of nodes: bridge, light and full.
  # note: when running a validator it's recommended to also run a bridge node
  ##
  type: bridge
  ## Celestia node binary
  # Warning: before v0.8.0 the binary is in the root folder (not inside the /bin folder) so you will need to specify `./celestia` to use it
  # After v0.8.0 set to 'celestia' or '/bin/celestia'
  binary: ./celestia
  ## Node network (caution: could be different from chain-id)
  ##
  network: mocha
  ## Quantum Gravity Bridge address
  ##
  ethereumAddress: ""
  ## celestia node docker image
  ##
  image:
    registry: ghcr.io
    repository: celestiaorg
    name: celestia-node
    tag: sha-747c9e5  # tag for v0.6.4
    pullPolicy: IfNotPresent
  ## Celestia Node ports - will open them in the Statefulset and create a dedicated ClusterIP service for each one
  ##
  ports:
    rpc:
      port: 26658
      name: rpc
  ## Celestia Node home directory
  ##
  home:
    ## Path to node store
    ##
    path: "/root/.celestia-{{ .Values.celestiaNode.type }}-{{ .Values.global.chainId }}"
    ## Node store Persistent Volume configuration
    ##
    volume:
      name: "{{ .Values.global.name }}-{{ .Values.global.chainId }}-{{ .Values.celestiaNode.type }}-pvc-01"
      accessMode: ReadWriteOnce
      storageClass: standard
      storage: 100Gi
  ## Configuration file for Celestia Node
  ##
  config:
    file: config.toml
    configmapName: node-config-toml
  ## IP address used to communicate with the celestia-app node
  coreIp: localhost
  ## Celestia Node cel-key
  ##
  celKey:
    ## If not enabled, a cel-key will be generated for you at node start
    ##
    enabled: false
    ## If .Values.celestiaNode.celKey.enabled is true, then you will need to provide secret names and file names of your key files (see README)
    # address:
    #   secretName: "celkey-address"
    #   fileName: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.address"
    # validatorInfo:
    #   secretName: "celkey-validator-info"
    #   fileName: "my_celes_key.info"
  ## Define resources requests and limits for single Pods.
  ##
  resources: {}
  # requests:
  #   cpu: 200m
  #   memory: 10Gi
  # limits:
  #   memory: 10Gi

## Prometheus configuration to collect metrics - it will deploy a ServiceMonitor and RBAC resources to allow the Prometheus server to collect Celestia node metrics
## NOTE(review): placed top-level per the chart-wide scope described above — confirm against the chart templates
##
monitoring:
  enabled: false
  namespace: monitoring
  labels:
    app.kubernetes.io/component: prometheus
    app.kubernetes.io/instance: k8s
    app.kubernetes.io/name: prometheus
    app.kubernetes.io/part-of: kube-prometheus
    app: "celestia"