# app.cfg.example
# Also see documentation at https://github.com/EESSI/eessi-bot-software-layer/blob/main/README.md#step5.5
[github]
# replace '123456' with the ID of your GitHub App; see https://github.com/settings/apps
app_id = 123456
# a short yet descriptive name for your app instance, used for example
# when the bot adds or updates a comment to a PR; keep it short because
# it appears in such comments
# for example, the name could include the name of the cluster the bot
# runs on
# NOTE avoid putting an actual username here as it will be visible on
# potentially publicly accessible GitHub pages.
app_name = MY-bot
# replace '12345678' with the ID of the installation of your GitHub App
# (can be derived by creating an event and then checking for the list
# of sent events and its payload either via the Smee channel's web page
# or via the Advanced section of your GitHub App on github.com)
installation_id = 12345678
# path to the private key that was generated when the GitHub App was registered
private_key = PATH_TO_PRIVATE_KEY
[buildenv]
# name of the job script used for building an EESSI stack
build_job_script = PATH_TO_EESSI_BOT/scripts/bot-build.slurm
# The container_cachedir may be used to reuse downloaded container image
# files across jobs, so that jobs can launch containers more quickly.
container_cachedir = PATH_TO_SHARED_DIRECTORY
# it may happen that we need to customize some CVMFS configuration
# the value of cvmfs_customizations is a dictionary which maps a file
# name to an entry that needs to be added to that file
cvmfs_customizations = { "/etc/cvmfs/default.local": "CVMFS_HTTP_PROXY=\"http://PROXY_DNS_NAME:3128|http://PROXY_IP_ADDRESS:3128\"" }
# if compute nodes have no internet connection, we need to set http(s)_proxy
# or commands such as pip3 cannot download software from package repositories
# for example, the temporary EasyBuild is installed via pip3 first
http_proxy = http://PROXY_DNS:3128/
https_proxy = http://PROXY_DNS:3128/
# directory under which the bot prepares directories per job
# structure created is as follows: YYYY.MM/pr_PR_NUMBER/event_EVENT_ID/run_RUN_NUMBER/OS+SUBDIR
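# example (hypothetical values): a job directory for PR 42 could look like
#   $HOME/jobs/2023.06/pr_42/event_abc123/run_000/linux/x86_64/generic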
jobs_base_dir = $HOME/jobs
# configure environment
# list of comma-separated modules to be loaded by build_job_script;
# useful/needed if some tool is not provided as a system-wide package
# (read by the bot and handed over to build_job_script via the
# --load-modules parameter)
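# example (hypothetical module names): load_modules = git,Python/3.10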
load_modules =
# path to a temporary directory on the build node; used, for example,
# for EESSI_TMPDIR --> /tmp/$USER/EESSI
# escaping variables with '\' delays expansion until the start of the
# build_job_script; this can be used for referencing environment
# variables that are only set inside a Slurm job
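# example (hypothetical): local_tmp = /tmp/\$SLURM_JOB_ID/EESSI delays
# expanding $SLURM_JOB_ID, which only exists once the job runs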
local_tmp = /tmp/$USER/EESSI
# parameters to be added to all job submissions
# NOTE do not quote the parameter string; quotes are retained when the
# config is read and the whole 'string' would then be recognised as a
# single parameter.
# NOTE 2 '--get-user-env' may be needed on systems where the job's
# environment needs to be initialised as if it were for a login shell.
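# example combining the notes above (a sketch, not required on all systems):
#   slurm_params = --hold --get-user-env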
slurm_params = --hold
# full path to the job submission command
submit_command = /usr/bin/sbatch
# which GH account has the permission to trigger a build by setting the
# label 'bot:build' (apparently this cannot be restricted on GitHub);
# if the value is left empty, everyone can trigger builds
# value can be a space-delimited list of GH accounts
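# example (hypothetical account names): build_permission = alice bob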
build_permission = Hafsa-Naeem
[deploycfg]
# script for uploading built software packages
tarball_upload_script = PATH_TO_EESSI_BOT/scripts/eessi-upload-to-staging
# URL to S3/minio bucket
# if this attribute is set, bucket_base will be constructed as follows:
#   bucket_base=${endpoint_url}/${bucket_name}
# otherwise, bucket_base will be constructed as follows:
#   bucket_base=https://${bucket_name}.s3.amazonaws.com
# - the former variant is used for non-AWS S3 services, e.g. minio, or when
#   the bucket name is not provided in the hostname (see latter case)
# - the latter variant is used for AWS S3 services
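# worked example (hypothetical values): with endpoint_url set to
#   https://minio.example.org and bucket_name = eessi-staging,
#   bucket_base becomes https://minio.example.org/eessi-staging;
#   without endpoint_url it would be https://eessi-staging.s3.amazonaws.com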
endpoint_url = URL_TO_S3_SERVER
# bucket name
bucket_name = eessi-staging
# upload policy: defines what policy is used for uploading built artefacts
# to an S3 bucket
# 'all'   : upload all artefacts (multiple uploads of the same artefact possible)
# 'latest': for each build target (eessi-VERSION-{software,init,compat}-OS-ARCH)
#           only upload the latest built artefact
# 'once'  : upload any built artefact for a given build target only once
# 'none'  : do not upload any built artefacts
upload_policy = once
# which GH account has the permission to trigger the deployment by setting
# the label 'bot:deploy' (apparently this cannot be restricted on GitHub);
# if the value is left empty, everyone can trigger the deployment
# value can be a space-delimited list of GH accounts
deploy_permission = trz42
[architecturetargets]
# defines for which architectures the bot will build and which
# submission parameters shall be used for each of them
arch_target_map = { "linux/x86_64/generic" : "--constraint shape=c4.2xlarge", "linux/x86_64/amd/zen2": "--constraint shape=c5a.2xlarge" }
# configuration for the event handler, which receives events from a GitHub repository
[event_handler]
# path to the log file to log messages for the event handler
log_path = /path/to/eessi_bot_event_handler.log
[job_manager]
# path to the log file to log messages for the job manager
log_path = /path/to/eessi_bot_job_manager.log
# directory where the job manager stores information about jobs to be
# tracked, e.g. as symbolic link JOBID -> job directory
job_ids_dir = $HOME/jobs/ids
# full path to the job status checking command
poll_command = /usr/bin/squeue
# polling interval in seconds
poll_interval = 60
# full path to the command for manipulating existing jobs
scontrol_command = /usr/bin/scontrol