From 1d5657edc6e3b0d096c5293f96b0a46bc4e40789 Mon Sep 17 00:00:00 2001
From: rdlrt <3169068+rdlrt@users.noreply.github.com>
Date: Tue, 18 Jun 2024 18:13:57 +1000
Subject: [PATCH] Remove files/docker/grest as it was quite outdated, and
 future docker work would be via koios-lite repo

---
 files/docker/grest/.env.example               |  21 -
 files/docker/grest/README.md                  |  16 -
 files/docker/grest/config/haproxy/haproxy.cfg |  43 --
 files/docker/grest/config/postgres/crontab    |  28 -
 .../grest/config/postgres/postgresql.conf     | 705 ------------------
 files/docker/grest/docker-compose.yml         |  85 ---
 .../01_postgres_config.sql                    |  14 -
 .../docker/grest/scripts/docker-getmetrics.sh | 139 ----
 .../grest/scripts/docker-setup-grest.sh       | 255 ------
 .../grest/scripts/postgres-entrypoint.sh      | 385 ----------
 10 files changed, 1691 deletions(-)
 delete mode 100644 files/docker/grest/.env.example
 delete mode 100644 files/docker/grest/README.md
 delete mode 100644 files/docker/grest/config/haproxy/haproxy.cfg
 delete mode 100644 files/docker/grest/config/postgres/crontab
 delete mode 100644 files/docker/grest/config/postgres/postgresql.conf
 delete mode 100644 files/docker/grest/docker-compose.yml
 delete mode 100644 files/docker/grest/docker-entrypoint-initdb/01_postgres_config.sql
 delete mode 100755 files/docker/grest/scripts/docker-getmetrics.sh
 delete mode 100755 files/docker/grest/scripts/docker-setup-grest.sh
 delete mode 100755 files/docker/grest/scripts/postgres-entrypoint.sh

diff --git a/files/docker/grest/.env.example b/files/docker/grest/.env.example
deleted file mode 100644
index f3d33ece4..000000000
--- a/files/docker/grest/.env.example
+++ /dev/null
@@ -1,21 +0,0 @@
-POSTGRES_LOGGING=true
-POSTGRES_HOST_AUTH_METHOD=trust
-POSTGRES_USER=postgres
-POSTGRES_PASSWORD=YOUR_PASSWORD
-POSTGRES_DB=cexplorer
-POSTGRES_CONF_FILE=/etc/postgresql/postgresql.conf
-POSTGRES_HOST=postgres
-POSTGRES_PORT=5432
-DBSYNC_PROM_PORT=8080
-EXTENDED=true
-SOCKET=/opt/cardano/cnode/sockets/node.socket
-EKG=Y
-PGRST_DB_SCHEMA=grest
-PGRST_DB_ANON_ROLE=web_anon
-PGRST_DB_EXTRA_SEARCH_PATH=public
-PGRST_MAX_ROWS=1000
-PGRST_SERVER_HOST=0.0.0.0
-PGRST_SERVER_PORT=8050
-# Attention: restoring the db from a snapshot takes a very different approach in docker - left optional for now as it still needs testing
-# RESTORE_SNAPSHOT=${RESTORE_SNAPSHOT:-https://update-cardano-mainnet.iohk.io/cardano-db-sync/11/db-sync-snapshot-schema-11-block-6510444-x86_64.tgz}
-# RESTORE_RECREATE_DB=N
diff --git a/files/docker/grest/README.md b/files/docker/grest/README.md
deleted file mode 100644
index 53697c2fc..000000000
--- a/files/docker/grest/README.md
+++ /dev/null
@@ -1,16 +0,0 @@
-Docker compose for Koios is still under development.
-
-# ENVIRONMENT
-
-Before running, change the default username and password for the postgres database for security.
-To do that, create a `.env` file following the `.env.example` format.
-
-This is done to prevent users from running setups with the default passwords that are publicly available in this repo.
-
-Make sure you change `POSTGRES_PASSWORD` to your own value in the `.env` file.
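A minimal way to do that (illustrative commands, not a script shipped with this repo; run from `files/docker/grest`, and substitute your own password):

`cp .env.example .env`

`sed -i 's/^POSTGRES_PASSWORD=.*/POSTGRES_PASSWORD=MySecretPassword/' .env`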
- -# RUNNING - -`cd files/docker/grest` - -`docker-compose up -d` diff --git a/files/docker/grest/config/haproxy/haproxy.cfg b/files/docker/grest/config/haproxy/haproxy.cfg deleted file mode 100644 index 658b85652..000000000 --- a/files/docker/grest/config/haproxy/haproxy.cfg +++ /dev/null @@ -1,43 +0,0 @@ -global - daemon - nbthread 3 - maxconn 256 - stats socket ipv4@127.0.0.1:8055 mode 0600 level admin - log 127.0.0.1 local2 - insecure-fork-wanted - external-check - -defaults - mode http - log global - option httplog - option dontlognull - option http-ignore-probes - option dontlog-normal - timeout client 10s - timeout server 10s - timeout connect 3s - timeout server-fin 2s - timeout http-request 5s - -frontend app - bind 0.0.0.0:8053 - #bind :8453 ssl crt /etc/ssl/server.pem no-sslv3 - #redirect scheme https code 301 if !{ ssl_fc } - http-request track-sc0 src table flood_lmt_rate - http-request deny deny_status 429 if { sc_http_req_rate(0) gt 100 } - default_backend grest_core - -backend flood_lmt_rate - stick-table type ip size 1m expire 10m store http_req_rate(10s) - -backend grest_core - balance first - #option external-check - #external-check path "/usr/bin:/bin:/tmp:/sbin:/usr/sbin" - #external-check command /usr/local/etc/haproxy/grest-poll.sh - http-response set-header X-Frame-Options: DENY - server local postgrest:8050 check inter 10000 - server rdlrt 207.244.252.116:8053 check inter 10000 backup - server damjan 65.21.183.97:8053 check inter 10000 backup - server markus 185.161.193.32:8053 check inter 10000 backup diff --git a/files/docker/grest/config/postgres/crontab b/files/docker/grest/config/postgres/crontab deleted file mode 100644 index d013aac85..000000000 --- a/files/docker/grest/config/postgres/crontab +++ /dev/null @@ -1,28 +0,0 @@ -# Unlike any other crontab you don't have to run the `crontab' -# command to install the new version when you edit this file -# and files in /etc/cron.d. These files also have username fields, -# that none of the other crontabs do. - -SHELL=/bin/bash -PATH=/nix/var/nix/profiles/per-user/guild/profile/bin:/nix/var/nix/profiles/per-user/guild/profile/sbin:/opt/cardano/cnode/scripts:/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/sbin:/usr/local/bin \ - -# Example of job definition: -# .---------------- minute (0 - 59) -# | .------------- hour (0 - 23) -# | | .---------- day of month (1 - 31) -# | | | .------- month (1 - 12) OR jan,feb,mar,apr ... 
-# | | | | .---- day of week (0 - 6) (Sunday=0 or 7) OR sun,mon,tue,wed,thu,fri,sat -# | | | | | -# * * * * * user-name command to be executed -#17 * * * * root cd / && run-parts --report /etc/cron.hourly -#25 6 * * * root test -x /usr/sbin/anacron || ( cd / && run-parts --report /etc/cron.daily ) -#47 6 * * 7 root test -x /usr/sbin/anacron || ( cd / && run-parts --report /etc/cron.weekly ) -#52 6 1 * * root test -x /usr/sbin/anacron || ( cd / && run-parts --report /etc/cron.monthly ) - -# Examples -#*/10 * * * * if [ -z $NOIP2 ]; then /home/guild/.scripts/ip2loc.sh 2>/dev/null; fi -#*/5 * * * * if [ -z $NOBLW ]; then /home/guild/.scripts/block_watcher.sh 2>/dev/null; fi -#*/130 * * * * if [ -z $KEEP ]; then find /opt/cardano/cnode/logs/ -name "*.json" -type f -mtime +0 -type f -delete; fi -#0 */3 * * * if [ -z $NOREC ]; then cp -rf /opt/cardano/cnode/db/* /opt/cardano/cnode/priv/$NETWORK-db/ 2>/dev/null; fi -*/35 * * * * history -c -# Empty \ No newline at end of file diff --git a/files/docker/grest/config/postgres/postgresql.conf b/files/docker/grest/config/postgres/postgresql.conf deleted file mode 100644 index b6c426428..000000000 --- a/files/docker/grest/config/postgres/postgresql.conf +++ /dev/null @@ -1,705 +0,0 @@ -# ----------------------------- -# PostgreSQL configuration file -# ----------------------------- -# -# This file consists of lines of the form: -# -# name = value -# -# (The "=" is optional.) Whitespace may be used. Comments are introduced with -# "#" anywhere on a line. The complete list of parameter names and allowed -# values can be found in the PostgreSQL documentation. -# -# The commented-out settings shown in this file represent the default values. -# Re-commenting a setting is NOT sufficient to revert it to the default value; -# you need to reload the server. -# -# This file is read on server startup and when the server receives a SIGHUP -# signal. If you edit the file on a running system, you have to SIGHUP the -# server for the changes to take effect, run "pg_ctl reload", or execute -# "SELECT pg_reload_conf()". Some parameters, which are marked below, -# require a server shutdown and restart to take effect. -# -# Any parameter can also be given as a command-line option to the server, e.g., -# "postgres -c log_connections=on". Some parameters can be changed at run time -# with the "SET" SQL command. -# -# Memory units: kB = kilobytes Time units: ms = milliseconds -# MB = megabytes s = seconds -# GB = gigabytes min = minutes -# TB = terabytes h = hours -# d = days - - -#------------------------------------------------------------------------------ -# FILE LOCATIONS -#------------------------------------------------------------------------------ - -# The default values of these variables are driven from the -D command-line -# option or PGDATA environment variable, represented here as ConfigDir. - -#data_directory = 'ConfigDir' # use data in another directory -data_directory = '/var/lib/postgresql/data' - # (change requires restart) -#hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file -hba_file = '/etc/pg_hba.conf' - # (change requires restart) -#ident_file = 'ConfigDir/pg_ident.conf' # ident configuration file -ident_file = '/etc/pg_ident.conf' # ident configuration file - # (change requires restart) - -# If external_pid_file is not explicitly set, no extra PID file is written. 
-#external_pid_file = '' # write an extra PID file - # (change requires restart) - - -#------------------------------------------------------------------------------ -# CONNECTIONS AND AUTHENTICATION -#------------------------------------------------------------------------------ - -# - Connection Settings - - -listen_addresses = '*' - # comma-separated list of addresses; - # defaults to 'localhost'; use '*' for all - # (change requires restart) -#port = 5432 # (change requires restart) -max_connections = 100 # (change requires restart) -#superuser_reserved_connections = 3 # (change requires restart) -#unix_socket_directories = '/var/run/postgresql' # comma-separated list of directories - # (change requires restart) -#unix_socket_group = '' # (change requires restart) -#unix_socket_permissions = 0777 # begin with 0 to use octal notation - # (change requires restart) -#bonjour = off # advertise server via Bonjour - # (change requires restart) -#bonjour_name = '' # defaults to the computer name - # (change requires restart) - -# - TCP Keepalives - -# see "man 7 tcp" for details - -#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds; - # 0 selects the system default -#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds; - # 0 selects the system default -#tcp_keepalives_count = 0 # TCP_KEEPCNT; - # 0 selects the system default - -# - Authentication - - -#authentication_timeout = 1min # 1s-600s -#password_encryption = md5 # md5 or scram-sha-256 -#db_user_namespace = off - -# GSSAPI using Kerberos -#krb_server_keyfile = '' -#krb_caseins_users = off - -# - SSL - - -#ssl = off -#ssl_ca_file = '' -#ssl_cert_file = 'server.crt' -#ssl_crl_file = '' -#ssl_key_file = 'server.key' -#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers -#ssl_prefer_server_ciphers = on -#ssl_ecdh_curve = 'prime256v1' -#ssl_dh_params_file = '' -#ssl_passphrase_command = '' -#ssl_passphrase_command_supports_reload = off - - -#------------------------------------------------------------------------------ -# RESOURCE USAGE (except WAL) -#------------------------------------------------------------------------------ - -# - Memory - - -shared_buffers = 128MB # min 128kB - # (change requires restart) -#huge_pages = try # on, off, or try - # (change requires restart) -#temp_buffers = 8MB # min 800kB -#max_prepared_transactions = 0 # zero disables the feature - # (change requires restart) -# Caution: it is not advisable to set max_prepared_transactions nonzero unless -# you actively intend to use prepared transactions. 
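The "(change requires restart)" annotations above mark settings that only take effect on a full server restart; most others, such as work_mem below, follow the reload semantics described in the file header. A minimal sketch of that flow, assuming a superuser psql session inside the postgres container (note that ALTER SYSTEM writes the override to postgresql.auto.conf rather than editing this file):

psql -U "$POSTGRES_USER" -d "$POSTGRES_DB" <<'SQL'
ALTER SYSTEM SET work_mem = '16MB'; -- reloadable, no restart needed
SELECT pg_reload_conf();            -- same effect as pg_ctl reload / SIGHUP
SHOW work_mem;                      -- verify (a fresh session is guaranteed to see it)
SQL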
-#work_mem = 4MB # min 64kB -#maintenance_work_mem = 64MB # min 1MB -#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem -#max_stack_depth = 2MB # min 100kB -dynamic_shared_memory_type = posix # the default is the first option - # supported by the operating system: - # posix - # sysv - # windows - # mmap - # use none to disable dynamic shared memory - # (change requires restart) - -# - Disk - - -#temp_file_limit = -1 # limits per-process temp file space - # in kB, or -1 for no limit - -# - Kernel Resources - - -#max_files_per_process = 1000 # min 25 - # (change requires restart) - -# - Cost-Based Vacuum Delay - - -#vacuum_cost_delay = 0 # 0-100 milliseconds -#vacuum_cost_page_hit = 1 # 0-10000 credits -#vacuum_cost_page_miss = 10 # 0-10000 credits -#vacuum_cost_page_dirty = 20 # 0-10000 credits -#vacuum_cost_limit = 200 # 1-10000 credits - -# - Background Writer - - -#bgwriter_delay = 200ms # 10-10000ms between rounds -#bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables -#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round -#bgwriter_flush_after = 512kB # measured in pages, 0 disables - -# - Asynchronous Behavior - - -#effective_io_concurrency = 1 # 1-1000; 0 disables prefetching -#max_worker_processes = 8 # (change requires restart) -#max_parallel_maintenance_workers = 2 # taken from max_parallel_workers -#max_parallel_workers_per_gather = 2 # taken from max_parallel_workers -#parallel_leader_participation = on -#max_parallel_workers = 8 # maximum number of max_worker_processes that - # can be used in parallel operations -#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate - # (change requires restart) -#backend_flush_after = 0 # measured in pages, 0 disables - - -#------------------------------------------------------------------------------ -# WRITE-AHEAD LOG -#------------------------------------------------------------------------------ - -# - Settings - - -wal_level = minimal # minimal, replica, or logical - # (change requires restart) -#fsync = on # flush data to disk for crash safety - # (turning this off can cause - # unrecoverable data corruption) -synchronous_commit = off # synchronization level; - # off, local, remote_write, remote_apply, or on -#wal_sync_method = fsync # the default is the first option - # supported by the operating system: - # open_datasync - # fdatasync (default on Linux) - # fsync - # fsync_writethrough - # open_sync -#full_page_writes = on # recover from partial page writes -#wal_compression = off # enable compression of full-page writes -#wal_log_hints = off # also do full page writes of non-critical updates - # (change requires restart) -#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers - # (change requires restart) -#wal_writer_delay = 200ms # 1-10000 milliseconds -#wal_writer_flush_after = 1MB # measured in pages, 0 disables - -#commit_delay = 0 # range 0-100000, in microseconds -#commit_siblings = 5 # range 1-1000 - -# - Checkpoints - - -checkpoint_timeout = 15min # range 30s-1d -max_wal_size = 1GB -min_wal_size = 80MB -#checkpoint_completion_target = 0.5 # checkpoint target duration, 0.0 - 1.0 -#checkpoint_flush_after = 256kB # measured in pages, 0 disables -#checkpoint_warning = 30s # 0 disables - -# - Archiving - - -#archive_mode = off # enables archiving; off, on, or always - # (change requires restart) -#archive_command = '' # command to use to archive a logfile segment - # placeholders: %p = path of file to archive - # %f = file name only - # e.g. 'test ! 
-f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f' -#archive_timeout = 0 # force a logfile segment switch after this - # number of seconds; 0 disables - - -#------------------------------------------------------------------------------ -# REPLICATION -#------------------------------------------------------------------------------ - -# - Sending Servers - - -# Set these on the master and on any standby that will send replication data. - -max_wal_senders = 0 # max number of walsender processes - # (change requires restart) -#wal_keep_segments = 0 # in logfile segments; 0 disables -#wal_sender_timeout = 60s # in milliseconds; 0 disables - -#max_replication_slots = 10 # max number of replication slots - # (change requires restart) -#track_commit_timestamp = off # collect timestamp of transaction commit - # (change requires restart) - -# - Master Server - - -# These settings are ignored on a standby server. - -#synchronous_standby_names = '' # standby servers that provide sync rep - # method to choose sync standbys, number of sync standbys, - # and comma-separated list of application_name - # from standby(s); '*' = all -#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed - -# - Standby Servers - - -# These settings are ignored on a master server. - -#hot_standby = on # "off" disallows queries during recovery - # (change requires restart) -#max_standby_archive_delay = 30s # max delay before canceling queries - # when reading WAL from archive; - # -1 allows indefinite delay -#max_standby_streaming_delay = 30s # max delay before canceling queries - # when reading streaming WAL; - # -1 allows indefinite delay -#wal_receiver_status_interval = 10s # send replies at least this often - # 0 disables -#hot_standby_feedback = off # send info from standby to prevent - # query conflicts -#wal_receiver_timeout = 60s # time that receiver waits for - # communication from master - # in milliseconds; 0 disables -#wal_retrieve_retry_interval = 5s # time to wait before retrying to - # retrieve WAL after a failed attempt - -# - Subscribers - - -# These settings are ignored on a publisher. 
- -#max_logical_replication_workers = 4 # taken from max_worker_processes - # (change requires restart) -#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers - - -#------------------------------------------------------------------------------ -# QUERY TUNING -#------------------------------------------------------------------------------ - -# - Planner Method Configuration - - -#enable_bitmapscan = on -#enable_hashagg = on -#enable_hashjoin = on -#enable_indexscan = on -#enable_indexonlyscan = on -#enable_material = on -#enable_mergejoin = on -#enable_nestloop = on -#enable_parallel_append = on -#enable_seqscan = on -#enable_sort = on -#enable_tidscan = on -#enable_partitionwise_join = off -#enable_partitionwise_aggregate = off -#enable_parallel_hash = on -#enable_partition_pruning = on - -# - Planner Cost Constants - - -#seq_page_cost = 1.0 # measured on an arbitrary scale -#random_page_cost = 4.0 # same scale as above -#cpu_tuple_cost = 0.01 # same scale as above -#cpu_index_tuple_cost = 0.005 # same scale as above -#cpu_operator_cost = 0.0025 # same scale as above -#parallel_tuple_cost = 0.1 # same scale as above -#parallel_setup_cost = 1000.0 # same scale as above - -#jit_above_cost = 100000 # perform JIT compilation if available - # and query more expensive than this; - # -1 disables -#jit_inline_above_cost = 500000 # inline small functions if query is - # more expensive than this; -1 disables -#jit_optimize_above_cost = 500000 # use expensive JIT optimizations if - # query is more expensive than this; - # -1 disables - -#min_parallel_table_scan_size = 8MB -#min_parallel_index_scan_size = 512kB -#effective_cache_size = 4GB - -# - Genetic Query Optimizer - - -#geqo = on -#geqo_threshold = 12 -#geqo_effort = 5 # range 1-10 -#geqo_pool_size = 0 # selects default based on effort -#geqo_generations = 0 # selects default based on effort -#geqo_selection_bias = 2.0 # range 1.5-2.0 -#geqo_seed = 0.0 # range 0.0-1.0 - -# - Other Planner Options - - -#default_statistics_target = 100 # range 1-10000 -#constraint_exclusion = partition # on, off, or partition -#cursor_tuple_fraction = 0.1 # range 0.0-1.0 -#from_collapse_limit = 8 -#join_collapse_limit = 8 # 1 disables collapsing of explicit - # JOIN clauses -#force_parallel_mode = off -#jit = off # allow JIT compilation - - -#------------------------------------------------------------------------------ -# REPORTING AND LOGGING -#------------------------------------------------------------------------------ - -# - Where to Log - - -log_destination = 'stderr,syslog' -#log_destination = 'stderr' # Valid values are combinations of - # stderr, csvlog, syslog, and eventlog, - # depending on platform. csvlog - # requires logging_collector to be on. - -# This is used when logging to stderr: -logging_collector = on -#logging_collector = off # Enable capturing of stderr and csvlog - # into log files. Required to be on for - # csvlogs. 
- # (change requires restart) - -# These are only used if logging_collector is on: -log_directory = '/var/lib/postgresql/data/pg_log' -#log_directory = 'log' # directory where log files are written, - # can be absolute or relative to PGDATA -#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern, -log_filename = 'postgresql.log' - # can include strftime() escapes -#log_file_mode = 0600 # creation mode for log files, - # begin with 0 to use octal notation -log_truncate_on_rotation = on # If on, an existing log file with the - # same name as the new log file will be - # truncated rather than appended to. - # But such truncation only occurs on - # time-driven rotation, not on restarts - # or size-driven rotation. Default is - # off, meaning append to existing files - # in all cases. -log_rotation_age = 1d # Automatic rotation of logfiles will - # happen after that time. 0 disables. -log_rotation_size = 10MB # Automatic rotation of logfiles will - # happen after that much log output. - # 0 disables. - -# These are relevant when logging to syslog: -#syslog_facility = 'LOCAL0' -#syslog_ident = 'postgres' -#syslog_sequence_numbers = on -#syslog_split_messages = on - -# This is only relevant when logging to eventlog (win32): -# (change requires restart) -#event_source = 'PostgreSQL' - -# - When to Log - - -#log_min_messages = warning # values in order of decreasing detail: - # debug5 - # debug4 - # debug3 - # debug2 - # debug1 - # info - # notice - # warning - # error - # log - # fatal - # panic - -#log_min_error_statement = error # values in order of decreasing detail: - # debug5 - # debug4 - # debug3 - # debug2 - # debug1 - # info - # notice - # warning - # error - # log - # fatal - # panic (effectively off) - -log_min_duration_statement = 300 # -1 is disabled, 0 logs all statements - # and their durations, > 0 logs only - # statements running at least this number - # of milliseconds - - -# - What to Log - - -#debug_print_parse = off -#debug_print_rewritten = off -#debug_print_plan = off -#debug_pretty_print = on -#log_checkpoints = off -#log_connections = off -#log_disconnections = off -#log_duration = off -#log_error_verbosity = default # terse, default, or verbose messages -#log_hostname = off -log_line_prefix = '%t [%p]: user=%u,db=%d,app=%a,client=%h' -log_checkpoints = on -log_connections = on -log_disconnections = on -log_lock_waits = on -log_temp_files = 0 -log_autovacuum_min_duration = 0 -log_error_verbosity = default -#log_line_prefix = '%m [%p] ' # special values: - # %a = application name - # %u = user name - # %d = database name - # %r = remote host and port - # %h = remote host - # %p = process ID - # %t = timestamp without milliseconds - # %m = timestamp with milliseconds - # %n = timestamp with milliseconds (as a Unix epoch) - # %i = command tag - # %e = SQL state - # %c = session ID - # %l = session line number - # %s = session start timestamp - # %v = virtual transaction ID - # %x = transaction ID (0 if none) - # %q = stop here in non-session - # processes - # %% = '%' - # e.g. 
'<%u%%%d> ' -#log_lock_waits = off # log lock waits >= deadlock_timeout -#log_statement = 'none' # none, ddl, mod, all -#log_replication_commands = off -#log_temp_files = -1 # log temporary files equal or larger - # than the specified size in kilobytes; - # -1 disables, 0 logs all temp files -log_timezone = 'UTC' - -#------------------------------------------------------------------------------ -# PROCESS TITLE -#------------------------------------------------------------------------------ - -#cluster_name = '' # added to process titles if nonempty - # (change requires restart) -#update_process_title = on - - -#------------------------------------------------------------------------------ -# STATISTICS -#------------------------------------------------------------------------------ - -# - Query and Index Statistics Collector - - -#track_activities = on -#track_counts = on -#track_io_timing = off -#track_functions = none # none, pl, all -#track_activity_query_size = 1024 # (change requires restart) -#stats_temp_directory = 'pg_stat_tmp' - - -# - Monitoring - - -#log_parser_stats = off -#log_planner_stats = off -#log_executor_stats = off -#log_statement_stats = off - - -#------------------------------------------------------------------------------ -# AUTOVACUUM -#------------------------------------------------------------------------------ - -#autovacuum = on # Enable autovacuum subprocess? 'on' - # requires track_counts to also be on. -#log_autovacuum_min_duration = -1 # -1 disables, 0 logs all actions and - # their durations, > 0 logs only - # actions running at least this number - # of milliseconds. -#autovacuum_max_workers = 3 # max number of autovacuum subprocesses - # (change requires restart) -#autovacuum_naptime = 1min # time between autovacuum runs -#autovacuum_vacuum_threshold = 50 # min number of row updates before - # vacuum -#autovacuum_analyze_threshold = 50 # min number of row updates before - # analyze -#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum -#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze -#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum - # (change requires restart) -#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age - # before forced vacuum - # (change requires restart) -#autovacuum_vacuum_cost_delay = 20ms # default vacuum cost delay for - # autovacuum, in milliseconds; - # -1 means use vacuum_cost_delay -#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for - # autovacuum, -1 means use - # vacuum_cost_limit - - -#------------------------------------------------------------------------------ -# CLIENT CONNECTION DEFAULTS -#------------------------------------------------------------------------------ - -# - Statement Behavior - - -#client_min_messages = notice # values in order of decreasing detail: - # debug5 - # debug4 - # debug3 - # debug2 - # debug1 - # log - # notice - # warning - # error -#search_path = '"$user", public' # schema names -#row_security = on -#default_tablespace = '' # a tablespace name, '' uses the default -#temp_tablespaces = '' # a list of tablespace names, '' uses - # only default tablespace -#check_function_bodies = on -#default_transaction_isolation = 'read committed' -#default_transaction_read_only = off -#default_transaction_deferrable = off -#session_replication_role = 'origin' -#statement_timeout = 0 # in milliseconds, 0 is disabled -#lock_timeout = 0 # in milliseconds, 0 is disabled 
-#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled -#vacuum_freeze_min_age = 50000000 -#vacuum_freeze_table_age = 150000000 -#vacuum_multixact_freeze_min_age = 5000000 -#vacuum_multixact_freeze_table_age = 150000000 -#vacuum_cleanup_index_scale_factor = 0.1 # fraction of total number of tuples - # before index cleanup, 0 always performs - # index cleanup -#bytea_output = 'hex' # hex, escape -#xmlbinary = 'base64' -#xmloption = 'content' -#gin_fuzzy_search_limit = 0 -#gin_pending_list_limit = 4MB - -# - Locale and Formatting - - -datestyle = 'iso, mdy' -#intervalstyle = 'postgres' -timezone = 'UTC' -#timezone_abbreviations = 'Default' # Select the set of available time zone - # abbreviations. Currently, there are - # Default - # Australia (historical usage) - # India - # You can create your own file in - # share/timezonesets/. -#extra_float_digits = 0 # min -15, max 3 -#client_encoding = sql_ascii # actually, defaults to database - # encoding - -# These settings are initialized by initdb, but they can be changed. -lc_messages = 'en_US.utf8' # locale for system error message - # strings -lc_monetary = 'en_US.utf8' # locale for monetary formatting -lc_numeric = 'en_US.utf8' # locale for number formatting -lc_time = 'en_US.utf8' # locale for time formatting - -# default configuration for text search -default_text_search_config = 'pg_catalog.english' - -# - Shared Library Preloading - - -#shared_preload_libraries = '' # (change requires restart) -#local_preload_libraries = '' -#session_preload_libraries = '' -#jit_provider = 'llvmjit' # JIT library to use - -# - Other Defaults - - -#dynamic_library_path = '$libdir' - - -#------------------------------------------------------------------------------ -# LOCK MANAGEMENT -#------------------------------------------------------------------------------ - -#deadlock_timeout = 1s -#max_locks_per_transaction = 64 # min 10 - # (change requires restart) -#max_pred_locks_per_transaction = 64 # min 10 - # (change requires restart) -#max_pred_locks_per_relation = -2 # negative values mean - # (max_pred_locks_per_transaction - # / -max_pred_locks_per_relation) - 1 -#max_pred_locks_per_page = 2 # min 0 - - -#------------------------------------------------------------------------------ -# VERSION AND PLATFORM COMPATIBILITY -#------------------------------------------------------------------------------ - -# - Previous PostgreSQL Versions - - -#array_nulls = on -#backslash_quote = safe_encoding # on, off, or safe_encoding -#default_with_oids = off -#escape_string_warning = on -#lo_compat_privileges = off -#operator_precedence_warning = off -#quote_all_identifiers = off -#standard_conforming_strings = on -#synchronize_seqscans = on - -# - Other Platforms and Clients - - -#transform_null_equals = off - - -#------------------------------------------------------------------------------ -# ERROR HANDLING -#------------------------------------------------------------------------------ - -#exit_on_error = off # terminate session on any error? -#restart_after_crash = on # reinitialize after backend crash? -#data_sync_retry = off # retry or panic on failure to fsync - # data? - # (change requires restart) - - -#------------------------------------------------------------------------------ -# CONFIG FILE INCLUDES -#------------------------------------------------------------------------------ - -# These options allow settings to be loaded from files other than the -# default postgresql.conf. 
- -#include_dir = '' # include files ending in '.conf' from - # a directory, e.g., 'conf.d' -#include_if_exists = '' # include file only if it exists -#include = '' # include file - - -#------------------------------------------------------------------------------ -# CUSTOMIZED OPTIONS -#------------------------------------------------------------------------------ - -# Add settings for extensions here \ No newline at end of file diff --git a/files/docker/grest/docker-compose.yml b/files/docker/grest/docker-compose.yml deleted file mode 100644 index 4ff9c46e3..000000000 --- a/files/docker/grest/docker-compose.yml +++ /dev/null @@ -1,85 +0,0 @@ -version: "3.5" - -services: - postgres: - image: postgres:13.5-bullseye - env_file: ./.env - ports: - - 8059:8059 - - 55000:5432 - volumes: - - postgres_db:/var/lib/postgresql/data - - ./scripts/docker-getmetrics.sh:/getmetrics.sh - - ./scripts/docker-setup-grest.sh:/setup-grest.sh - - ./scripts/postgres-entrypoint.sh:/entrypoint.sh - - ./config/postgres/crontab:/etc/cron.d/crontab - - ./docker-entrypoint-initdb:/docker-entrypoint-initdb.d/ - entrypoint: /entrypoint.sh - command: postgres - restart: on-failure - logging: - driver: "json-file" - options: - max-size: "200k" - max-file: "10" - - cardano-node: - image: cardanocommunity/cardano-node:latest - env_file: ./.env - environment: - - NETWORK=${NETWORK:-mainnet} - volumes: - - node-db:/opt/cardano/cnode/db - - node-ipc:/opt/cardano/cnode/sockets - restart: on-failure - logging: - driver: "json-file" - options: - max-size: "200k" - max-file: "10" - - cardano-db-sync: - image: inputoutput/cardano-db-sync:13.0.4 - env_file: ./.env - environment: - - NETWORK=${NETWORK:-mainnet} - depends_on: - - cardano-node - - postgres - volumes: - - db-sync-data:/var/lib/cdbsync - - node-ipc:/node-ipc - restart: on-failure - logging: - driver: "json-file" - options: - max-size: "200k" - max-file: "10" - - haproxy: - image: haproxy:alpine - env_file: ./.env - depends_on: - - cardano-node - - cardano-db-sync - - postgres - - postgrest - volumes: - - ./config/haproxy/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg - ports: - - 8053:8053 - restart: on-failure - - postgrest: - image: postgrest/postgrest - env_file: ./.env - environment: - - PGRST_DB_URI=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@postgres/${POSTGRES_DB} - depends_on: - - postgres - restart: on-failure -volumes: - node-db: - postgres_db: - db-sync-data: - node-ipc: diff --git a/files/docker/grest/docker-entrypoint-initdb/01_postgres_config.sql b/files/docker/grest/docker-entrypoint-initdb/01_postgres_config.sql deleted file mode 100644 index bac3b4a1b..000000000 --- a/files/docker/grest/docker-entrypoint-initdb/01_postgres_config.sql +++ /dev/null @@ -1,14 +0,0 @@ -ALTER SYSTEM -SET checkpoint_timeout = '15min'; -ALTER SYSTEM -SET synchronous_commit = 'off'; -ALTER SYSTEM -SET wal_writer_delay = '800ms'; -ALTER SYSTEM -SET max_segment_size = '64MB'; -ALTER SYSTEM -SET max_wal_size = '14GB'; -ALTER SYSTEM -SET min_wal_size = '600MB'; -ALTER SYSTEM -SET wal_level = 'minimal'; diff --git a/files/docker/grest/scripts/docker-getmetrics.sh b/files/docker/grest/scripts/docker-getmetrics.sh deleted file mode 100755 index b9b0f5300..000000000 --- a/files/docker/grest/scripts/docker-getmetrics.sh +++ /dev/null @@ -1,139 +0,0 @@ -#!/usr/bin/env bash - - -###################################### -# User Variables - Change as desired # -# Common variables set in env file # -###################################### - -#BYRON_EPOCH_LENGTH=2160 # 2160 for mainnet | 
other networks to-do -#BYRON_SLOT_LENGTH=20000 # 20000 for mainnet | other networks to-do -#BYRON_GENESIS_START_SEC=1506203091 # 1506203091 for mainnet | other networks to-do -#SHELLEY_TRANS_EPOCH=208 # 208 for mainnet | other networks to-do -#SHELLEY_SLOT_LENGTH=1 # 1 for mainnet | other networks to-do - -#RESTAPI_PORT=8050 # Destination PostgREST port -#HAPROXY_PORT=8053 # Destination HAProxy port -DBSYNC_PROM_HOST=cardano-db-sync # Destination DBSync Prometheus Host -DBSYNC_PROM_PORT=8080 # Destination DBSync Prometheus port - -PROM_HOST=cardano-node -PROM_PORT=12798 - -###################################### -# Do NOT modify code below # -###################################### - -# Description : Query cardano-node for current metrics -getNodeMetrics() { - node_metrics=$(curl -s "http://${PROM_HOST}:${PROM_PORT}/metrics" 2>/dev/null) - [[ ${node_metrics} =~ cardano_node_metrics_nodeStartTime_int[[:space:]]([^[:space:]]*) ]] && { nodeStartTime=${BASH_REMATCH[1]} && uptimes=$(( $(date +%s) - BASH_REMATCH[1] )); } || uptimes=0 - [[ ${node_metrics} =~ cardano_node_metrics_blockNum_int[[:space:]]([^[:space:]]*) ]] && blocknum=${BASH_REMATCH[1]} || blocknum=0 - [[ ${node_metrics} =~ cardano_node_metrics_epoch_int[[:space:]]([^[:space:]]*) ]] && epochnum=${BASH_REMATCH[1]} || epochnum=0 - [[ ${node_metrics} =~ cardano_node_metrics_slotInEpoch_int[[:space:]]([^[:space:]]*) ]] && slot_in_epoch=${BASH_REMATCH[1]} || slot_in_epoch=0 - [[ ${node_metrics} =~ cardano_node_metrics_slotNum_int[[:space:]]([^[:space:]]*) ]] && slotnum=${BASH_REMATCH[1]} || slotnum=0 - [[ ${node_metrics} =~ cardano_node_metrics_density_real[[:space:]]([^[:space:]]*) ]] && density=$(bc <<< "scale=3;$(printf '%3.5f' "${BASH_REMATCH[1]}")*100/1") || density=0.0 - [[ ${node_metrics} =~ cardano_node_metrics_txsProcessedNum_int[[:space:]]([^[:space:]]*) ]] && tx_processed=${BASH_REMATCH[1]} || tx_processed=0 - [[ ${node_metrics} =~ cardano_node_metrics_txsInMempool_int[[:space:]]([^[:space:]]*) ]] && mempool_tx=${BASH_REMATCH[1]} || mempool_tx=0 - [[ ${node_metrics} =~ cardano_node_metrics_mempoolBytes_int[[:space:]]([^[:space:]]*) ]] && mempool_bytes=${BASH_REMATCH[1]} || mempool_bytes=0 - [[ ${node_metrics} =~ cardano_node_metrics_currentKESPeriod_int[[:space:]]([^[:space:]]*) ]] && kesperiod=${BASH_REMATCH[1]} || kesperiod=0 - [[ ${node_metrics} =~ cardano_node_metrics_remainingKESPeriods_int[[:space:]]([^[:space:]]*) ]] && remaining_kes_periods=${BASH_REMATCH[1]} || remaining_kes_periods=0 - [[ ${node_metrics} =~ cardano_node_metrics_Forge_node_is_leader_int[[:space:]]([^[:space:]]*) ]] && isleader=${BASH_REMATCH[1]} || isleader=0 - [[ ${node_metrics} =~ cardano_node_metrics_Forge_adopted_int[[:space:]]([^[:space:]]*) ]] && adopted=${BASH_REMATCH[1]} || adopted=0 - [[ ${node_metrics} =~ cardano_node_metrics_Forge_didnt_adopt_int[[:space:]]([^[:space:]]*) ]] && didntadopt=${BASH_REMATCH[1]} || didntadopt=0 - [[ ${node_metrics} =~ cardano_node_metrics_Forge_forge_about_to_lead_int[[:space:]]([^[:space:]]*) ]] && about_to_lead=${BASH_REMATCH[1]} || about_to_lead=0 - [[ ${node_metrics} =~ cardano_node_metrics_slotsMissedNum_int[[:space:]]([^[:space:]]*) ]] && missed_slots=${BASH_REMATCH[1]} || missed_slots=0 - [[ ${node_metrics} =~ cardano_node_metrics_RTS_gcLiveBytes_int[[:space:]]([^[:space:]]*) ]] && mem_live=${BASH_REMATCH[1]} || mem_live=0 - [[ ${node_metrics} =~ cardano_node_metrics_RTS_gcHeapBytes_int[[:space:]]([^[:space:]]*) ]] && mem_heap=${BASH_REMATCH[1]} || mem_heap=0 - [[ ${node_metrics} =~ 
cardano_node_metrics_RTS_gcMinorNum_int[[:space:]]([^[:space:]]*) ]] && gc_minor=${BASH_REMATCH[1]} || gc_minor=0
-  [[ ${node_metrics} =~ cardano_node_metrics_RTS_gcMajorNum_int[[:space:]]([^[:space:]]*) ]] && gc_major=${BASH_REMATCH[1]} || gc_major=0
-  [[ ${node_metrics} =~ cardano_node_metrics_forks_int[[:space:]]([^[:space:]]*) ]] && forks=${BASH_REMATCH[1]} || forks=0
-  [[ ${node_metrics} =~ cardano_node_metrics_blockfetchclient_blockdelay_s[[:space:]]([^[:space:]]*) ]] && block_delay=${BASH_REMATCH[1]} || block_delay=0
-  [[ ${node_metrics} =~ cardano_node_metrics_served_block_count_int[[:space:]]([^[:space:]]*) ]] && blocks_served=${BASH_REMATCH[1]} || blocks_served=0
-  [[ ${node_metrics} =~ cardano_node_metrics_blockfetchclient_lateblocks[[:space:]]([^[:space:]]*) ]] && blocks_late=${BASH_REMATCH[1]} || blocks_late=0
-  [[ ${node_metrics} =~ cardano_node_metrics_blockfetchclient_blockdelay_cdfOne[[:space:]]([^[:space:]]*) ]] && printf -v blocks_w1s "%.6f" ${BASH_REMATCH[1]} || blocks_w1s=0
-  [[ ${node_metrics} =~ cardano_node_metrics_blockfetchclient_blockdelay_cdfThree[[:space:]]([^[:space:]]*) ]] && printf -v blocks_w3s "%.6f" ${BASH_REMATCH[1]} || blocks_w3s=0
-  [[ ${node_metrics} =~ cardano_node_metrics_blockfetchclient_blockdelay_cdfFive[[:space:]]([^[:space:]]*) ]] && printf -v blocks_w5s "%.6f" ${BASH_REMATCH[1]} || blocks_w5s=0
-}
-
-exec 2>/dev/null
-
-[[ -z ${RESTAPI_PORT} ]] && RESTAPI_PORT=8050
-[[ -z ${HAPROXY_PORT} ]] && HAPROXY_PORT=8053
-[[ -z ${SHELLEY_TRANS_EPOCH} ]] && SHELLEY_TRANS_EPOCH=208
-[[ -z ${SHELLEY_SLOT_LENGTH} ]] && SHELLEY_SLOT_LENGTH=1
-[[ -z ${BYRON_EPOCH_LENGTH} ]] && BYRON_EPOCH_LENGTH=21600
-[[ -z ${BYRON_GENESIS_START_SEC} ]] && BYRON_GENESIS_START_SEC=1506203091
-[[ -z ${BYRON_SLOT_LENGTH} ]] && BYRON_SLOT_LENGTH=20000
-
-
-# Description : Get calculated slot number tip
-getSlotTipRef() {
-  current_time_sec=$(printf '%(%s)T\n' -1)
-  [[ ${SHELLEY_TRANS_EPOCH} -eq -1 ]] && echo 0 && return
-  byron_slots=$(( SHELLEY_TRANS_EPOCH * BYRON_EPOCH_LENGTH ))
-  byron_end_time=$(( BYRON_GENESIS_START_SEC + ((SHELLEY_TRANS_EPOCH * BYRON_EPOCH_LENGTH * BYRON_SLOT_LENGTH) / 1000) ))
-
-  if [[ ${current_time_sec} -lt ${byron_end_time} ]]; then # In Byron phase
-    echo $(( ((current_time_sec - BYRON_GENESIS_START_SEC)*1000) / BYRON_SLOT_LENGTH ))
-  else # In Shelley phase
-    echo $(( byron_slots + (( current_time_sec - byron_end_time ) / SHELLEY_SLOT_LENGTH ) ))
-  fi
-}
-
-function get-metrics() {
-  shopt -s expand_aliases
-  if [ -n "$SERVED" ]; then
-    echo "Content-type: text/plain" # Tells the browser what kind of content to expect
-    echo "" # request body starts from this empty line
-  fi
-  # Replace the value for URL as appropriate
-  # Stats data
-  getNodeMetrics
-  currslottip=$(getSlotTipRef)
-  dbsyncProm=$(curl -s http://${DBSYNC_PROM_HOST}:${DBSYNC_PROM_PORT} | grep ^cardano)
-  load1m=$(( $(awk '{ print $1*100 }' /proc/loadavg) / $(grep -c ^processor /proc/cpuinfo) ))
-  meminf=$(grep "^[MSBC][ewuah][:mafc]" /proc/meminfo)
-  memtotal=$(( $(echo "${meminf}" | grep MemTotal | awk '{print $2}') + $(echo "${meminf}" | grep SwapTotal | awk '{print $2}') ))
-  memused=$(( memtotal + $(echo "${meminf}" | grep Shmem: | awk '{print $2}') - $(echo "${meminf}" | grep MemFree | awk '{print $2}') - $(echo "${meminf}" | grep SwapFree | awk '{print $2}') - $(echo "${meminf}" | grep ^Buffers | awk '{print $2}') - $(echo "${meminf}" | grep ^Cached | awk '{print $2}') ))
-  cpuutil=$(awk -v a="$(awk '/cpu /{print $2+$4,$2+$4+$5}' /proc/stat; sleep 1)" '/cpu /{split(a,b," ");
print 100*($2+$4-b[1])/($2+$4+$5-b[2])}' /proc/stat) - # in Bytes - pubschsize=$(psql -d "${POSTGRES_DB}" -U "${POSTGRES_USER}" -c "SELECT sum(pg_relation_size(quote_ident(schemaname) || '.' || quote_ident(tablename))::bigint) FROM pg_tables WHERE schemaname = 'public'" | awk 'FNR == 3 {print $1 $2}') - grestschsize=$(psql -d "${POSTGRES_DB}" -U "${POSTGRES_USER}" -c "SELECT sum(pg_relation_size(quote_ident(schemaname) || '.' || quote_ident(tablename))::bigint) FROM pg_tables WHERE schemaname = 'grest'" | awk 'FNR == 3 {print $1 $2}') - dbsize=$(( pubschsize + grestschsize )) - - # Metrics - [[ -n "${dbsyncProm}" ]] && export METRIC_dbsynctipref=$(( currslottip - $(printf %f "$(echo "${dbsyncProm}" | grep cardano_db_sync_db_slot_height | awk '{print $2}')" |cut -d. -f1) )) - export METRIC_nodetipref=$(( currslottip - slotnum )) - export METRIC_uptime="${uptimes}" - export METRIC_dbsyncBlockHeight=$(echo "${dbsyncProm}" | grep cardano_db_sync_db_block_height | awk '{print $2}' | cut -d. -f1) - export METRIC_nodeBlockHeight=${blocknum} - export METRIC_dbsyncQueueLength=$(echo "${dbsyncProm}" | grep cardano_db_sync_db_queue_length | awk '{print $2}' | cut -d. -f1) - export METRIC_memtotal="${memtotal}" - export METRIC_memused="${memused}" - export METRIC_cpuutil="${cpuutil}" - export METRIC_load1m="$(( load1m ))" - export METRIC_pubschsize="${pubschsize}" - export METRIC_grestschsize="${grestschsize}" - export METRIC_dbsize="${dbsize}" - #export METRIC_cnodeversion="$(echo $(cardano-node --version) | awk '{print $2 "-" $9}')" - #export METRIC_dbsyncversion="$(echo $(cardano-db-sync --version) | awk '{print $2 "-" $9}')" - #export METRIC_psqlversion="$(echo "" | psql -U "${POSTGRES_USER}" -d "${POSTGRES_DB}" -c "SELECT version();" | grep PostgreSQL | awk '{print $2}')" - - for metric_var_name in $(env | grep ^METRIC | sort | awk -F= '{print $1}') - do - METRIC_NAME=${metric_var_name//METRIC_/} - # default NULL values to 0 - if [ -z "${!metric_var_name}" ] - then - METRIC_VALUE="0" - else - METRIC_VALUE="${!metric_var_name}" - fi - echo "${METRIC_NAME} ${METRIC_VALUE}" - done -} - -get-metrics diff --git a/files/docker/grest/scripts/docker-setup-grest.sh b/files/docker/grest/scripts/docker-setup-grest.sh deleted file mode 100755 index 4bd0038af..000000000 --- a/files/docker/grest/scripts/docker-setup-grest.sh +++ /dev/null @@ -1,255 +0,0 @@ -#!/bin/bash - -usage() { - cat <<-EOF >&2 - - Usage: $(basename "$0") [-f] [-i [p][r][m][c][d]] [-u] [-b ] - - Install and setup haproxy, PostgREST, polling services and create systemd services for haproxy, postgREST and dbsync - - -u Skip update check for setup script itself - -r Reset grest schema - drop all cron jobs and triggers, and remove all deployed RPC functions and cached tables - -q Run all DB Queries to update on postgres (includes creating grest schema, and re-creating views/genesis table/functions/triggers and setting up cron jobs) - -b Use alternate branch of scripts to download - only recommended for testing/development (Default: master) - - EOF - exit 1 -} - -# Description : Set default env variables. 
-set_environment_variables() {
-  CURL_TIMEOUT=60
-  [[ -z "${BRANCH}" ]] && BRANCH=alpha
-  REPO_URL_RAW="https://raw.githubusercontent.com/cardano-community/guild-operators/${BRANCH}"
-  GREST_DOCKER_SCRIPTS_URL="${REPO_URL_RAW}/files/docker/grest/scripts"
-  GREST_DB_SCRIPTS_URL="${REPO_URL_RAW}/scripts/grest-helper-scripts/db-scripts"
-  DOCS_URL="https://cardano-community.github.io/guild-operators"
-  API_DOCS_URL="https://api.koios.rest"
-  CRON_SCRIPTS_DIR="${CNODE_HOME}/scripts/cron-scripts"
-  CRON_DIR="/etc/cron.d"
-  [[ -z "${PGPASSFILE}" ]] && export PGPASSFILE="${CNODE_HOME}"/priv/.pgpass
-}
-
-# Description : Exit with error message
-#             : $1 = Error message we'd like to display before exiting (function will prefix 'ERROR: ' to the argument)
-err_exit() {
-  printf "ERROR: %s\n" "${1}" >&2
-  echo -e "Exiting...\n" >&2
-  pushd -0 >/dev/null && dirs -c
-  exit 1
-}
-
-jqDecode() {
-  base64 --decode <<<$2 | jq -r "$1"
-}
-
-# Description : Check and apply updates to this docker-setup-grest.sh script.
-#             : $1 = name of script to update
-# return code : 0 = no update
-#             : 1 = update applied
-#             : 2 = update failed
-checkUpdate() {
-  [[ "${UPDATE_CHECK}" != "Y" ]] && return 0
-
-  if [[ ${BRANCH} != master && ${BRANCH} != alpha ]]; then
-    if ! curl -s -f -m "${CURL_TIMEOUT}" "https://api.github.com/repos/cardano-community/guild-operators/branches" | jq -e ".[] | select(.name == \"${BRANCH}\")" &>/dev/null; then
-      err_exit "The selected branch - ${BRANCH} - does not exist anymore."
-    fi
-  fi
-
-  # Get the script
-  if curl -s -f -m "${CURL_TIMEOUT}" -o "${PARENT}/${1}".tmp "${GREST_DOCKER_SCRIPTS_URL}/${1}" 2>/dev/null; then
-
-    # Make sure the script exists locally, else just rename
-    [[ ! -f "${PARENT}/${1}" ]] && mv -f "${PARENT}/${1}".tmp "${PARENT}/${1}" && chmod +x "${PARENT}/${1}" && return 0
-
-    # Full file comparison
-    if [[ ("$(sha256sum "${PARENT}/${1}" | cut -d' ' -f1)" != "$(sha256sum "${PARENT}/${1}.tmp" | cut -d' ' -f1)") ]]; then
-      cp "${PARENT}/${1}" "${PARENT}/${1}_bkp$(date +%s)"
-      mv "${PARENT}/${1}".tmp "${PARENT}/${1}"
-      chmod +x "${PARENT}/${1}"
-      echo -e "\n${1} update successfully applied! Old script backed up in this directory."
-      return 1
-    fi
-  fi
-  rm -f "${PARENT}/${1}".tmp
-  return 0
-}
-
-update_check() {
-  [[ ${SKIP_UPDATE} == Y ]] && return 0
-  echo "Checking for script updates..."
-
-  checkUpdate docker-setup-grest.sh
-  case $? in
-    1)
-      echo
-      $0 "$@" "-u"
-      exit 0
-      ;; # re-launch script with same args, skipping the update check
-    2) exit 1 ;;
-  esac
-}
-
-# Description : Setup grest schema, web_anon user, and genesis and control tables.
-#             : SQL sourced from grest-helper-scripts/db-scripts/basics.sql.
-setup_db_basics() {
-  local basics_sql_url="${GREST_DB_SCRIPTS_URL}/basics.sql"
-
-  if ! basics_sql=$(curl -s -f -m "${CURL_TIMEOUT}" "${basics_sql_url}" 2>&1); then
-    err_exit "Failed to get basic db setup SQL from ${basics_sql_url}"
-  fi
-  echo -e "Adding grest schema if missing and granting usage for web_anon..."
-  ! output=$(psql -U "${POSTGRES_USER}" -d "${POSTGRES_DB}" -v "ON_ERROR_STOP=1" -q <<<"${basics_sql}" 2>&1) && err_exit "${output}"
-  return 0
-}
-
-# Description : Deployment list (will only proceed if sync status check passes):
-#             : 1) grest DB basics - schema, web_anon user, basic grest-specific tables
-#             : 2) RPC endpoints - with SQL sourced from files/grest/rpc/**.sql
-#             : 3) Cached tables setup - with SQL sourced from files/grest/rpc/cached_tables/*.sql
-#             :    This includes table structure setup and caching existing data (for most tables).
-# : Some heavy cache tables are intentionally populated post-setup (point 4) to avoid long setup runtimes. -# : 4) Cron jobs - deploy cron entries to /etc/cron.d/ from files/grest/cron/jobs/*.sh -# : Used for updating cached tables data. -deploy_query_updates() { - echo "(Re)Deploying Postgres RPCs/views/schedule..." - check_db_status - if [[ $? -eq 1 ]]; then - err_exit "Please wait for Cardano DBSync to populate PostgreSQL DB at least until Mary fork, and then re-run this setup script with the -q flag." - fi - - echo -e " Downloading DBSync RPC functions from Guild Operators GitHub store..." - if ! rpc_file_list=$(curl -s -f -m "${CURL_TIMEOUT}" https://api.github.com/repos/cardano-community/guild-operators/contents/files/grest/rpc?ref=${BRANCH} 2>&1); then - err_exit "${rpc_file_list}" - fi - echo -e " (Re)Deploying GRest objects to DBSync..." - - # populate_genesis_table - - for row in $(jq -r '.[] | @base64' <<<"${rpc_file_list}"); do - if [[ $(jqDecode '.type' "${row}") = 'dir' ]]; then - echo -e "\n Downloading pSQL executions from subdir $(jqDecode '.name' "${row}")" - if ! rpc_file_list_subdir=$(curl -s -m "${CURL_TIMEOUT}" "https://api.github.com/repos/cardano-community/guild-operators/contents/files/grest/rpc/$(jqDecode '.name' "${row}")?ref=${BRANCH}"); then - echo -e " \e[31mERROR\e[0m: ${rpc_file_list_subdir}" && continue - fi - for row2 in $(jq -r '.[] | @base64' <<<"${rpc_file_list_subdir}"); do - deployRPC "${row2}" - done - else - deployRPC "${row}" - fi - done - - # setup_cron_jobs - - echo -e "\n All RPC functions successfully added to DBSync! For detailed query specs and examples, visit ${API_DOCS_URL}!\n" - echo -e "Please restart PostgREST before attempting to use the added functions" - echo -e " \e[94msudo systemctl restart postgrest.service\e[0m\n" - return 0 -} - -# Description : Check sync until Mary hard-fork. -check_db_status() { - if ! command -v psql &>/dev/null; then - err_exit "We could not find 'psql' binary in \$PATH , please ensure you've followed the instructions below:\n ${DOCS_URL}/Appendix/postgres" - fi - - if [[ "$(psql -qtAX -U "${POSTGRES_USER}" -d "${POSTGRES_DB}" -c "SELECT protocol_major FROM public.param_proposal WHERE protocol_major >= 4 ORDER BY protocol_major DESC LIMIT 1" 2>/dev/null)" == "" ]]; then - return 1 - fi - - return 0 -} - -deployRPC() { - file_name=$(jqDecode '.name' "${1}") - [[ -z ${file_name} || ${file_name} != *.sql ]] && return - dl_url=$(jqDecode '.download_url //empty' "${1}") - [[ -z ${dl_url} ]] && return - ! rpc_sql=$(curl -s -f -m "${CURL_TIMEOUT}" "${dl_url}" 2>/dev/null) && echo -e "\e[31mERROR\e[0m: download failed: ${dl_url%.json}.sql" && return 1 - echo -e " Deploying Function : \e[32m${file_name%.sql}\e[0m" - ! 
output=$(psql -U "${POSTGRES_USER}" -d "${POSTGRES_DB}" -v "ON_ERROR_STOP=1" <<<"${rpc_sql}" 2>&1) && echo -e "  \e[31mERROR\e[0m: ${output}"
-}
-
-########################Cron########################
-#####
-
-get_cron_job_executable() {
-  local job=$1
-  local job_path="${CRON_SCRIPTS_DIR}/${job}.sh"
-  local job_url="${REPO_URL_RAW}/files/grest/cron/jobs/${job}.sh"
-  is_file "${job_path}" && rm "${job_path}"
-  if curl -s -f -m "${CURL_TIMEOUT}" -o "${job_path}" "${job_url}"; then
-    echo -e "  Downloaded \e[32m${job_path}\e[0m"
-    chmod +x "${job_path}"
-  else
-    err_exit "Could not download ${job_url}"
-  fi
-}
-
-install_cron_job() {
-  local job=$1
-  local cron_pattern=$2
-  local cron_job_path="${CRON_DIR}/${CNODE_VNAME}-${job}"
-  local cron_scripts_path="${CRON_SCRIPTS_DIR}/${job}.sh"
-  local cron_log_path="${LOG_DIR}/${job}.log"
-  local cron_job_entry="${cron_pattern} ${USER} /bin/bash ${cron_scripts_path} >> ${cron_log_path}"
-  remove_cron_job "${job}"
-  sudo bash -c "{ echo '${cron_job_entry}'; } > ${cron_job_path}"
-}
-
-set_cron_variables() {
-  local job=$1
-  [[ ${POSTGRES_DB} != cexplorer ]] && sed -e "s@DB_NAME=.*@DB_NAME=${POSTGRES_DB}@" -i "${CRON_SCRIPTS_DIR}/${job}.sh"
-  # update last modified date of all json files to trigger cron job to process all
-  [[ -d "${HOME}/git/${CNODE_VNAME}-token-registry" ]] && find "${HOME}/git/${CNODE_VNAME}-token-registry" -mindepth 2 -maxdepth 2 -type f -name "*.json" -exec touch {} +
-}
-
-# Description : Alters the asset-registry-update.sh script to point to the testnet registry.
-set_cron_asset_registry_testnet_variables() {
-  sed -e "s@CNODE_VNAME=.*@CNODE_VNAME=${CNODE_VNAME}@" \
-    -e "s@TR_URL=.*@TR_URL=https://github.com/input-output-hk/metadata-registry-testnet@" \
-    -e "s@TR_SUBDIR=.*@TR_SUBDIR=registry@" \
-    -i "${CRON_SCRIPTS_DIR}/asset-registry-update.sh"
-}
-
-# Description : Setup grest-related cron jobs.
-setup_cron_jobs() {
-  ! is_dir "${CRON_SCRIPTS_DIR}" && mkdir -p "${CRON_SCRIPTS_DIR}"
-
-  get_cron_job_executable "stake-distribution-update"
-  set_cron_variables "stake-distribution-update"
-  install_cron_job "stake-distribution-update" "*/30 * * * *"
-
-  get_cron_job_executable "pool-history-cache-update"
-  set_cron_variables "pool-history-cache-update"
-  install_cron_job "pool-history-cache-update" "*/10 * * * *"
-
-  if [[ ${NWMAGIC} -eq 764824073 || ${NWMAGIC} -eq 1 || ${NWMAGIC} -eq 2 || ${NWMAGIC} -eq 141 ]]; then
-    get_cron_job_executable "asset-registry-update"
-    set_cron_variables "asset-registry-update"
-    # Point the update script to the testnet registry repo structure (default: mainnet)
-    [[ ${NWMAGIC} -eq 1 || ${NWMAGIC} -eq 2 || ${NWMAGIC} -eq 141 ]] && set_cron_asset_registry_testnet_variables
-    install_cron_job "asset-registry-update" "*/10 * * * *"
-  fi
-}
-
-######## Execution ########
-# Parse command line options
-while getopts :urqb: opt; do
-  case ${opt} in
-    u) SKIP_UPDATE='Y' ;;
-    r) RESET_GREST='Y' ;;
-    q) DB_QRY_UPDATES='Y' ;;
-    b) BRANCH="${OPTARG}" ;;
-    \?) usage ;;
-  esac
-done
-update_check "$@"
-set_environment_variables
-setup_db_basics
-[[ "${RESET_GREST}" == "Y" ]] && reset_grest
-[[ "${DB_QRY_UPDATES}" == "Y" ]] && deploy_query_updates -pushd -0 >/dev/null || err_exit -dirs -c diff --git a/files/docker/grest/scripts/postgres-entrypoint.sh b/files/docker/grest/scripts/postgres-entrypoint.sh deleted file mode 100755 index 971e772fc..000000000 --- a/files/docker/grest/scripts/postgres-entrypoint.sh +++ /dev/null @@ -1,385 +0,0 @@ -#!/usr/bin/env bash - -###################### Customisations - START ################################## -apt-get update > /dev/null 2>&1 -apt-get install -y socat curl gawk jq sudo postgresql-13-pglogical > /dev/null 2>&1 -apt-get install -y --no-install-recommends cron > /dev/null 2>&1 -curl https://access.2ndquadrant.com/api/repository/dl/default/release/deb | bash - -# To add a user without a password the command is the following: -# adduser --disabled-password --gecos '' guild# -sed -i 's/%sudo.*/%sudo ALL=(ALL) NOPASSWD:ALL/g' /etc/sudoers > /dev/null 2>&1 -adduser postgres sudo > /dev/null 2>&1 - - -# Sets the postgres user cronJobs -chown postgres:root /etc/cron.d/crontab > /dev/null 2>&1 -chmod 0660 /etc/cron.d/crontab > /dev/null 2>&1 -touch /var/log/cron.log > /dev/null 2>&1 -chmod 0660 /var/log/cron.log > /dev/null 2>&1 -chown postgres:root /var/log/cron.log > /dev/null 2>&1 -crontab -u postgres /etc/cron.d/crontab > /dev/null 2>&1 -sudo -u postgres crontab & - -# Listen for metrics via postgres user -socat TCP-LISTEN:8059,reuseaddr,fork SYSTEM:"echo HTTP/1.1 200 OK;SERVED=true bash /getmetrics.sh " & - -###################### Customisations - END ################################### - -###################### Official entrypoint ########################################## -set -Eeo pipefail -# TODO swap to -Eeuo pipefail above (after handling all potentially-unset variables) - -# usage: file_env VAR [DEFAULT] -# ie: file_env 'XYZ_DB_PASSWORD' 'example' -# (will allow for "$XYZ_DB_PASSWORD_FILE" to fill in the value of -# "$XYZ_DB_PASSWORD" from a file, especially for Docker's secrets feature) -file_env() { - local var="$1" - local fileVar="${var}_FILE" - local def="${2:-}" - if [ "${!var:-}" ] && [ "${!fileVar:-}" ]; then - echo >&2 "error: both $var and $fileVar are set (but are exclusive)" - exit 1 - fi - local val="$def" - if [ "${!var:-}" ]; then - val="${!var}" - elif [ "${!fileVar:-}" ]; then - val="$(<"${!fileVar}")" - fi - export "$var"="$val" - unset "$fileVar" -} - -# check to see if this file is being run or sourced from another script -_is_sourced() { - # https://unix.stackexchange.com/a/215279 - [ "${#FUNCNAME[@]}" -ge 2 ] && - [ "${FUNCNAME[0]}" = '_is_sourced' ] && - [ "${FUNCNAME[1]}" = 'source' ] -} - -# used to create initial postgres directories and if run as root, ensure ownership to the "postgres" user -docker_create_db_directories() { - local user - user="$(id -u)" - - mkdir -p "$PGDATA" - # ignore failure since there are cases where we can't chmod (and PostgreSQL might fail later anyhow - it's picky about permissions of this directory) - chmod 700 "$PGDATA" || : - - # ignore failure since it will be fine when using the image provided directory; see also https://github.com/docker-library/postgres/pull/289 - mkdir -p /var/run/postgresql || : - chmod 775 /var/run/postgresql || : - - # Create the transaction log directory before initdb is run so the directory is owned by the correct user - if [ -n "$POSTGRES_INITDB_WALDIR" ]; then - mkdir -p "$POSTGRES_INITDB_WALDIR" - if [ "$user" = '0' ]; then - find "$POSTGRES_INITDB_WALDIR" \! 
-user postgres -exec chown postgres '{}' + - fi - chmod 700 "$POSTGRES_INITDB_WALDIR" - fi - - # allow the container to be started with `--user` - if [ "$user" = '0' ]; then - find "$PGDATA" \! -user postgres -exec chown postgres '{}' + - find /var/run/postgresql \! -user postgres -exec chown postgres '{}' + - fi -} - -# initialize empty PGDATA directory with new database via 'initdb' -# arguments to `initdb` can be passed via POSTGRES_INITDB_ARGS or as arguments to this function -# `initdb` automatically creates the "postgres", "template0", and "template1" dbnames -# this is also where the database user is created, specified by `POSTGRES_USER` env -docker_init_database_dir() { - # "initdb" is particular about the current user existing in "/etc/passwd", so we use "nss_wrapper" to fake that if necessary - # see https://github.com/docker-library/postgres/pull/253, https://github.com/docker-library/postgres/issues/359, https://cwrap.org/nss_wrapper.html - local uid - uid="$(id -u)" - if ! getent passwd "$uid" &>/dev/null; then - # see if we can find a suitable "libnss_wrapper.so" (https://salsa.debian.org/sssd-team/nss-wrapper/-/commit/b9925a653a54e24d09d9b498a2d913729f7abb15) - local wrapper - for wrapper in {/usr,}/lib{/*,}/libnss_wrapper.so; do - if [ -s "$wrapper" ]; then - NSS_WRAPPER_PASSWD="$(mktemp)" - NSS_WRAPPER_GROUP="$(mktemp)" - export LD_PRELOAD="$wrapper" NSS_WRAPPER_PASSWD NSS_WRAPPER_GROUP - local gid - gid="$(id -g)" - echo "postgres:x:$uid:$gid:PostgreSQL:$PGDATA:/bin/false" >"$NSS_WRAPPER_PASSWD" - echo "postgres:x:$gid:" >"$NSS_WRAPPER_GROUP" - break - fi - done - fi - - if [ -n "$POSTGRES_INITDB_WALDIR" ]; then - set -- --waldir "$POSTGRES_INITDB_WALDIR" "$@" - fi - - eval 'initdb --username="$POSTGRES_USER" --pwfile=<(echo "$POSTGRES_PASSWORD") '"$POSTGRES_INITDB_ARGS"' "$@"' - - # unset/cleanup "nss_wrapper" bits - if [ "${LD_PRELOAD:-}" = '/usr/lib/libnss_wrapper.so' ]; then - rm -f "$NSS_WRAPPER_PASSWD" "$NSS_WRAPPER_GROUP" - unset LD_PRELOAD NSS_WRAPPER_PASSWD NSS_WRAPPER_GROUP - fi -} - -# print large warning if POSTGRES_PASSWORD is long -# error if both POSTGRES_PASSWORD is empty and POSTGRES_HOST_AUTH_METHOD is not 'trust' -# print large warning if POSTGRES_HOST_AUTH_METHOD is set to 'trust' -# assumes database is not set up, ie: [ -z "$DATABASE_ALREADY_EXISTS" ] -docker_verify_minimum_env() { - # check password first so we can output the warning before postgres - # messes it up - if [ "${#POSTGRES_PASSWORD}" -ge 100 ]; then - cat >&2 <<-'EOWARN' - WARNING: The supplied POSTGRES_PASSWORD is 100+ characters. - This will not work if used via PGPASSWORD with "psql". - https://www.postgresql.org/message-id/flat/E1Rqxp2-0004Qt-PL%40wrigleys.postgresql.org (BUG #6412) - https://github.com/docker-library/postgres/issues/507 - EOWARN - fi - if [ -z "$POSTGRES_PASSWORD" ] && [ 'trust' != "$POSTGRES_HOST_AUTH_METHOD" ]; then - # The - option suppresses leading tabs but *not* spaces. :) - cat >&2 <<-'EOE' - Error: Database is uninitialized and superuser password is not specified. - You must specify POSTGRES_PASSWORD to a non-empty value for the - superuser. For example, "-e POSTGRES_PASSWORD=password" on "docker run". - You may also use "POSTGRES_HOST_AUTH_METHOD=trust" to allow all - connections without a password. This is *not* recommended. 
-
-# print large warning if POSTGRES_PASSWORD is long
-# error if both POSTGRES_PASSWORD is empty and POSTGRES_HOST_AUTH_METHOD is not 'trust'
-# print large warning if POSTGRES_HOST_AUTH_METHOD is set to 'trust'
-# assumes database is not set up, ie: [ -z "$DATABASE_ALREADY_EXISTS" ]
-docker_verify_minimum_env() {
-	# check password first so we can output the warning before postgres
-	# messes it up
-	if [ "${#POSTGRES_PASSWORD}" -ge 100 ]; then
-		cat >&2 <<-'EOWARN'
-			WARNING: The supplied POSTGRES_PASSWORD is 100+ characters.
-			  This will not work if used via PGPASSWORD with "psql".
-			  https://www.postgresql.org/message-id/flat/E1Rqxp2-0004Qt-PL%40wrigleys.postgresql.org (BUG #6412)
-			  https://github.com/docker-library/postgres/issues/507
-		EOWARN
-	fi
-	if [ -z "$POSTGRES_PASSWORD" ] && [ 'trust' != "$POSTGRES_HOST_AUTH_METHOD" ]; then
-		# The - option suppresses leading tabs but *not* spaces. :)
-		cat >&2 <<-'EOE'
-			Error: Database is uninitialized and superuser password is not specified.
-			       You must specify POSTGRES_PASSWORD to a non-empty value for the
-			       superuser. For example, "-e POSTGRES_PASSWORD=password" on "docker run".
-
-			       You may also use "POSTGRES_HOST_AUTH_METHOD=trust" to allow all
-			       connections without a password. This is *not* recommended.
-
-			       See PostgreSQL documentation about "trust":
-			       https://www.postgresql.org/docs/current/auth-trust.html
-		EOE
-		exit 1
-	fi
-	if [ 'trust' = "$POSTGRES_HOST_AUTH_METHOD" ]; then
-		cat >&2 <<-'EOWARN'
-			********************************************************************************
-			WARNING: POSTGRES_HOST_AUTH_METHOD has been set to "trust". This will allow
-			         anyone with access to the Postgres port to access your database without
-			         a password, even if POSTGRES_PASSWORD is set. See PostgreSQL
-			         documentation about "trust":
-			         https://www.postgresql.org/docs/current/auth-trust.html
-			         In Docker's default configuration, this is effectively any other
-			         container on the same system.
-
-			         It is not recommended to use POSTGRES_HOST_AUTH_METHOD=trust. Replace
-			         it with "-e POSTGRES_PASSWORD=password" instead to set a password in
-			         "docker run".
-			********************************************************************************
-		EOWARN
-	fi
-}
-
-# usage: docker_process_init_files [file [file [...]]]
-#    ie: docker_process_init_files /always-initdb.d/*
-# process initializer files, based on file extensions and permissions
docker_process_init_files() {
-	# psql here for backwards compatibility "${psql[@]}"
-	psql=(docker_process_sql)
-
-	echo
-	local f
-	for f; do
-		case "$f" in
-			*.sh)
-				# https://github.com/docker-library/postgres/issues/450#issuecomment-393167936
-				# https://github.com/docker-library/postgres/pull/452
-				if [ -x "$f" ]; then
-					echo "$0: running $f"
-					"$f"
-				else
-					echo "$0: sourcing $f"
-					. "$f"
-				fi
-				;;
-			*.sql)
-				echo "$0: running $f"
-				docker_process_sql -f "$f"
-				echo
-				;;
-			*.sql.gz)
-				echo "$0: running $f"
-				gunzip -c "$f" | docker_process_sql
-				echo
-				;;
-			*.sql.xz)
-				echo "$0: running $f"
-				xzcat "$f" | docker_process_sql
-				echo
-				;;
-			*) echo "$0: ignoring $f" ;;
-		esac
-		echo
-	done
-}
-
-# Execute sql script, passed via stdin (or -f flag of psql)
-# usage: docker_process_sql [psql-cli-args]
-#    ie: docker_process_sql --dbname=mydb <<<'INSERT ...'
-#    ie: docker_process_sql -f my-file.sql
-#    ie: docker_process_sql <my-file.sql
-docker_process_sql() {
-	local query_runner=( psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --no-password --no-psqlrc )
-	if [ -n "$POSTGRES_DB" ]; then
-		query_runner+=( --dbname "$POSTGRES_DB" )
-	fi
-
-	PGHOST= PGHOSTADDR= "${query_runner[@]}" "$@"
-}
-
-# create initial database
-# uses environment variables for input: POSTGRES_DB
-docker_setup_db() {
-	local dbAlreadyExists
-	dbAlreadyExists="$(
-		POSTGRES_DB= docker_process_sql --dbname postgres --set db="$POSTGRES_DB" --tuples-only <<-'EOSQL'
-			SELECT 1 FROM pg_database WHERE datname = :'db' ;
-		EOSQL
-	)"
-	if [ -z "$dbAlreadyExists" ]; then
-		POSTGRES_DB= docker_process_sql --dbname postgres --set db="$POSTGRES_DB" <<-'EOSQL'
-			CREATE DATABASE :"db" ;
-		EOSQL
-		echo
-	fi
-}
-
-# Loads various settings that are used elsewhere in the script
-# This should be called before any other functions
-docker_setup_env() {
-	file_env 'POSTGRES_PASSWORD'
-
-	file_env 'POSTGRES_USER' 'postgres'
-	file_env 'POSTGRES_DB' "$POSTGRES_USER"
-	file_env 'POSTGRES_INITDB_ARGS'
-	: "${POSTGRES_HOST_AUTH_METHOD:=}"
-
-	declare -g DATABASE_ALREADY_EXISTS
-	# look specifically for PG_VERSION, as it is expected in the DB dir
-	if [ -s "$PGDATA/PG_VERSION" ]; then
-		DATABASE_ALREADY_EXISTS='true'
-	fi
-}
-
-# append POSTGRES_HOST_AUTH_METHOD to pg_hba.conf for "host" connections
-# all arguments will be passed along as arguments to `postgres` for getting the value of 'password_encryption'
-pg_setup_hba_conf() {
-	# default authentication method is md5 on versions before 14
-	# https://www.postgresql.org/about/news/postgresql-14-released-2318/
-	if [ "$1" = 'postgres' ]; then
-		shift
-	fi
-	local auth
-	# check the default/configured encryption and use that as the auth method
-	auth="$(postgres -C password_encryption "$@")"
-	: "${POSTGRES_HOST_AUTH_METHOD:=$auth}"
-	{
-		echo
-		if [ 'trust' = "$POSTGRES_HOST_AUTH_METHOD" ]; then
-			echo '# warning trust is enabled for all connections'
-			echo '# see https://www.postgresql.org/docs/12/auth-trust.html'
-		fi
-		echo "host all all all $POSTGRES_HOST_AUTH_METHOD"
-	} >>"$PGDATA/pg_hba.conf"
-}
-
-# start socket-only postgresql server for setting up or running scripts
-# all arguments will be passed along as arguments to `postgres` (via pg_ctl)
-docker_temp_server_start() {
-	if [ "$1" = 'postgres' ]; then
-		shift
-	fi
-
-	# internal start of server in order to allow setup using psql client
-	# does not listen on external TCP/IP and waits until start finishes
-	set -- "$@" -c listen_addresses='' -p "${PGPORT:-5432}"
-
-	PGUSER="${PGUSER:-$POSTGRES_USER}" \
-		pg_ctl -D "$PGDATA" \
-			-o "$(printf '%q ' "$@")" \
-			-w start
-}
-
-# stop postgresql server after done setting up user and running scripts
-docker_temp_server_stop() {
-	PGUSER="${PGUSER:-postgres}" \
-		pg_ctl -D "$PGDATA" -m fast -w stop
-}
-
-# check arguments for an option that would cause postgres to stop
-# return true if there is one
-_pg_want_help() {
-	local arg
-	for arg; do
-		case "$arg" in
-			# postgres --help | grep 'then exit'
-			# leaving out -C on purpose since it always fails and is unhelpful:
-			#   postgres: could not access the server configuration file "/var/lib/postgresql/data/postgresql.conf": No such file or directory
-			-'?' | --help | --describe-config | -V | --version)
-				return 0
-				;;
-		esac
-	done
-	return 1
-}
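-
-# Hypothetical usage sketch (editor illustration, not part of the upstream
-# script): _pg_want_help is what makes one-shot invocations bypass
-# initialization entirely, e.g.
-#   ie: docker run --rm <image> postgres --version
-#   ie: docker run --rm <image> postgres --describe-config
-# both match the case table above, so _main skips environment setup and the
-# temporary server and simply execs postgres to print and exit.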
_pg_want_help "$@"; then - docker_setup_env - # setup data directories and permissions (when run as root) - docker_create_db_directories - if [ "$(id -u)" = '0' ]; then - # then restart script as postgres user - exec gosu postgres "$BASH_SOURCE" "$@" - fi - - # only run initialization on an empty data directory - if [ -z "$DATABASE_ALREADY_EXISTS" ]; then - docker_verify_minimum_env - - # check dir permissions to reduce likelihood of half-initialized database - ls /docker-entrypoint-initdb.d/ >/dev/null - - docker_init_database_dir - pg_setup_hba_conf "$@" - - # PGPASSWORD is required for psql when authentication is required for 'local' connections via pg_hba.conf and is otherwise harmless - # e.g. when '--auth=md5' or '--auth-local=md5' is used in POSTGRES_INITDB_ARGS - export PGPASSWORD="${PGPASSWORD:-$POSTGRES_PASSWORD}" - docker_temp_server_start "$@" - - docker_setup_db - docker_process_init_files /docker-entrypoint-initdb.d/* - - docker_temp_server_stop - unset PGPASSWORD - - echo - echo 'PostgreSQL init process complete; ready for start up.' - echo - else - echo - echo 'PostgreSQL Database directory appears to contain a database; Skipping initialization' - echo - fi - fi - - exec "$@" -} - -if ! _is_sourced; then - _main "$@" -fi