From d18243155e72d8e6460c24a25e3136da075e61e8 Mon Sep 17 00:00:00 2001 From: rdlrt <3169068+rdlrt@users.noreply.github.com> Date: Sun, 21 Apr 2024 10:10:45 +0000 Subject: [PATCH] Update configs and node version support for 8.9.x (#1743) --- Build/node-cli/index.html | 51 ++++++++++++++----- docker/tips/index.html | 101 ++++++++++++++++++++++++++++++++++++++ search/search_index.json | 2 +- sitemap.xml | 76 ++++++++++++++-------------- sitemap.xml.gz | Bin 528 -> 527 bytes 5 files changed, 180 insertions(+), 50 deletions(-) diff --git a/Build/node-cli/index.html b/Build/node-cli/index.html index 391890e9e..e99167d82 100644 --- a/Build/node-cli/index.html +++ b/Build/node-cli/index.html @@ -1251,25 +1251,37 @@

Modify the node's config files (config.json and topology.json). If you'd like your relay node's connection details to be shared with other peers, you may turn the PeerSharing setting to true.
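For example, a relay's setting could be flipped with jq (a sketch, assuming jq is installed, the guild folder layout, and the PeerSharing key name as per cardano-node's P2P configuration):

``` bash
# Sketch: enable peer sharing on a relay's config.json
# (assumes jq, CNODE_HOME per guild layout, and the PeerSharing key)
jq '.PeerSharing = true' "${CNODE_HOME}/files/config.json" > /tmp/config.json \
  && mv /tmp/config.json "${CNODE_HOME}/files/config.json"
```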

  • Important

    -

    You'd want to set useLedgerAfterSlot to -1 for your Block Producing (Core) node - thereby, telling your Core node to remain in non-P2P mode.

    +

On your Block Producing (Core) node, you'd want to set useLedgerAfterSlot to -1, thereby telling your Core node to remain in non-P2P mode, and ensure PeerSharing is set to false.

    The resultant topology file could look something like below:

    {
    +  "bootstrapPeers": [
    +    {
    +      "address": "backbone.cardano.iog.io",
    +      "port": 3001
    +    },
    +    {
    +      "address": "backbone.mainnet.emurgornd.com",
    +      "port": 3001
    +    }
    +  ],
       "localRoots": [
         {
           "accessPoints": [
@@ -1277,19 +1289,36 @@ Modify the node's config files
         {"address": "xx.xx.xx.yy", "port": 6000 }
       ],
       "advertise": false,
+      "trustable": true,
       "valency": 2
+    },
+    {
+      "accessPoints": [
+        {"address": "node-dus.poolunder.com", "port": 6900, "pool": "UNDR", "location": "EU/DE/Dusseldorf" },
+        {"address": "node-syd.poolunder.com", "port": 6900, "pool": "UNDR", "location": "OC/AU/Sydney" },
+        {"address": "194.36.145.157", "port": 6000, "pool": "RDLRT", "location": "EU/DE/Baden" },
+        {"address": "152.53.18.60", "port": 6000, "pool": "RDLRT", "location": "NA/US/StLouis" },
+        {"address": "148.72.153.168", "port": 16000, "pool": "AAA", "location": "US/StLouis" },
+        {"address": "78.47.99.41", "port": 6000, "pool": "AAA", "location": "EU/DE/Nuremberg" },
+        {"address": "relay1-pub.ahlnet.nu", "port": 2111, "pool": "AHL", "location": "EU/SE/Malmo" },
+        {"address": "relay2-pub.ahlnet.nu", "port": 2111, "pool": "AHL", "location": "EU/SE/Malmo" },
+        {"address": "relay1.clio.one", "port": 6010, "pool": "CLIO", "location": "EU/IT/Milan" },
+        {"address": "relay2.clio.one", "port": 6010, "pool": "CLIO", "location": "EU/IT/Bozlano" },
+        {"address": "relay3.clio.one", "port": 6010, "pool": "CLIO", "location": "EU/IT/Bozlano" }
+      ],
+      "advertise": false,
+      "trustable": false,
+      "valency": 5,
+      "warmValency": 10
     }
   ],
   "publicRoots": [
     {
-      "accessPoints": [
-        {"address": "...", "port": 3001 },
-        {"address": "...", "port": 6000 }
-      ],
+      "accessPoints": [],
       "advertise": false
     }
   ],
-  "useLedgerAfterSlot": 67067585
+  "useLedgerAfterSlot": 119160667
 }

Once the above two files are updated, note that since you modified the files manually, there is always a chance of human error (eg: a missing comma or quote). Thus, we would recommend you to start the node interactively once again before proceeding.
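A quick way to catch such syntax errors before a restart is to validate the JSON (a sketch, assuming jq is installed and the default guild folder layout):

``` bash
# Sketch: validate both files parse as JSON before restarting the node
# (assumes jq and CNODE_HOME per the guild folder structure)
jq . "${CNODE_HOME}/files/config.json"   >/dev/null && echo "config.json OK"
jq . "${CNODE_HOME}/files/topology.json" >/dev/null && echo "topology.json OK"
```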

diff --git a/docker/tips/index.html b/docker/tips/index.html
index 60d11775c..08f8f2161 100644
--- a/docker/tips/index.html
+++ b/docker/tips/index.html
@@ -1246,6 +1314,39 @@

Restoring from a Backup⚓︎

When the container is started with the ENABLE_RESTORE environment variable set to Y, the container will automatically restore the latest backup from the /opt/cardano/cnode/backup/$NETWORK-db directory. The database will be restored at container start, and only if the backup directory is larger than the db directory.
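For example (a sketch - the image name and volume layout follow the guild docker guide and may differ in your setup):

``` bash
# Sketch: start the container with automatic restore enabled
# (image name and mounts are assumptions - adjust to your deployment)
docker run -d --name cnode \
  --env NETWORK=mainnet \
  --env ENABLE_RESTORE=Y \
  -v /opt/cardano/cnode/backup:/opt/cardano/cnode/backup \
  -v /opt/cardano/cnode/db:/opt/cardano/cnode/db \
  cardanocommunity/cardano-node
```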

    +

    Configuration Update Check Functionality⚓︎

    +

The container now includes a static copy of each network's configuration files (Mainnet, Preprod, Preview, Sanchonet, and Guild networks). The NETWORK environment variable passed into the container determines which configuration files are copied into $CNODE_HOME/files.

    +

The UPDATE_CHECK environment variable controls whether the container updates these configuration files from GitHub before starting. By default, the container has the environment variable set to UPDATE_CHECK=N, meaning the container uses the configuration files it was built with. This can be overridden either persistently or dynamically.

    +

    Persistently updating configuration files⚓︎

    +

To always update the configuration files from GitHub, set the UPDATE_CHECK environment variable when creating the container by using the --env option, for example --env UPDATE_CHECK=Y.

    +

To always update the configuration files from a specific GitHub account, set the G_ACCOUNT environment variable when creating the container by using the --env option, for example --env G_ACCOUNT=gh-fork-user.
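Putting the two together (a sketch - the image name is an assumption; adjust to your deployment):

``` bash
# Sketch: always refresh configs/scripts from a fork at container start
docker run -d --name cnode \
  --env NETWORK=preprod \
  --env UPDATE_CHECK=Y \
  --env G_ACCOUNT=gh-fork-user \
  cardanocommunity/cardano-node   # image name assumed
```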

    +

[!NOTE] There is no way to change the environment variable of an already running container. To roll back the configuration files and scripts, stop and remove the container and start it without setting the environment variable.

    +

    Dynamically updating configuration files⚓︎

    +

    Set an environment file during create/run using --env-file=file, for example --env-file=/opt/cardano/cnode/.env.
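For example (a sketch - the env file simply contains KEY=VALUE lines):

``` bash
# Sketch: supply UPDATE_CHECK via an env file at create/run time
cat > /opt/cardano/cnode/.env <<'EOF'
UPDATE_CHECK=Y
EOF
docker run -d --name cnode --env-file=/opt/cardano/cnode/.env \
  --env NETWORK=mainnet cardanocommunity/cardano-node   # image name assumed
```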

    + +

To roll back the configuration files to the built-in versions, remove the UPDATE_CHECK=Y or set it to UPDATE_CHECK=N in the environment file. The static configuration files in the container will be used; however, the scripts will remain updated. If you want both the configuration files and scripts to be rolled back, you will need to stop and remove the container and create a new one.

    +

    Building Images from Forked Repositories⚓︎

    +

    Run the Docker Image GitHub Action to build and push images to the ghcr.io registry.
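If you prefer the CLI over the web UI, the workflow can be dispatched with the gh tool (a sketch - the workflow name here is an assumption; check the fork's .github/workflows for the actual name):

``` bash
# Sketch: trigger the image build workflow on a fork via GitHub CLI
# ("Docker Image" is an assumed workflow name - verify before use)
gh workflow run "Docker Image" --repo gh-fork-user/guild-operators
```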

    + diff --git a/search/search_index.json b/search/search_index.json index fdfa03951..c1631d8b2 100644 --- a/search/search_index.json +++ b/search/search_index.json @@ -1 +1 @@ -{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Home","text":"

This documentation site (rather, the repository itself) is created by some well-known and experienced community members and contains instructions/information about various guild tools which simplify stake-ops (setting up, managing and monitoring pools) for operators. Note that the guides are present to help you simplify your tasks - but as an entity responsible for creating blocks on a financial platform, we expect some basic pre-requisite skill sets, at a professional level, before entering the portal:

Everyone is welcome to contribute to the repository (via documentation, testing, code, videos, etc). Our aim is to work together and reduce confusion rather than hosting 100 versions of documentation - each marketing their own pool in some way.

    "},{"location":"#support","title":"Support","text":"

    The Telegram Support channel is used to announce new releases and changes to the code base. This is also the place to ask general questions regarding the documentation and scripts on this site.

To report bugs and issues with scripts and documentation, please open a GitHub Issue. Feature requests are best opened as a discussion thread.

    "},{"location":"#getting-started","title":"Getting Started","text":"

    Use the sidebar to navigate through the topics. Note that the instructions assume the folder structure as per here.

Again, feedback/contributions and ownership of tasks are always welcome. If you're interested in collaborating regularly, make a start - and before you know it, you'll be part of the guild.

    "},{"location":"basics/","title":"Basics","text":""},{"location":"basics/#architecture","title":"Architecture","text":"

The architecture for the various components is already described at docs.cardano.org by CF/IOHK. We will not reinvent the wheel.

    "},{"location":"basics/#manual-software-pre-requirements","title":"Manual Software Pre-Requirements","text":"

While we do not intend to hand out step-by-step instructions, the tools are often misused as a shortcut to avoid ensuring the base skill sets mentioned on the home page. Some common gotchas that we often find SPOs missing out on:

    "},{"location":"basics/#pre-requisites","title":"Pre-Requisites","text":"

    Reminder !!

You're expected to run the commands below from the same session, using the same working directories as indicated, and using a non-root user with sudo access. You are expected to be familiar with this as part of the pre-requisite skill sets for stake pool operators.

    "},{"location":"basics/#os-prereqs","title":"Set up OS packages, folder structure and fetch files from repo","text":"

The pre-requisites for Linux systems are automated into a single script. To download the pre-requisites script, execute the below:

    mkdir \"$HOME/tmp\";cd \"$HOME/tmp\"\n# Install curl\n# CentOS / RedHat - sudo dnf -y install curl\n# Ubuntu / Debian - sudo apt -y install curl\ncurl -sS -o guild-deploy.sh https://raw.githubusercontent.com/cardano-community/guild-operators/master/scripts/cnode-helper-scripts/guild-deploy.sh\nchmod 755 guild-deploy.sh\n

Please familiarise yourself with the syntax of guild-deploy.sh before proceeding. The usage syntax can be checked using ./guild-deploy.sh -h, sample output below:

Usage: guild-deploy.sh [-n <mainnet|preprod|guild|preview>] [-p path] [-t <name>] [-b <branch>] [-u] [-s [p][b][l][m][f][d][c][o][w][x]]\nSet up dependencies for building/using common tools across cardano ecosystem.\nThe script will always update dynamic content from existing scripts retaining existing user variables\n\n-n    Connect to specified network instead of mainnet network (Default: connect to cardano mainnet network) eg: -n guild\n-p    Parent folder path underneath which the top-level folder will be created (Default: /opt/cardano)\n-t    Alternate name for top level folder - only alpha-numeric chars allowed (Default: cnode)\n-b    Use alternate branch of scripts to download - only recommended for testing/development (Default: master)\n-u    Skip update check for script itself\n-s    Selective Install, only deploy specific components as below:\n  p   Install common pre-requisite OS-level Dependencies for most tools on this repo (Default: skip)\n  b   Install OS level dependencies for tools required while building cardano-node/cardano-db-sync components (Default: skip)\n  l   Build and Install libsodium fork from IO repositories (Default: skip)\n  m   Download latest (released) binaries for mithril-signer, mithril-client (Default: skip)\n  f   Force overwrite entire content of scripts and config files (backups of existing ones will be created) (Default: skip)\n  d   Download latest (released) binaries for bech32, cardano-address, cardano-node, cardano-cli, cardano-db-sync and cardano-submit-api binaries (Default: skip)\n  c   Install/Upgrade CNCLI binary (Default: skip) # (1)!\n  o   Install/Upgrade Ogmios Server binary (Default: skip)\n  w   Install/Upgrade Cardano Hardware CLI (Default: skip)\n  x   Install/Upgrade Cardano Signer binary (Default: skip)\n
    1. If you receive an error for glibc, it would likely be due to the build mismatch between pre-compiled binary and your OS, which is not uncommon. You may need to compile cncli manually on your OS as per instructions here - make sure to copy the output binary to \"${HOME}/.local/bin\" folder.

This script uses opt-in election of what you'd like the script to do (as opposed to the previous version, which used to try and auto-detect versions). The defaults without any arguments will only update the static part of the script contents for you. A typical example install of most components, without overwriting the static part of existing files, for the preview network would be:

    ./guild-deploy.sh -b master -n preview -t cnode -s pdlcowx\n. \"${HOME}/.bashrc\"\n

If, instead of downloading binaries, you'd want to build the components yourself, you could use:

    ./guild-deploy.sh -b master -n preview -t cnode -s pblcowx\n. \"${HOME}/.bashrc\"\n

    Lastly, if you'd want to update your scripts but not install any additional dependencies, you may simply run:

    ./guild-deploy.sh -b master -n preview -t cnode\n
    "},{"location":"basics/#folder-structure","title":"Folder structure","text":"

    Running the script above will create the folder structure as per below, for your reference. You can replace the top level folder /opt/cardano/cnode by editing the value of CNODE_HOME in ~/.bashrc and $CNODE_HOME/files/env files:

    /opt/cardano/cnode            # Top-Level Folder\n\u251c\u2500\u2500 ...\n\u251c\u2500\u2500 files                     # Config, genesis and topology files\n\u2502   \u251c\u2500\u2500 ...\n\u2502   \u251c\u2500\u2500 byron-genesis.json    # Byron Genesis file referenced in config.json\n\u2502   \u251c\u2500\u2500 shelley-genesis.json  # Genesis file referenced in config.json\n\u2502   \u251c\u2500\u2500 alonzo-genesis.json    # Alonzo Genesis file referenced in config.json\n\u2502   \u251c\u2500\u2500 config.json           # Config file used by cardano-node\n\u2502   \u2514\u2500\u2500 topology.json         # Map of chain for cardano-node to boot from\n\u251c\u2500\u2500 db                        # DB Store for cardano-node\n\u251c\u2500\u2500 guild-db                  # DB Store for guild-specific tools and additions (eg: cncli, cardano-db-sync's schema)\n\u251c\u2500\u2500 logs                      # Logs for cardano-node\n\u251c\u2500\u2500 priv                      # Folder to store your keys (permission: 600)\n\u251c\u2500\u2500 scripts                   # Scripts to start and interact with cardano-node\n\u2514\u2500\u2500 sockets                   # Socket files created by cardano-node\n
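For example, using the -p and -t flags described in the usage above, the same tree could be rooted elsewhere (a sketch with hypothetical paths):

``` bash
# Sketch: deploy under /home/user/mynode instead of /opt/cardano/cnode,
# via -p (parent path) and -t (top-level folder name)
./guild-deploy.sh -b master -n preview -p /home/user -t mynode
. "${HOME}/.bashrc"   # reload so CNODE_HOME points at the new location
```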
    "},{"location":"build/","title":"Overview","text":"

The documentation here uses instructions from Intersect MBO repositories as a foundation, with additional info contributed where appropriate. Note that not everyone needs to build each component. You can refer to architecture to understand and qualify which of the components built by IO you want to run.

    "},{"location":"build/#components","title":"Components","text":"

    For most Pool Operators, simply building cardano-node should be enough. Use the below to decide whether you need other components:

graph TB A([Interact with HD Wallets locally]) B([Explore blockchain locally]) C([Easy pool-ops and fund management]) D([Create Custom Assets]) E([Monitor node using Terminal UI]) F([Sign/verify any data using crypto keys]) N(Node) O(Ogmios) P(gRest/Koios) Q(DBSync) R(Wallet) S(CNTools) T(Tx Submit API) U(GraphQL) V(OfflineMetadataTools) X(gLiveView) Y(cardano-signer) Z[(PostgreSQL)] N --x C --x S N --x D --x S & V N --x E --x X N --x B B --x U --x Q B --x P --x Q P --x O P --x T F ---x Y N --x A --x R Q --x Z

    Important

We strongly prefer use of gRest over GraphQL components due to performance, security, simplicity, control and - most importantly - consistency benefits. Please refer to the official documentation if you're interested in GraphQL or Cardano-Rest components instead.

    Note

The instructions are intentionally limited to stack/cabal to avoid wait times/availability of nix/docker files on a rapidly developing codebase - this also helps us avoid managing multiple versions of instructions.

    "},{"location":"build/#description-for-components-built-by-community","title":"Description for components built by community","text":""},{"location":"build/#cntools","title":"CNTools","text":"

    A swiss army knife for pool operators, primarily built by Ola, to simplify typical operations regarding their wallet keys and pool management. You can read more about it here

    "},{"location":"build/#gliveview","title":"gLiveView","text":"

    A local node monitoring tool, primarily built by Ola, to use in addition to remote monitoring tools like Prometheus/Grafana, Zabbix or IOG's RTView. This is especially useful when moving to a systemd deployment - if you haven't done so already - as it offers an intuitive UI to monitor the node status. You can read more about it here

    "},{"location":"build/#topology-updater","title":"Topology Updater","text":"

A temporary node-to-node discovery solution, run by Markus, initially started to bridge the gap created while awaiting completion of P2P on the cardano network. It has since become an important lifeline for network health - allowing everyone to activate their relay nodes without having to postpone and wait for manual topology completion requests. You can read more about it here

    "},{"location":"build/#koiosgrest","title":"Koios/gRest","text":"

A full-featured local query layer node to explore blockchain data (via dbsync) using standardised pre-built queries served via API, as per the standard from Koios - for which users can opt to participate in the elastic query layer. You can read more about build steps here and reference API endpoints here

    "},{"location":"build/#ogmios","title":"Ogmios","text":"

    A lightweight bridge interface for cardano-node. It offers a WebSockets API that enables local clients to speak Ouroboros' mini-protocols via JSON/RPC. You can read more about it here

    "},{"location":"build/#cncli","title":"CNCLI","text":"

    A CLI tool written in Rust by Andrew Westberg for low-level communication with cardano-node. It is commonly used by SPOs to check their leader logs (integrates with CNTools as well as gLiveView) or to send their pool's health information to https://pooltool.io. You can read more about it here

    "},{"location":"build/#cardano-signer","title":"Cardano Signer","text":"

    A tool written by Martin to sign/verify data (hex, text or binary) using cryptographic keys to generate data as per CIP-8 or CIP-36 standards. You can read more about it here

    "},{"location":"catalystf11/","title":"Catalystf11","text":""},{"location":"catalystf11/#marlowehub-unifying-platform-for-marlowe-smart-contracts-phase-1-smart-contracts","title":"MarloweHub: Unifying Platform for Marlowe Smart Contracts - Phase 1 - Smart Contracts","text":"

    Category: Concept Applicant: mike (pooltool.io) Requested funds: \u20b3100,000.00 Links: Ideascale, Lidonation

    "},{"location":"catalystf11/#adastat-cardano-explorer-open-source-improved-reboot-towards-a-first-class-community-blockchain-explorer","title":"AdaStat Cardano Explorer - Open Source Improved Reboot towards a first-class community blockchain explorer","text":"

    Category: Product Applicant: Dmytro Stashenko (adastat.net) Requested funds: \u20b3180,300.00 Links: Ideascale, Lidonation

    "},{"location":"catalystf11/#adahold-decentralized-price-can-only-go-up-token-solution-for-true-ada-hodlers-smart-contract","title":"AdaHold: Decentralized Price-Can-Only-Go-Up Token, Solution For TRUE Ada Hodlers - Smart Contract","text":"

    Category: Concept Applicant: Dmytro Stashenko (adastat.net) Requested funds: \u20b399,700.00 Links: Ideascale, Lidonation

    "},{"location":"catalystf11/#sundae-labs-next-gen-uplc-debugger-with-aiken-integration","title":"Sundae Labs Next-Gen UPLC Debugger with Aiken Integration","text":"

    Category: Developers Applicant: Dan Gonzalez (sundae.fi) Requested funds: \u20b3140,000.00 Links: Ideascale, Lidonation

    "},{"location":"catalystf11/#margin-pool-enhanced-liquidity-for-margin-trading-on-sundaeswap","title":"Margin Pool: Enhanced Liquidity for Margin Trading on SundaeSwap","text":"

    Category: Solution Applicant: Dan Gonzalez (sundae.fi) Requested funds: \u20b3300,000.00 Links: Ideascale, Lidonation

    "},{"location":"catalystf11/#regulated-and-permissioned-defi-with-sundae-and-kora-labs","title":"Regulated and Permissioned DeFi with Sundae and Kora Labs","text":"

    Category: Concept Applicant: Dan Gonzalez (sundae.fi) Requested funds: \u20b3100,000.00 Links: Ideascale, Lidonation

    "},{"location":"catalystf11/#sundae-labs-comprehensive-specification-development-for-gummiworm-protocol-on-cardano","title":"Sundae Labs Comprehensive Specification Development for Gummiworm Protocol on Cardano","text":"

    Category: Concept Applicant: Dan Gonzalez (sundae.fi) Requested funds: \u20b3100,000.00 Links: Ideascale, Lidonation

    "},{"location":"catalystf11/#nftcdnio-universal-nft-viewer-api-musicvideoweb3d","title":"[nftcdn.io] Universal NFT Viewer API (Music+Video+Web+3D+\u2026)","text":"

    Category: Product Applicant: Smaug (pool.pm) Requested funds: \u20b3299,999.00 Links: Ideascale, Lidonation

    "},{"location":"catalystf11/#nftcdnio-nsfw-nft-detection-for-marketplaces-wallets-explorers","title":"[nftcdn.io] NSFW NFT Detection for Marketplaces, Wallets & Explorers","text":"

    Category: Product Applicant: Smaug (pool.pm) Requested funds: \u20b3149,999.00 Links: Ideascale, Lidonation

    "},{"location":"catalystf11/#cardanoscan-analytics-charts","title":"Cardanoscan Analytics Charts","text":"

    Category: Product Applicant: Strica (cardanoscan.io) Requested funds: \u20b344,000.00 Links: Ideascale, Lidonation

    "},{"location":"catalystf11/#cardanoscan-api-javascript-sdk","title":"Cardanoscan API Javascript SDK","text":"

    Category: Developers Applicant: Strica (cardanoscan.io) Requested funds: \u20b364,000.00 Links: Ideascale, Lidonation

    "},{"location":"catalystf11/#integrate-keystone-hardware-wallet-into-typhon","title":"Integrate Keystone Hardware Wallet into Typhon","text":"

    Category: Product Applicant: Strica (cardanoscan.io) Requested funds: \u20b384,000.00 Links: Ideascale, Lidonation

    "},{"location":"catalystf11/#add-support-for-marlowe-on-cardanoscan","title":"Add support for Marlowe on Cardanoscan","text":"

    Category: Product Applicant: Strica (cardanoscan.io) Requested funds: \u20b3133,000.00 Links: Ideascale, Lidonation

    "},{"location":"catalystf11/#cardanoscan-data-info-bubbles","title":"Cardanoscan data info bubbles","text":"

    Category: Ecosystem Applicant: Strica (cardanoscan.io) Requested funds: \u20b335,000.00 Links: Ideascale, Lidonation

    "},{"location":"catalystf11/#create-a-cnt-marketplace-on-norwegian-block-exchange-nbxcom","title":"Create a CNT marketplace on Norwegian Block Exchange (NBX.COM)","text":"

    Category: Product Applicant: Eystein Hansen (nbx.com) Requested funds: \u20b3145,000.00 Links: Ideascale, Lidonation

    "},{"location":"contributors/","title":"Contributors","text":"

    Everyone is welcome to contribute to the guide, as well as the repository. Below is just a thank you to people who have been contributing consistently:

    Adam Chris Damjan Homer Markus OCG Ola Ahlman Pal Dorogi Papacarp PegasusPool Psychomb RdLrT RedOracle SmaugPool

To start contributing, simply visit the GitHub repository and raise an Issue/Pull Request

    "},{"location":"grest-meets/","title":"GRest Meeting summaries","text":"

    Thank you all for joining and contributing to the project

    Below you can find a short summary of every GRest meeting held, both for logging purposes and for those who were not able to attend.

    "},{"location":"grest-meets/#participants","title":"Participants:","text":"Participant 16Sep2021 02Sep2021 26Aug2021 19Aug2021 12Aug2021 29Jul2021 22Jul2021 15Jul2021 09Jul2021 02Jul2021 25Jun2021 Damjan Homer Markus Ola RdLrT Red Papacarp Paddy GimbaLabs 16Sep2021 02Sep2021 26Aug2021 19Aug2021 12Aug2021 29Jul2021 22Jul2021 15Jul2021 09Jul2021 02Jul2021

    After the initial stand-up updates from participants, we went through the entire Trello board, updating/deleting existing tickets and creating some new ones.

    25Jun2021"},{"location":"grest-meets/#scheduling-running-update-queries","title":"Scheduling running update queries","text":""},{"location":"grest-meets/#refactor-of-queries","title":"Refactor of queries","text":""},{"location":"grest-meets/#postgres-tuning","title":"postgres tuning","text":""},{"location":"grest-meets/#updates","title":"Updates","text":""},{"location":"grest-meets/#queries","title":"Queries","text":""},{"location":"grest-meets/#problems","title":"Problems","text":""},{"location":"grest-meets/#actions","title":"Actions","text":""},{"location":"grest-meets/#queries_1","title":"Queries","text":""},{"location":"grest-meets/#transaction-submission-feature","title":"Transaction submission feature","text":""},{"location":"grest-meets/#db-replication-presentation-by-redoracle","title":"DB replication presentation by Redoracle","text":""},{"location":"grest-meets/#process-for-upgrading-our-instances","title":"Process for upgrading our instances:","text":""},{"location":"grest-meets/#queries_2","title":"Queries:","text":""},{"location":"grest-meets/#stake-distribution","title":"Stake distribution","text":""},{"location":"grest-meets/#tx-history","title":"Tx History","text":""},{"location":"grest-meets/#problems_1","title":"PROBLEMS","text":""},{"location":"grest-meets/#actions_1","title":"ACTIONS","text":""},{"location":"grest-meets/#problems_2","title":"PROBLEMS","text":""},{"location":"grest-meets/#actions_2","title":"ACTIONS","text":""},{"location":"grest-meets/#problems_3","title":"PROBLEMS","text":""},{"location":"grest-meets/#actions_3","title":"ACTIONS","text":"
    1. Team

    2. Individual

    "},{"location":"grest-meets/#introduction-for-new-joiner-paddy","title":"Introduction for new joiner - Paddy","text":""},{"location":"grest-meets/#problems_4","title":"Problems","text":""},{"location":"grest-meets/#action-items","title":"Action Items","text":""},{"location":"grest-meets/#deployment-scripts","title":"Deployment scripts","text":"

Ola added automatic deployment of services to the scripts last week. We added new tasks on the Trello ticket, including flags for multiple networks (guild, testnet, mainnet), the haproxy service dynamically creating hosts, and doc updates. Overall, the script works well, with some manual interaction still required at the moment.

    "},{"location":"grest-meets/#supported-networks","title":"Supported Networks","text":"

    Just for the record here, a 16GB (or even 8GB) instance is enough to support both testnet and guild networks.

    "},{"location":"grest-meets/#db-sync-versioning","title":"db-sync versioning","text":"

    We agreed to use the release/10.1.x branch which is not yet released but built to include Alonzo migrations to avoid rework later. This version does require Alonzo config and hash to be in the node's config.json. This has to be done manually and the files are available here. Once fully released, all members should rebuild the released version to ensure each instance is running the same code.

    "},{"location":"grest-meets/#dns-naming","title":"DNS naming","text":"

    For the DNS setup ticket, we started to think about the instance names for the 2 DNS instances (orange in the graph). Submissions for names will be made in the Telegram group, and will probably make a poll once we have the entries finalised.

    "},{"location":"grest-meets/#monitoring-system","title":"Monitoring System","text":"

    Priyank started setting up the monitoring on his instance which can then easily be switched to a separate monitoring instance. We agreed to use Prometheus / Grafana combo for data source / visualisation. We'll probably need to create some custom archiving of data to keep it long term as Prometheus stores only the last 30 days of data.

    "},{"location":"grest-meets/#next-meeting","title":"Next meeting","text":"

We would like to make Friday @ 07:00 UTC the standard time and keep meetings at weekly frequency. A poll will still be created for the next weeks, but if there are no objections / requests for switching the time around (which we have not had so far), we can go ahead with making Friday the standard - with polls no longer required and only reminders / Google invites sent every week.

    "},{"location":"grest-meets/#deployment-scripts_1","title":"Deployment scripts","text":"

    During the last week, work has been done on deployment scripts for all services (db-sync, gRest and haproxy) -> this is now in testing with updated instructions on trello. Everybody can put their name down on the ticket to signify when the setup is complete and note down any comments for bugs/improvements. This is the main priority at the moment as it would allow us to start transferring our setups to mainnet.

    "},{"location":"grest-meets/#switch-to-mainnet","title":"Switch to Mainnet","text":"

    Following on from that, we created a ticket for starting to set up mainnet instances -> we can use 32GB RAM to start and increase later. While making sure everything works against the guild network is priority, people are free to start on this as well as we anticipate we are almost ready for the switch.

    "},{"location":"grest-meets/#supported-networks_1","title":"Supported Networks","text":"

    This brings me to another discussion point which is on which networks are to be supported. After some discussion, it was agreed to keep beefy servers for mainnet, and have small independent instances for testnet maintained by those interested, while guild instance is pretty lightweight and useful to keep.

    "},{"location":"grest-meets/#monitoring-system_1","title":"Monitoring System","text":"

    The ticket for creating a centralised monitoring system was discussed and updated. I would say it would be good to have at least a basic version of the system in place around the time we switch to mainnet. The system could eventually serve for: - analysis of instance - performances and subsequent tuning - endpoints usage - anticipation of system requirements increases - etc.

    I would say that this should be an important topic of the next meeting to come up with an approach on how we will structure this system so that we can start building it in time for mainnet switch.

    "},{"location":"grest-meets/#handling-ssl","title":"Handling SSL","text":"

    Enabling SSL was agreed to not be required by each instance, but is optional and documentation should be created for how to automate the process of renewing SSL certificates for those wishing to add it to their instance. The end user facing endpoints \"Instance Checker\" will of course be SSL-enabled.

    "},{"location":"grest-meets/#next-meeting_1","title":"Next meeting","text":"

    We somewhat agreed to another meeting next week again at the same time, but some participants aren't 100% for availability. Friday at 07:00 UTC might be a good standard time we hold on to, but I will make a poll like last time so that we can get more info before confirming the meeting.

    "},{"location":"grest-meets/#meeting-structure","title":"Meeting Structure","text":"

    As this was the first meeting, at the start we discussed about the meeting structure. In general, we agreed to something like listed below, but this can definitely change in the future:

1) 2-liner (60s) round the table stand-ups by everyone to sync up on what they were doing / are planning to do / mention struggles etc. This itself often sparks discussions. 2) going through the Trello board tasks with the intention of discussing and possibly assigning them to individuals / smaller groups (maybe 1-2-3 people choose to work together on a single task)

    "},{"location":"grest-meets/#stand-ups","title":"Stand-ups","text":"

    We then proceeded to give a status of where we are individually in terms of what's been done, a summary below:

    "},{"location":"grest-meets/#main-discussion-points","title":"Main discussion points","text":"
    1. Directory structure on the repo -> General agreement is to have anything related to db-sync/postgREST separated from the current cnode-helper-scripts directory. We can finalise the end locations of files a bit later, for now intent should be to simply add them all to /files/dbsync folder. prereqs.sh addendum can be done once artifacts are finalised (added a Trello ticket for tracking).
2. DNS/haproxy configurations: We have two options: a. controlled approach for endpoints - wherein there is a layer of haproxy that will load balance and ensure tip being in sync for individual providers (individuals can provide haproxy OR gRest instances). b. completely decentralised - each client to maintain a haproxy endpoint, which fails over to another node if it's not up to recent tip. I think that in general, it was agreed to use a hybrid approach. Details are captured in the diagram here. DNS endpoint can be reserved post initial testing of haproxy-agent against mainnet nodes.
    3. Internal monitoring system This would be important and useful and has not been mentioned before this meeting (as far as I know). Basically, a system for monitoring all of our instances together and also handling alerts. Not only for ensuring good quality of service, but also for logging and inspection of short- and long-term trends to better understand what's happening. A ticket is added to trello board
    "},{"location":"grest-meets/#next-meeting_2","title":"Next meeting","text":"

    All in all, I think we saw that there is need for these meetings as there are a lot of things to discuss and new ideas come up (like the monitoring system). We went for over an hour (~1h15min) and still didn't have enough time to go through the board, we basically only touched the DNS/haproxy part of the board. This tells me that we are in a stage where more frequent meetings are required, weekly instead of biweekly, as we are in the initial stage and it's important to build things right from the start rather than having to refactor later on. With that, the participants in general agreed to another meeting next week, but this will be confirmed in the TG chat and the times can be discussed then.

    "},{"location":"sidebar/","title":"Tree","text":""},{"location":"upgrade/","title":"Upgrade","text":"One-Time major upgrade for Koios Scripts from 20-Jan-2023 (expand for details)

The scripts on the guild-operators repository have gone through quite a few changes to accommodate the below:

Some of the above required us to add breaking changes to some scripts, but hopefully the above explains the premise for those changes. To ease this one-time upgrade process for existing deployments, we have tried to come up with the guide below; feel free to edit this file to improve the documents based on your experience. Again, apologies in advance to those who do not agree with the above changes (the old code would of course remain unimpacted at tag legacy-scripts, so if you'd like to stick to the old scripts, you can use -b legacy-scripts for your tools to switch back).

    "},{"location":"upgrade/#steps-for-ugrading","title":"Steps for Ugrading","text":"

    Warning

    Make sure you go through upgrade steps for your setup in a non-mainnet environment first!

    Remember

    Please add any environment-specific parameters (eg: custom top level folder, network flag, etc) to the execution command below, similar to prereqs.sh (check new syntax using guild-deploy.sh -h)

    mkdir \"$HOME/tmp\";cd \"$HOME/tmp\"\ncurl -sS -o guild-deploy.sh https://raw.githubusercontent.com/cardano-community/guild-operators/master/scripts/cnode-helper-scripts/guild-deploy.sh\nchmod 700 guild-deploy.sh\n./guild-deploy.sh -s f -b master\n
    source \"${HOME}\"/.bashrc\necho \"${PATH}\"\n

You can move the binaries by using the mv command (for example, if you don't have any other files in these folders, you can use the command below):

    Note

Ideally, you should shut down services (eg: cnode, cnode-dbsync, etc) prior to running the below to ensure they run from the new location (you can also re-deploy them if you haven't done so in a while, eg: ./cnode.sh -d). At the end of the guide, you can start them back up.

    mv -t \"${HOME}\"/.local/bin/ \"${HOME}\"/.cabal/bin/* \"${HOME}\"/.cargo/bin/* \"${HOME}\"/bin/*\n
    whereis bech32 cardano-address cardano-cli cardano-db-sync cardano-hw-cli cardano-node cardano-submit-api cncli ogmios\n

The above might result in some lines having more than one entry (eg: you might have cardano-cli in "${HOME}"/.cabal/bin and "${HOME}"/.local/bin) - for which you'd want to delete the reference(s) not in "${HOME}"/.local/bin - while for other cases, you might have no values (eg: you may not use cardano-db-sync, cncli, ogmios and/or cardano-hw-cli). You need not take any action for the binaries you do not use.
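For example, if cardano-cli shows up in both locations, you could drop the copy outside "${HOME}"/.local/bin (a sketch - repeat per duplicate binary reported by whereis):

``` bash
# Sketch: remove a duplicate copy left behind in the old cabal bin folder
rm -f "${HOME}"/.cabal/bin/cardano-cli
```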

    "},{"location":"upgrade/#supportimprovements","title":"Support/Improvements","text":"

Hope the guide above helps you with the migration, but again - we could've missed some edge cases. If so, please report via chat in the Koios Discussions channel only. Please DO NOT make edits to the script content based on forums/alternate guides/channels; while done with the best intentions, there have been solutions put online that modify files unnecessarily instead of correcting configs and disabling updates - such actions will only cause trouble for future updates.

    "},{"location":"Appendix/RecoverByronWallet/","title":"Unofficial Instructions for recovering your Byron Era funds on the new Incentivized Shelley Testnet","text":""},{"location":"Appendix/RecoverByronWallet/#1-grab-and-install-haskell","title":"1. Grab and install Haskell","text":"
    curl -sSL https://get.haskellstack.org/ | sh\n
    "},{"location":"Appendix/RecoverByronWallet/#2-get-the-wallet","title":"2. Get the wallet","text":"

note: as of today, you must build from source, as there are changes you need that only just got into master

    git clone https://github.com/cardano-foundation/cardano-wallet.git\n

    "},{"location":"Appendix/RecoverByronWallet/#3-go-into-the-wallet-directory","title":"3. Go into the wallet directory","text":"
    cd cardano-wallet\n
    "},{"location":"Appendix/RecoverByronWallet/#4-build-the-wallet","title":"4. Build the wallet","text":"

    stack build --test --no-run-tests\n
If it fails, there are a few reasons we have found: - The cardano build instructions reference a few things that may be missing. Check those. - Or maybe one of these would help:

    "},{"location":"Appendix/RecoverByronWallet/#libssl","title":"Libssl:","text":"
    sudo apt install libssl-dev\n
    "},{"location":"Appendix/RecoverByronWallet/#sqlite","title":"Sqlite :","text":"
    sudo apt-get install sqlite3 libsqlite3-dev \n
    "},{"location":"Appendix/RecoverByronWallet/#gmp","title":"gmp:","text":"
    sudo apt-get install libgmp3-dev \n
    "},{"location":"Appendix/RecoverByronWallet/#systemd-dev","title":"systemd dev:","text":"
    sudo apt install libsystemd-dev\n

get coffee... It takes a while

    "},{"location":"Appendix/RecoverByronWallet/#5-when-its-done-install-executables-to-your-path","title":"5. When its done, install executables to your path","text":"
    stack install\n
    "},{"location":"Appendix/RecoverByronWallet/#6-test-to-make-sure-cardano-wallet-jormungandr-works-fine","title":"6. Test to make sure cardano-wallet-jormungandr works fine.","text":"

Generate the new mnemonics you will need below. Note that this generates 15 words, as opposed to your byron era mnemonics which were only 12 words.

    cardano-wallet-jormungandr mnemonic generate\n
    "},{"location":"Appendix/RecoverByronWallet/#7-launch-the-wallet-as-a-service","title":"7. Launch the wallet as a service.","text":"

you can either open another terminal window or use screen or something. anyway, wherever you run this next command, you won't be able to use that terminal anymore until you stop the wallet

    change --node-port 3001 to wherever you have your jormungandr rest interface running. for me it was 5001.. so

    change --port 3002 to wherever you want to access the wallet interface at. If you have other things running avoid those ports. for most, 3002 should be free

    just to future proof these instructions. genesis should be whatever genesis you are on.

    cardano-wallet-jormungandr serve --node-port 3001 --port 3002 --genesis-block-hash e03547a7effaf05021b40dd762d5c4cf944b991144f1ad507ef792ae54603197\n
    "},{"location":"Appendix/RecoverByronWallet/#8-restore-your-byron-wallet","title":"8. Restore your byron wallet:","text":"

    --->in another window

replace foo, foo, foo with all your mnemonics from the byron wallet you are restoring

    Also, if you put your wallet on a different port than 3002, fix that too

    curl -X POST -H \"Content-Type: application/json\" -d '{ \"name\": \"legacy_wallet\", \"mnemonic_sentence\": [\"foo\",\"foo\",\"foo\",\"foo\",\"foo\",\"foo\",\"foo\",\"foo\",\"foo\",\"foo\",\"foo\",\"foo\"], \"passphrase\": \"areallylongpassword\"}' http://localhost:3002/v2/byron-wallets\n
That's going to spit out some information about a wallet it creates; you should see the value of your wallet - hopefully it's not zero. And you need the wallet ID for the next step

    "},{"location":"Appendix/RecoverByronWallet/#9-create-your-shelley-wallet","title":"9. Create your shelley wallet:","text":"

Remember all those mnemonics you made above.. put them here instead of all the foo's.

    curl -X POST -H \"Content-Type: application/json\" -d '{ \"name\": \"pool_wallet\", \"mnemonic_sentence\": [\"foo\",\"foo\",\"foo\",\"foo\",\"foo\",\"foo\",\"foo\",\"foo\",\"foo\",\"foo\",\"foo\",\"foo\",\"foo\",\"foo\",\"foo\"], \"passphrase\": \"areallylongpasswordagain\"}' http://localhost:3002/v2/wallets\n
The important thing to get is the wallet id from this command

    "},{"location":"Appendix/RecoverByronWallet/#10-migrate-your-funds","title":"10. Migrate your funds","text":"

    Now you are ready to migrate your wallet. replace the <old wallet id> and <new wallet id> with the values you got above

    curl -X POST -H \"Content-Type: application/json\" -d '{\"passphrase\": \"areallylongpassword\"}' http://localhost:3002/v2/byron-wallets/<old wallet id>/migrations/<new wallet id>\n
    "},{"location":"Appendix/RecoverByronWallet/#11-congratulations-your-funds-are-now-in-your-new-wallet","title":"11. Congratulations. your funds are now in your new wallet.","text":"

    From here we recommend you send them to a new address entirely owned and created by jcli or whatever method you have been using for the testnet process.

    This technically may not be required. But a lot of us did it and we know it works for setting up pools and stuff.

    send a small amount first just to make sure you are in control of the transaction and don't send your funds to la la land.

    If you want to send to another address use the command below, but replace the address that you want to send it to, the amount, and your <new wallet id>

    curl -X POST -H \"Content-Type: application/json\" -d '{\"payments\": [ { \"address\": \"<address to send to>\"\", \"amount\": { \"quantity\": 83333330000000, \"unit\": \"lovelace\" } } ], \"passphrase\": \"areallylongpasswordagain\"}' http://localhost:3002/v2/wallets/<new wallet id>/transactions\n

    "},{"location":"Appendix/monitoring/","title":"Monitoring","text":"

    Ensure the Pre-Requisites are in place before you proceed.

This is an easy-to-use script to automate setting up of monitoring tools. It automates the following tasks: - Installs Prometheus, Node Exporter and Grafana servers for your respective Linux architecture. - Configures Prometheus to connect to the cardano node and node exporter jobs. - Provisions the installed Prometheus server to be automatically available as a data source in Grafana. - Provisions two of the common Grafana dashboards used to monitor cardano-node, by SkyLight and IOHK, to be readily consumed from Grafana. - Deploys prometheus, node_exporter and grafana-server as systemd services on Linux. - Starts and enables those services.

Note that securing prometheus/grafana servers via TLS encryption and other security best practices is out of scope for this document; it is mainly aimed at helping you get started with monitoring without much fuss.

    !> Ensure that you've opened the firewall port for grafana server (default used in this script is 5000)
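For example (a sketch for ufw-based systems - use your distro's firewall tooling of choice):

``` bash
# Sketch: allow inbound traffic to grafana's default port used by this script
sudo ufw allow 5000/tcp
```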

    "},{"location":"Appendix/monitoring/#download-setup_monsh","title":"Download setup_mon.sh","text":"

    If you have run guild-deploy.sh, you can skip this step. To download monitoring script, you can execute the commands below:

    cd $CNODE_HOME/scripts\nwget https://raw.githubusercontent.com/cardano-community/guild-operators/master/scripts/cnode-helper-scripts/setup_mon.sh\nchmod 750 setup_mon.sh\n

    "},{"location":"Appendix/monitoring/#customise-any-environment-variables","title":"Customise any Environment Variables","text":"

The default selection may not always be usable for everyone. You can customise further environment variable settings by opening the script in an editor (eg: vi setup_mon.sh), and updating the variables below to your liking:

#!/usr/bin/env bash\n# shellcheck disable=SC2209,SC2164\n\n######################################################################\n#### Environment Variables\n######################################################################\nCNODE_IP=127.0.0.1\nCNODE_PORT=12798\nGRAFANA_HOST=0.0.0.0\nGRAFANA_PORT=5000\nPROJ_PATH=/opt/cardano/monitoring\nPROM_HOST=127.0.0.1\nPROM_PORT=9090\nNEXP_PORT=$(( PROM_PORT + 1 ))\n\n#### Set up Monitoring\n\nExecute setup_mon.sh with the full path to the destination folder you want to set up monitoring in. If you're following the guild folder structure, you do not need to specify `-d`. Read the usage comments below before you run the actual script.\n\nNote that to deploy services as systemd, the script expects sudo access to be available to the user running the script.\n\n``` bash\ncd $CNODE_HOME/scripts\n# To check Usage parameters:\n# ./setup_mon.sh -h\n#Usage: setup_mon.sh [-d directory] [-i IP/hostname] [-p port]\n#Setup monitoring using Prometheus and Grafana for Cardano Node\n#-d directory      Directory where you'd like to deploy the packages for prometheus, node exporter and grafana\n#-i IP/hostname    IPv4 address or a FQDN/DNS name where your cardano-node (relay) is running (check for hasPrometheus in config.json; eg: 127.0.0.1 if same machine as cardano-node)\n#-p port           Port at which your cardano-node is exporting stats (check for hasPrometheus in config.json; eg: 12798)\n./setup_mon.sh\n# \n# Downloading prometheus v2.18.1...\n# Downloading grafana v7.0.0...\n# Downloading exporter v0.18.1...\n# Downloading grafana dashboard(s)...\n#   - SKYLight Monitoring Dashboard\n#   - IOHK Monitoring Dashboard\n# \n# NOTE: Could not create directory as rdlrt, attempting sudo ..\n# NOTE: No worries, sudo worked !! Moving on ..\n# Configuring components\n# Registering Prometheus as datasource in Grafana..\n# Creating service files as root..\n# \n# =====================================================\n# Installation is completed\n# =====================================================\n# \n# - Prometheus (default): http://127.0.0.1:9090/metrics\n#     Node metrics:       http://127.0.0.1:12798\n#     Node exp metrics:   http://127.0.0.1:9091\n# - Grafana (default):    http://0.0.0.0:5000\n# \n# \n# You need to do the following to configure grafana:\n# 0. The services should already be started, verify if you can login to grafana, and prometheus. If using 127.0.0.1 as IP, you can check via curl\n# 1. Login to grafana as admin/admin (http://0.0.0.0:5000)\n# 2. Add \"prometheus\" (all lowercase) datasource (http://127.0.0.1:9090)\n# 3. Create a new dashboard by importing dashboards (left plus sign).\n#   - Sometimes, the individual panel's \"prometheus\" datasource needs to be refreshed.\n# \n# Enjoy...\n# \n# Cleaning up...\n
    "},{"location":"Appendix/monitoring/#view-dashboards","title":"View Dashboards","text":"

You should now be able to log in to the grafana dashboard, using the public IP of your server, at port 5000. The initial credentials to login would be admin/admin, and you will be asked to update your password upon first login. Once logged on, you should be able to go to Manage > Dashboards and select the dashboard you'd like to view. Note that if you've just started the server, you might see the graphs as empty, as the initial interval for dashboards is 12 hours. You can change it to 5 minutes by looking at the top right section of the page.
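A quick way to confirm both services are reachable before logging in (a sketch using this script's default ports):

``` bash
# Sketch: check prometheus and grafana respond on the default ports
curl -s http://127.0.0.1:9090/metrics | head -n 3   # prometheus metrics
curl -sI http://127.0.0.1:5000 | head -n 1          # grafana HTTP status
```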

Thanks to Pal Dorogi for the original setup instructions that these were adapted from.

    "},{"location":"Appendix/postgres/","title":"Sample Postgres Setup","text":"

These deployment instructions were used for reference while building the cardano-db-sync tool, with the scope being ease of setup and some tuning baselines for those who are new to Postgres DB. It is recommended to customise these as per your needs for production builds.

    Important

    You'd find it pretty useful to set up ZFS on your system prior to setting up Postgres, to help with your IOPs throughput requirements. You can find sample install instructions here. You can set up your entire root mount to be on ZFS, or you can opt to mount a file as ZFS on \"${CNODE_HOME}\"

    "},{"location":"Appendix/postgres/#install-postgresql-server","title":"Install PostgreSQL Server","text":"

    Execute commands below to set up Postgres Server

    # Determine OS platform\nOS_ID=$( (grep -i ^ID_LIKE= /etc/os-release || grep -i ^ID= /etc/os-release) | cut -d= -f 2)\nDISTRO=$(grep -i ^NAME= /etc/os-release | cut -d= -f 2)\n\nif [ -z \"${OS_ID##*debian*}\" ]; then\n#Debian/Ubuntu\nwget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -\n  RELEASE=$(lsb_release -cs)\necho \"deb [arch=amd64] http://apt.postgresql.org/pub/repos/apt/ ${RELEASE}\"-pgdg main | sudo tee  /etc/apt/sources.list.d/pgdg.list\n  sudo apt-get update\n  sudo apt-get -y install postgresql-15 postgresql-server-dev-15 postgresql-contrib libghc-hdbc-postgresql-dev\n  sudo systemctl restart postgresql\n  sudo systemctl enable postgresql\nelse\necho \"We have no automated procedures for this ${DISTRO} system\"\nfi\n
    "},{"location":"Appendix/postgres/#create-user-in-postgres","title":"Create User in Postgres","text":"

    Login to Postgres instance as superuser:

    echo $(whoami)\n# <user>\nsudo su postgres\npsql\n

Note the <user> returned as the output of the echo $(whoami) command. Replace all instances of <user> in the documentation below. Execute the below at the psql prompt, replacing <user> and PasswordYouWant with your OS user (output of the echo $(whoami) command executed above) and a password you'd like to authenticate to Postgres with:

    CREATE ROLE <user> SUPERUSER LOGIN;\nALTER USER <user> PASSWORD 'PasswordYouWant';\n\\q\n
    Type exit at shell to return to your user from postgres

    "},{"location":"Appendix/postgres/#verify-login-to-postgres-instance","title":"Verify Login to postgres instance","text":"
    export PGPASSFILE=$CNODE_HOME/priv/.pgpass\necho \"/var/run/postgresql:5432:cexplorer:*:*\" > $PGPASSFILE\nchmod 0600 $PGPASSFILE\npsql postgres\n# psql (15.0)\n# Type \"help\" for help.\n# \n# postgres=#\n
    "},{"location":"Appendix/postgres/#tuning-your-instance","title":"Tuning your instance","text":"

Before you start populating your DB instance using dbsync data, now might be a good time to put some thought into the baseline configuration of your postgres instance by editing /etc/postgresql/15/main/postgresql.conf. Typically, you might find a lot of common standard-practice parameters available in tuning guides. For our consideration, it would be nice to start with some baselines - for which we will use inputs from the example here, which would need to be customised further to your environment and resources.

In a typical Koios [gRest] setup with minimum viable specs (i.e. 64GB RAM, >8 CPUs, >16K IOPs for ioping -q -S512M -L -c 10 -s8k . output when the postgres data directory is on ZFS configured with a max ARC of 4GB), we find the below configuration to be the best common setup:

    Parameter Value Comment data_directory '/opt/cardano/cnode/guild-db/pgdb/15' Move postgres data directory to ZFS mount at /opt/cardano/cnode, ensure it's writable by postgres user effective_cache_size 8GB Be conservative as Node and DBSync by themselves will need ~32-40GB of RAM if ledger-state is enabled effective_io_concurrency 4 Can go higher if you have substantially higher IOPs/IO throughputs lc_time 'en_US.UTF-8' Just to use standard server-side time formatting between instances, can adapt to your preferences log_timezone 'UTC' For consistency, to avoid timezone confusions maintenance_work_mem 512MB Helps with vacuum/index/foreign key maintainance (with 4 workers, it's set to max 2GB) max_connections 200 Allow maximum of 200 connections, the koios connections are still controlled via postgrest db-pool max_parallel_maintenance_workers 4 Max workers postgres will use for maintainance max_parallel_workers 4 Max workers postgres will use across the system max_parallel_workers_per_gather 2 Parallel threads per query, do not increase to higher values as it will multiply memory usage max_wal_size 4GB Used for WAL automatic checkpoints (disabled later) max_worker_processes 4 Maximum number of background processes system can support min_wal_size 1GB Used for WAL automatic checkpoints (disabled later) random_page_cost 1.1 Use higher value if IOPs has trouble catching up (you can use 4 instead of 1.1) shared_buffers 4GB Conservative limit to allow for node/dbsync/zfs memory usage timezone 'UTC' For consistency, to avoid timezone confusions wal_buffers 16MB WAL consumption in shared buffer (disabled later) work_mem 16MB Base memory size before writing to temporary disk files

In addition to the above, due to the nature of usage by dbsync (syncing from node, and on restart traversing back to the last saved ledger-state snapshot), we leverage the data retention on the blockchain itself - as we're not affected by loss of volatile information upon a restart of the instance. Thus, we can relax some of the data retention and protection-against-corruption settings, as those cost IOPs/CPU load average that the instance does not need to spend. We'd recommend setting the 3 below in your /etc/postgresql/15/main/postgresql.conf:

    Parameter Value wal_level minimal max_wal_senders 0 synchronous_commit off
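A sketch for appending the three settings (paths as used in this guide):

``` bash
# Sketch: append the relaxed durability settings, then restart postgres as noted below
sudo tee -a /etc/postgresql/15/main/postgresql.conf >/dev/null <<'EOF'
wal_level = minimal
max_wal_senders = 0
synchronous_commit = off
EOF
```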

    Once your changes are done, ensure to restart postgres service using sudo systemctl restart postgresql.

    "},{"location":"Build/dbsync/","title":"DBSync","text":"

    Important

    An average pool operator may not require cardano-db-sync at all. Please verify if it is required for your use as mentioned here.

    "},{"location":"Build/dbsync/#build-instructions","title":"Build Instructions","text":""},{"location":"Build/dbsync/#clone-the-repository","title":"Clone the repository","text":"

    Execute the below to clone the cardano-db-sync repository to $HOME/git folder on your system:

    cd ~/git\ngit clone https://github.com/intersectmbo/cardano-db-sync\ncd cardano-db-sync\n
    "},{"location":"Build/dbsync/#build-cardano-db-sync","title":"Build Cardano DB Sync","text":"

    You can use the instructions below to build the latest release of cardano-db-sync.

    git fetch --tags --all\ngit pull\n# Include the cardano-crypto-praos and libsodium components for db-sync\n# On CentOS 7 (GCC 4.8.5) we should also do\n# echo -e \"package cryptonite\\n  flags: -use_target_attributes\" >> cabal.project.local\n# Replace tag against checkout if you do not want to build the latest released version\ngit checkout $(curl -sLf https://api.github.com/repos/intersectmbo/cardano-db-sync/releases/latest | jq -r .tag_name)\n# Use `-l` argument if you'd like to use system libsodium instead of IOG fork of libsodium while compiling\n$CNODE_HOME/scripts/cabal-build-all.sh\n
    The above would copy the cardano-db-sync binary into ~/.local/bin folder.

    "},{"location":"Build/dbsync/#prepare-db-for-sync","title":"Prepare DB for sync","text":"

Now that binaries are available, let's create our database (when going through breaking changes, you may need to use --recreatedb instead of the --createdb used for the first time). Again, we expect that the PGPASSFILE environment variable is already set (refer to the top of this guide for sample instructions):

    cd ~/git/cardano-db-sync\n# scripts/postgresql-setup.sh --dropdb #if exists already, will fail if it doesnt - thats OK\nscripts/postgresql-setup.sh --createdb\n# Password:\n# Password:\n# All good!\n

    Verify you can see \"All good!\" as above!

    "},{"location":"Build/dbsync/#create-symlink-to-schema-folder","title":"Create Symlink to schema folder","text":"

The DBSync instance requires the schema files from the git repository to be present and available to it. You can either clone the ~/git/cardano-db-sync/schema folder OR create a symlink to the folder and make it available to the startup command we will be using. We will use the latter in the sample below:

    ln -s ~/git/cardano-db-sync/schema $CNODE_HOME/guild-db/schema\n
    "},{"location":"Build/dbsync/#restore-using-snapshot","title":"Restore using Snapshot","text":"

If you're running a mainnet/preview/preprod instance of dbsync, you might want to consider using dbsync snapshots as documented here. The snapshot files as of a recent epoch are available via links in the release notes.

At a high level, this would involve the steps below (read and update paths as per your environment):

# Replace the actual link below with the latest one from release notes\nwget -O /tmp/dbsyncsnap.tgz https://update-cardano-mainnet.iohk.io/cardano-db-sync/13/db-sync-snapshot-schema-13-block-7622755-x86_64.tgz\nrm -rf ${CNODE_HOME}/guild-db/ledger-state ; mkdir -p ${CNODE_HOME}/guild-db/ledger-state\ncd ~/git/cardano-db-sync\nscripts/postgresql-setup.sh --restore-snapshot /tmp/dbsyncsnap.tgz ${CNODE_HOME}/guild-db/ledger-state\n# The restore may take a while, please be patient and do not interrupt the restore process. Once the restore is successful, you may delete the downloaded snapshot as below:\n#   rm -f /tmp/dbsyncsnap.tgz\n
    "},{"location":"Build/dbsync/#test-running-dbsync-manually-at-terminal","title":"Test running dbsync manually at terminal","text":"

Before deploying dbsync as a service, you'd want to ensure that you can run it interactively once. To do so, try the commands below:

    cd $CNODE_HOME/scripts\nexport PGPASSFILE=$CNODE_HOME/priv/.pgpass\n./dbsync.sh\n

You can monitor logs if needed via a parallel session using tail -10f $CNODE_HOME/logs/dbsync.json. If there are no errors, press Ctrl-C to stop the dbsync.sh execution and deploy it as a systemd service. To do so, use the commands below (the creation of the file is done using sudo permissions, but you can always deploy it manually):

    cd $CNODE_HOME/scripts\n./dbsync.sh -d\n# Deploying cnode-dbsync.service as systemd service..\n# cnode-dbsync.service deployed successfully!!\n

Now, to start the dbsync instance, you can run sudo systemctl start cnode-dbsync.
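
To verify it came up fine, the standard systemd checks apply - for example:

sudo systemctl status cnode-dbsync\n# Follow service logs (Ctrl-C to stop following):\njournalctl -f -u cnode-dbsync\n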

    Note

Note that while dbsync syncs, it might defer creation of indexes/constraints to speed up the initial catch-up. Once relatively close to tip, it will initiate index creation - which can take a while in the background. Thus, you might notice that query timings right after reaching tip are not as good.
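
If you'd like to check whether such an index build is currently in progress, a minimal sketch via psql - this uses the pg_stat_progress_create_index view available in PostgreSQL 12 and later:

export PGPASSFILE=$CNODE_HOME/priv/.pgpass\npsql cexplorer -c \"select phase, blocks_done, blocks_total from pg_stat_progress_create_index;\"\n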

    "},{"location":"Build/dbsync/#update-dbsync","title":"Update DBSync","text":"

Updating dbsync can involve different tasks depending on the versions involved. We attempt to briefly explain them below:
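
For a typical minor-version bump, the flow usually boils down to a rebuild followed by a service restart, as sketched below - major versions may additionally require --recreatedb and/or a snapshot restore as described in the sections above (the tag below is a placeholder):

cd ~/git/cardano-db-sync\ngit fetch --tags --all\ngit pull\n# Replace <desired-tag> with the release you intend to run\ngit checkout <desired-tag>\n$CNODE_HOME/scripts/cabal-build-all.sh\nsudo systemctl restart cnode-dbsync\n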

    "},{"location":"Build/dbsync/#validation","title":"Validation","text":"

To validate, connect to your postgres instance and execute the commands below:

    export PGPASSFILE=$CNODE_HOME/priv/.pgpass\npsql cexplorer\n

You should now be at the psql prompt; you can check the tables and verify they're populated:

    \\dt\nselect * from meta;\n

    A sample output of the above two commands may look like below (the number of tables and names may vary between versions):

    cexplorer=# \\dt\nList of relations\n Schema |           Name            | Type  | Owner\n--------+---------------------------+-------+-------\n public | ada_pots                  | table | centos\n public | admin_user                | table | centos\n public | block                     | table | centos\n public | delegation                | table | centos\n public | delisted_pool             | table | centos\n public | epoch                     | table | centos\n public | epoch_param               | table | centos\n public | epoch_stake               | table | centos\n public | ma_tx_mint                | table | centos\n public | ma_tx_out                 | table | centos\n public | meta                      | table | centos\n public | orphaned_reward           | table | centos\n public | param_proposal            | table | centos\n public | pool_hash                 | table | centos\n public | pool_meta_data            | table | centos\n public | pool_metadata             | table | centos\n public | pool_metadata_fetch_error | table | centos\n public | pool_metadata_ref         | table | centos\n public | pool_owner                | table | centos\n public | pool_relay                | table | centos\n public | pool_retire               | table | centos\n public | pool_update               | table | centos\n public | pot_transfer              | table | centos\n public | reserve                   | table | centos\n public | reserved_ticker           | table | centos\n public | reward                    | table | centos\n public | schema_version            | table | centos\n public | slot_leader               | table | centos\n public | stake_address             | table | centos\n public | stake_deregistration      | table | centos\n public | stake_registration        | table | centos\n public | treasury                  | table | centos\n public | tx                        | table | centos\n public | tx_in                     | table | centos\n public | tx_metadata               | table | centos\n public | tx_out                    | table | centos\n public | withdrawal                | table | centos\n(37 rows)\n\n\n\nselect * from meta;\n id |     start_time      | network_name\n----+---------------------+--------------\n  1 | 2017-09-23 21:44:51 | mainnet\n(1 row)\n
    "},{"location":"Build/grest-changelog/","title":"Koios gRest Changelog","text":""},{"location":"Build/grest-changelog/#110-for-all-networks","title":"[1.1.0] - For all networks.","text":"

This will be the first major [breaking] release for Koios consumers in a while, and will be rolled out under a new base prefix (/api/v1). The major work in this release was to start making use of newer flags in dbsync which help the performance of queries under new endpoints. Please ensure to check out the release notes for 1.1.0rc below. The list for this section is only a small addendum to 1.1.0rc:

    "},{"location":"Build/grest-changelog/#chores","title":"Chores:","text":""},{"location":"Build/grest-changelog/#110rc-for-all-networks","title":"[1.1.0rc] - For all networks.","text":"

This will be the first major [breaking] release for Koios consumers in a while, and will be rolled out under a new base prefix (/api/v1). The major work in this release was to start making use of newer flags in dbsync which help the performance of queries under new endpoints. You'd also see quite a few new endpoint additions below, which help with slightly lighter versions of queries. To keep migration paths easier, we will ensure both v0 and v1 versions of the release are up for a month post release, before retiring v0.

    "},{"location":"Build/grest-changelog/#new-endpoints-added","title":"New endpoints added:","text":""},{"location":"Build/grest-changelog/#data-inputoutput-changes","title":"Data Input/Output Changes:","text":""},{"location":"Build/grest-changelog/#deprecations","title":"Deprecations:","text":""},{"location":"Build/grest-changelog/#chores_1","title":"Chores:","text":""},{"location":"Build/grest-changelog/#1010-for-all-networks","title":"[1.0.10] - For all networks.","text":"

The release is effectively the same as 1.0.10rc, except with one minor modification below.

    "},{"location":"Build/grest-changelog/#chores_2","title":"Chores:","text":""},{"location":"Build/grest-changelog/#1010rc-for-non-mainnet-networks","title":"[1.0.10rc] - For non-mainnet networks","text":"

This release primarily focuses on the ability to better support DeFi projects, along with some value addition for existing clients: 10 new endpoints (paired with 2 deprecations), a few additional optional input parameters, and some additional output columns for existing endpoints. The only breaking change/fix is the output returned for tx_info.

Also, dbsync 13.1.x.x has been released and is recommended for use with this release.

    "},{"location":"Build/grest-changelog/#new-endpoints-added_1","title":"New endpoints added","text":""},{"location":"Build/grest-changelog/#data-inputoutput-changes_1","title":"Data Input/Output Changes","text":""},{"location":"Build/grest-changelog/#deprecations_1","title":"Deprecations:","text":""},{"location":"Build/grest-changelog/#chores_3","title":"Chores:","text":""},{"location":"Build/grest-changelog/#109-for-all-networks","title":"[1.0.9] - For all networks","text":"

This release is effectively the same as 1.0.9rc below (please check out the notes accordingly), just with a minor bug fix in setup-grest.sh itself.

    "},{"location":"Build/grest-changelog/#109rc-for-non-mainnet-networks","title":"[1.0.9rc] - For non-mainnet networks","text":"

This release candidate is non-breaking for existing methods and inputs, but breaking for the output objects of endpoints. The aim of the release candidate version is to allow folks a couple of weeks to test and adapt their libraries before applying to mainnet.

    "},{"location":"Build/grest-changelog/#new-endpoints-added_2","title":"New endpoints added","text":""},{"location":"Build/grest-changelog/#data-inputoutput-changes_2","title":"Data Input/Output changes","text":""},{"location":"Build/grest-changelog/#changes-for-instance-providers","title":"Changes for Instance Providers","text":""},{"location":"Build/grest-changelog/#108-for-all-networks","title":"[1.0.8] - For all networks","text":"

This release contains minor bug-fixes that were discovered in koios-1.0.7. No major changes to output for this one.

    "},{"location":"Build/grest-changelog/#changes-for-api","title":"Changes for API","text":""},{"location":"Build/grest-changelog/#new-endpoints-added_3","title":"New endpoints added","text":""},{"location":"Build/grest-changelog/#data-inputoutput-changes_3","title":"Data Input/Output changes","text":""},{"location":"Build/grest-changelog/#changes-for-instance-providers_1","title":"Changes for Instance Providers","text":""},{"location":"Build/grest-changelog/#107-for-all-networks","title":"[1.0.7] - For all networks","text":"

This release continues updates from koios-1.0.6 to further utilise stake-snapshot cache tables, which would be useful for SPOs as well as reduce downtime post epoch transition. One widely requested feature - accepting bulk inputs for many block/address/account endpoints - is now complete. Additionally, koios instance providers are now recommended to use cardano-node 1.35.3 with dbsync 13.0.5.

    "},{"location":"Build/grest-changelog/#changes-for-api_1","title":"Changes for API","text":""},{"location":"Build/grest-changelog/#new-endpoints-added_4","title":"New endpoints added","text":""},{"location":"Build/grest-changelog/#data-inputoutput-changes_4","title":"Data Input/Output changes","text":""},{"location":"Build/grest-changelog/#changes-for-instance-providers_2","title":"Changes for Instance Providers","text":""},{"location":"Build/grest-changelog/#106106m-interim-release-for-all-networks-to-upgrade-to-dbsync-v13","title":"[1.0.6/1.0.6m] - Interim release for all networks to upgrade to dbsync v13","text":"

The backlog of items not yet added to mainnet has been increasing due to delays with the Vasil HFC event on mainnet. As such, we had to come up with a split update approach. The mainnet nodes are still not qualified to be Vasil-ready (in our opinion) for 1.35.x, but dbsync 13 can be used fine against node 1.34.1. In order to cater for this split, we have added an intermediate koios-1.0.6m tag that brings in the dbsync updates while maintaining node 1.34.1.

    "},{"location":"Build/grest-changelog/#changes-for-api_2","title":"Changes for API","text":""},{"location":"Build/grest-changelog/#data-output-changes","title":"Data Output Changes","text":""},{"location":"Build/grest-changelog/#changes-for-instance-providers_3","title":"Changes for Instance Providers","text":""},{"location":"Build/grest-changelog/#105-alpha-networks-only","title":"[1.0.5] - alpha networks only","text":"

Since there have been a few deviations wrt Vasil for testnet and mainnet, this version targets all networks except mainnet!

    "},{"location":"Build/grest-changelog/#changes-for-api_3","title":"Changes for API","text":""},{"location":"Build/grest-changelog/#data-output-changes_1","title":"Data Output Changes","text":""},{"location":"Build/grest-changelog/#changes-for-instance-providers_4","title":"Changes for Instance Providers","text":""},{"location":"Build/grest-changelog/#101","title":"[1.0.1]","text":""},{"location":"Build/grest-changelog/#100","title":"[1.0.0]","text":""},{"location":"Build/grest-changelog/#100-rc1","title":"[1.0.0-rc1]","text":""},{"location":"Build/grest-changelog/#changes-for-api_4","title":"Changes for API","text":""},{"location":"Build/grest-changelog/#data-output-changes_2","title":"Data Output Changes","text":""},{"location":"Build/grest-changelog/#input-parameter-changes","title":"Input Parameter Changes","text":""},{"location":"Build/grest-changelog/#changes-for-instance-providers_5","title":"Changes for Instance Providers","text":""},{"location":"Build/grest-changelog/#added","title":"Added","text":""},{"location":"Build/grest-changelog/#fixed","title":"Fixed","text":""},{"location":"Build/grest-changelog/#100-rc0-2022-04-29","title":"[1.0.0-rc0] - 2022-04-29","text":""},{"location":"Build/grest/","title":"Koios gRest","text":"

    Important

    "},{"location":"Build/grest/#what-is-grest","title":"What is gRest","text":"

gRest is an open source implementation of a query layer built over dbsync using PostgREST and HAProxy. The package is built as part of the Koios team's efforts to unite individual community work streams, give back a more aligned structure for querying dbsync, and adopt standardisation for queries utilising open-source tooling and collaboration. In addition, there are accessibility features to deploy failover rules, do healthchecks, set up priorities, prevent DDoS attacks, provide timeouts, report tips for analysis over a longer period, etc - which can prove really useful when performing any analysis of instances.

    Note

Note that the scripts below do allow for provisioning ogmios integration too, but Ogmios - currently - is not designed to provide advanced session management for a server-client architecture in the absence of middleware. Thus, the availability of ogmios from the monitoring instance is restricted, to avoid the ability to DDoS an instance.

    "},{"location":"Build/grest/#components","title":"Components","text":"
1. PostgREST: An RPC JSON interface for any PostgreSQL database (in our case, the database served via cardano-db-sync) to provide a RESTful Web Service. The endpoints of PostgREST are essentially the tables/functions defined in the elected schema via the grest config file. You can read more about the advanced query syntax using the PostgREST API here, but we will provide a simpler view using examples towards the end of the page. It is an easy alternative - with almost no overhead, as it directly serves the underlying database as an API - compared to the Cardano GraphQL component (which may often have lags). Some of the other advantages of PostgREST over graphql-based projects are performance, being stateless, zero overhead, and support for JWT / native Postgres DB authentication against the REST interface.

2. HAProxy: An easy gateway proxy that automatically provides failover/basic DDoS protection, rules management for load balancing, multiple frontends/backends, an easy means to have TLS enabled for public-facing instances, etc. You may alter the settings of the proxy layer as per your SecOps preferences. This component is optional (eg: if you prefer to expose your PostgREST server itself, you can do so using similar steps below).

    "},{"location":"Build/grest/#setup","title":"Setup gRest services","text":"

To start with, you'd want to ensure your current shell session has access to Postgres credentials, continuing from the examples in the above-mentioned Sample Postgres deployment guide.

cd $CNODE_HOME/priv\nexport PGPASSFILE=$CNODE_HOME/priv/.pgpass\npsql cexplorer\n

Ensure that you can connect to your Postgres DB fine using the above (quit from psql once validated using \\q). As part of guild-deploy.sh execution, you'd find the setup-grest.sh file made available in the ${CNODE_HOME}/scripts folder, which will help you automate the installation of PostgREST and HAProxy, as well as bring in the latest queries/functions provided via Koios to your instance.

    Warning

As of now, gRest services are in alpha stage - while they can be utilised, please remember there may be breaking changes, and every collaborator is expected to work with the team to keep their instances up-to-date using the alpha branch.

Familiarise yourself with the usage options for the setup script; the syntax can be viewed below:

    cd \"${CNODE_HOME}\"/scripts\n./setup-grest.sh -h\n#\n# Usage: setup-grest.sh [-f] [-i [p][r][m][c][d]] [-u] [-b <branch>]\n# \n# Install and setup haproxy, PostgREST, polling services and create systemd services for haproxy, postgREST and dbsync\n# \n# -f    Force overwrite of all files including normally saved user config sections\n# -i    Set-up Components individually. If this option is not specified, components will only be installed if found missing (eg: -i prcd)\n#     p    Install/Update PostgREST binaries by downloading latest release from github.\n#     r    (Re-)Install Reverse Proxy Monitoring Layer (haproxy) binaries and config\n#     m    Install/Update Monitoring agent scripts\n#     c    Overwrite haproxy, postgREST configs\n#     d    Overwrite systemd definitions\n# -u    Skip update check for setup script itself\n# -q    Run all DB Queries to update on postgres (includes creating grest schema, and re-creating views/genesis table/functions/triggers and setting up cron jobs)\n# -b    Use alternate branch of scripts to download - only recommended for testing/development (Default: master)\n#\n

    To run the setup overwriting all standard deployment tasks from a branch (eg: koios-1.0.9 branch), you may want to use:

    ./setup-grest.sh -f -i prmcd -r -q -b koios-1.0.9\n

    Similarly - if you'd like to re-install all components and force overwrite all configs but not reset cache tables, you may run:

    ./setup-grest.sh -f -i prmcd -q\n

    Another example could be to preserve your config, but only update queries using an alternate branch (eg: let's say you want to try the branch alpha prior to a tagged release). To do so, you may run:

    ./setup-grest.sh -q -b alpha\n

Please ensure to follow the on-screen instructions, if any (for example, restarting deployed services, or updating configs to specify correct target postgres URLs/enable TLS/add peers etc in ${CNODE_HOME}/priv/grest.conf and ${CNODE_HOME}/files/haproxy.cfg).

The default ports used will make the haproxy instance available at port 8053 (or 8453 if TLS is enabled) - you might want to add a firewall rule to open this port to the services you would like to access it from. If you want to prevent unauthenticated access to the grest schema, uncomment the jwt-secret and specify a custom secret-token.
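
As a sketch, the relevant snippet in ${CNODE_HOME}/priv/grest.conf could look like below once uncommented (jwt-secret is a standard PostgREST configuration key; the value shown is a placeholder):

# Uncomment and set to a long random string of your own\njwt-secret = \"<your-custom-secret-token>\"\n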

    Reminder

Once you've successfully deployed the grest instance, it will deploy certain cron jobs that will ensure the relevant cache tables are updated periodically. Until these have finished (especially on first run, this could take an hour or so on mainnet), your instance will likely not pass any tests from grest-poll.sh - but that's expected.

    "},{"location":"Build/grest/#tls","title":"Enable TLS on HAProxy","text":"

    In order to enable SSL on your haproxy, all you need to do is edit the file ${CNODE_HOME}/files/haproxy.cfg and update the frontend app section to uncomment ssl bind (and comment normal bind).

    Info

If you're not familiar with how to configure TLS, OR would not like to buy a certificate, you can find tips on how to create a TLS certificate for free via LetsEncrypt using tutorials here. Once you do have a TLS certificate generated, you need to chain the private key and full-chain cert together in a single file - /etc/ssl/server.pem - which can then be referenced as below:

    frontend app\n  #bind 0.0.0.0:8053\n  ## If using SSL, comment line above and uncomment line below\n  bind :8453 ssl crt /etc/ssl/server.pem no-sslv3\n  http-request set-log-level silent\n  acl srv_down nbsrv(grest_postgrest) eq 0\n  acl is_wss hdr(Upgrade) -i websocket\n  ...\n
Restart the haproxy service for the changes to take effect.
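
As a sketch of the chaining step, assuming a certificate issued via LetsEncrypt/certbot for <your-domain> (paths follow certbot defaults - adjust to your environment):

sudo bash -c \"cat /etc/letsencrypt/live/<your-domain>/privkey.pem /etc/letsencrypt/live/<your-domain>/fullchain.pem > /etc/ssl/server.pem\"\n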

    "},{"location":"Build/grest/#validation","title":"Validation","text":"

With the setup, you also have a checkstatus.sh script, which will query the Postgres DB instance via haproxy (coming through postgREST), and will only show an instance as up if the latest block in your DB instance is within 180 seconds of current time.

    Important

If you'd like to participate in joining the elastic cluster via Koios, please raise a PR by editing the topology files in this folder to do so!!

If you are using the guild network, you can do a couple of very basic sanity checks as per below:

    1. To query active stake for pool pool1z2ry6kxywgvdxv26g06mdywynvs7jj3uemnxv273mr5esukljsr in epoch 122, we can execute the below:

      curl -d _pool_bech32=pool1z2ry6kxywgvdxv26g06mdywynvs7jj3uemnxv273mr5esukljsr -d _epoch_no=122 -s http://localhost:8053/rpc/pool_active_stake\n## {\"active_stake_sum\" : 19409732875}\n

    2. To check latest owner key(s) for a given pool pool1z2ry6kxywgvdxv26g06mdywynvs7jj3uemnxv273mr5esukljsr, you can execute the below:

curl -d _pool_bech32=pool1z2ry6kxywgvdxv26g06mdywynvs7jj3uemnxv273mr5esukljsr -s http://localhost:8053/rpc/pool_owners\n## [{\"owner\" : \"stake_test1upx5p04dn3t6dvhfh27744su35vvasgaaq565jdxwlxfq5sdjwksw\"}, {\"owner\" : \"stake_test1uqak99cgtrtpean8wqwp7d9taaqkt9gkkxga05m5azcg27chnzfry\"}]\n

You may want to explore all the endpoints that come out of the box and test them out; to do so, refer to the API documentation for OpenAPI3 documentation. Each endpoint has a pre-filled example for mainnet and connects by default to the primary Koios endpoint, allowing you to test endpoints and - if needed - grab the curl commands to start testing against your local or remote instances.

    "},{"location":"Build/grest/#participating-in-koios-cluster-as-instance-provider","title":"Participating in Koios Cluster as instance Provider","text":"

If you're interested in participating in decentralised infrastructure by providing an instance, there are a few additional steps you'd need:

1. Enable ports for your HAProxy instance (default: 8053), gRest Exporter service (default: 8059) and (optionally) submit API instance (default: 8090) towards the monitoring instance of the corresponding network (you do not need to open these ports to the internet - a firewall sketch follows this list).

2. Ensure that each of the services above is listening on your public IP address (for instance, submitapi.sh might need to be edited to change HOSTADDR to 0.0.0.0 and restarted).

    3. Create a PR specifying connectivity information to your HAProxy port here.

4. Make sure to join the telegram discussions group to participate in any discussions, actions, polls for new features, etc. Feel free to give a shout in the group in case you have trouble following any of the above.
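
A firewall sketch for step 1, assuming ufw and a monitoring instance at <monitoring-ip> (a placeholder - use the monitoring instance IP for the corresponding network):

sudo ufw allow from <monitoring-ip> to any port 8053 proto tcp   # HAProxy\nsudo ufw allow from <monitoring-ip> to any port 8059 proto tcp   # gRest Exporter\n# Optional, only if you run the submit API:\n# sudo ufw allow from <monitoring-ip> to any port 8090 proto tcp\n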

    "},{"location":"Build/node-cli/","title":"Node & CLI","text":"

    Reminder !!

    Ensure the Pre-Requisites are in place before you proceed.

    "},{"location":"Build/node-cli/#build-instructions","title":"Build Instructions","text":""},{"location":"Build/node-cli/#clone-the-repository","title":"Clone the repository","text":"

    Execute the below to clone the cardano-node repository to $HOME/git folder on your system:

    cd ~/git\ngit clone https://github.com/intersectmbo/cardano-node\ncd cardano-node\n
    "},{"location":"Build/node-cli/#build-cardano-node","title":"Build Cardano Node","text":"

    You can use the instructions below to build the latest release of cardano-node.

    git fetch --tags --recurse-submodules --all\ngit pull\n# Replace tag against checkout if you do not want to build the latest released version, we recommend using battle tested node versions - which may not always be latest\ngit checkout $(curl -sLf https://api.github.com/repos/intersectmbo/cardano-node/releases/latest | jq -r .tag_name)\n\n# Use `-l` argument if you'd like to use system libsodium instead of IOG fork of libsodium while compiling\n$CNODE_HOME/scripts/cabal-build-all.sh\n

    The above would copy the binaries built into ~/.local/bin folder.

    "},{"location":"Build/node-cli/#download-pre-compiled-binary-from-node-release","title":"Download pre-compiled Binary from Node release","text":"

While certain folks might want to build the node themselves (be it due to OS/arch compatibility, trust factor or customisations), for most it might not make sense to build the node locally. Instead, you can download the binaries using the cardano-node release notes, wherein you can find the download links for every version. This is already taken care of by guild-deploy.sh if you used the option to download binaries (you can always re-run with specific arguments if unsure).

    "},{"location":"Build/node-cli/#verify","title":"Verify","text":"

Execute cardano-cli and cardano-node to verify the output as below (the exact version and git rev will depend on the tag you checked out from the github repository):

    cardano-cli version\n# cardano-cli 8.x.x - linux-x86_64 - ghc-8.10\n# git rev <...>\ncardano-node version\n# cardano-node 8.x.x - linux-x86_64 - ghc-8.10\n# git rev <...>\n
    "},{"location":"Build/node-cli/#update-port-number-or-pool-name-for-relative-paths","title":"Update port number or pool name for relative paths","text":"

Before you go ahead with starting your node, you may want to update the values for CNODE_PORT (and optionally POOL_NAME) in $CNODE_HOME/scripts/env. Note that it is imperative for operational relays and pools to ensure that the port mentioned is opened via firewall to the destinations your node is supposed to connect from. Update your network/firewall configuration accordingly. Future executions of guild-deploy.sh will preserve these values and not overwrite them (or at least back them up if forced to overwrite).

    CNODEBIN=\"${HOME}/.local/bin/cardano-node\"\nCCLI=\"${HOME}/.local/bin/cardano-cli\"\nCNODE_PORT=6000\nPOOL_NAME=\"GUILD\"\n

    Important

POOL_NAME is the name of the folder that you will use when registering pools and starting the node in core mode. This folder would typically contain the required hot.skey, vrf.skey and op.cert files. If the mentioned files are absent (expected if this is a fresh install), the node will automatically start in relay mode.

    "},{"location":"Build/node-cli/#start-the-node","title":"Start the node","text":"

To test starting the node in interactive mode, we will make use of the pre-built script cnode.sh. This script automatically determines whether to start the node as a relay or block producer (based on whether the required pool keys are present in $CNODE_HOME/priv/pool/<POOL_NAME>, as mentioned above). The script contains a user-defined variable CPU_CORES which determines the number of CPU cores the node will use upon start-up:

    ######################################\n# User Variables - Change as desired #\n# Common variables set in env file   #\n######################################\n\n#CPU_CORES=4            # Number of CPU cores cardano-node process has access to (please don't set higher than physical core count, 4 recommended)\n

    Now let's test starting the node in interactive mode.

    Note

At this stage, upon executing cnode.sh, you are expected to see the live config and a line ending with Listening on http://127.0.0.1:12798 - this is normal, as your logs are being written to $CNODE_HOME/logs/node.json. If so, you should be alright to return to your console by pressing Ctrl-C. The node will be started later using systemd (Linux's service management) as per the instructions below. In case you receive any errors, please troubleshoot and fix those before proceeding.

    cd \"${CNODE_HOME}\"/scripts\n./cnode.sh\n

    Press Ctrl-C to exit node and return to console.

    "},{"location":"Build/node-cli/#modify-the-nodes-config-files","title":"Modify the node's config files","text":"

Now that you've tested the basic node operation, you might want to customise your config files (assuming you are in the top-level folder, i.e. cd \"${CNODE_HOME}\"):

1. files/config.json : This file contains the logging configuration (tracers to tune logging, paths to other genesis config files, addresses/ports on which the prometheus/EKG monitoring will listen, etc). Unless running more than one node on the same machine (not recommended), you should be alright to use this file as-is.

2. files/topology.json : This file tells your node how to connect to other nodes (especially initially, to start syncing). You would want to update this file as below:

    Important

    You'd want to set useLedgerAfterSlot to -1 for your Block Producing (Core) node - thereby, telling your Core node to remain in non-P2P mode.

    The resultant topology file could look something like below:

    {\n\"localRoots\": [\n{\n\"accessPoints\": [\n{\"address\": \"xx.xx.xx.xx\", \"port\": 6000 },\n{\"address\": \"xx.xx.xx.yy\", \"port\": 6000 }\n],\n\"advertise\": false,\n\"valency\": 2\n}\n],\n\"publicRoots\": [\n{\n\"accessPoints\": [\n{\"address\": \"...\", \"port\": 3001 },\n{\"address\": \"...\", \"port\": 6000 }\n],\n\"advertise\": false\n}\n],\n\"useLedgerAfterSlot\": 67067585\n}\n

Once the above two files are updated, since you modified the files manually, there is always a chance of human error (eg: a missing comma/quote). Thus, we would recommend you start the node interactively once again before proceeding.
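
Before doing so, a quick way to catch such syntax errors is to run the modified files through jq (already used by the scripts above) - a minimal check:

jq . \"${CNODE_HOME}\"/files/config.json >/dev/null && echo \"config.json OK\"\njq . \"${CNODE_HOME}\"/files/topology.json >/dev/null && echo \"topology.json OK\"\n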

    cd \"${CNODE_HOME}\"/scripts\n./cnode.sh\n

As before, ensure you do not have any errors in the console. To stop the node, hit Ctrl-C - we will start the node via systemd later in the document.

    "},{"location":"Build/node-cli/#start-the-submit-api","title":"Start the submit-api","text":"

    Note

    An average pool operator may not require cardano-submit-api at all. Please verify if it is required for your use as mentioned here. If - however - you do run submit-api for accepting sizeable transaction load, you would want to override the default MEMPOOL_BYTES by uncommenting it in cnode.sh.

    cardano-submit-api is one of the binaries built as part of cardano-node repository and allows you to submit transactions over a Web API. To run this service interactively, you can use the pre-built script below (submitapi.sh). Make sure to update submitapi.sh script to change listen IP or Port that you'd want to make this service available on.

    cd $CNODE_HOME/scripts\n./submitapi.sh\n

    To stop the process, hit Ctrl-C

    "},{"location":"Build/node-cli/#systemd","title":"Run as systemd service","text":"

The preferred way to run the node (and submit-api) is through a service manager like systemd. This section explains how to set up the systemd service files.

    1. Deploy as a systemd service Execute the below command to deploy your node as a systemd service (from the respective scripts folder):

    cd $CNODE_HOME/scripts\n./cnode.sh -d\n# Deploying cnode.service as systemd service..\n# cnode.service deployed successfully!!\n\n./submitapi.sh -d\n# Deploying cnode-submit-api.service as systemd service..\n# cnode-submit-api deployed successfully!!\n

2. Start the service Run the below commands to start the services (see the note after this list on enabling them at boot).

    sudo systemctl start cnode.service\nsudo systemctl start cnode-submit-api.service\n

3. Check status and stop/start commands Replace status with stop/start/restart depending on the action to take.

    sudo systemctl status cnode.service\nsudo systemctl status cnode-submit-api.service\n
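
Regarding step 2: if the services were not already enabled for automatic start during deployment, the standard systemd commands below will enable them on boot:

sudo systemctl enable cnode.service\nsudo systemctl enable cnode-submit-api.service\n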

    Important

In case you see the node exit unsuccessfully upon checking status, please verify you've followed the transition process correctly as documented below, and that you do not have another instance of the node already running. It would help to check your system logs (/var/log/syslog for debian-based and /var/log/messages for Red Hat/CentOS/Fedora systems; you can also check journalctl -f -u <service> to examine startup attempts for services) for any errors while starting the node.

    You can use gLiveView to monitor your node that was started as a systemd service.

    cd $CNODE_HOME/scripts\n./gLiveView.sh\n
    "},{"location":"Build/offchain-metadata-tools/","title":"Offchain Metadata Tools","text":"

    Important

    In the Cardano multi-asset era, this project helps you create and submit metadata describing your assets, storing them off-chain.

    "},{"location":"Build/offchain-metadata-tools/#download-pre-built-binaries","title":"Download pre-built binaries","text":"

Go to input-output-hk/offchain-metadata-tools to download the binaries and place them in a directory in your PATH, e.g. $HOME/.local/bin/.

    "},{"location":"Build/offchain-metadata-tools/#build-instructions","title":"Build Instructions","text":"

As an alternative to the pre-built binaries, the instructions below describe how to build the token-metadata-creator tool. The offchain-metadata-tools repository contains other tools as well - build the ones needed for your installation.

    "},{"location":"Build/offchain-metadata-tools/#clone-the-repository","title":"Clone the repository","text":"

    Execute the below to clone the offchain-metadata-tools repository to $HOME/git folder on your system:

    cd ~/git\ngit clone https://github.com/input-output-hk/offchain-metadata-tools.git\ncd offchain-metadata-tools/token-metadata-creator\n
    "},{"location":"Build/offchain-metadata-tools/#build-token-metadata-creator","title":"Build token-metadata-creator","text":"

You can use the instructions below to build token-metadata-creator; the same steps can be executed in the future to update the binaries (replacing the appropriate tag) as well.

    git fetch --tags --all\ngit pull\n# Replace master with appropriate tag if you'd like to avoid compiling against master\ngit checkout master\n$CNODE_HOME/scripts/cabal-build-all.sh\n
    The above would copy the binaries into ~/.local/bin folder.

    "},{"location":"Build/offchain-metadata-tools/#verify","title":"Verify","text":"

    Verify that the tool is executable from anywhere by running:

    token-metadata-creator -h\n
    "},{"location":"Build/wallet/","title":"Wallet","text":"

Important

An average pool operator may not require cardano-wallet at all. Please verify if it is required for your use as mentioned here.

    Ensure the Pre-Requisites are in place before you proceed.

    "},{"location":"Build/wallet/#build-instructions","title":"Build Instructions","text":"

    Follow instructions below for building the cardano-wallet binary:

    "},{"location":"Build/wallet/#clone-the-repository","title":"Clone the repository","text":"

    Execute the below to clone the cardano-wallet repository to $HOME/git folder on your system:

    cd ~/git\ngit clone https://github.com/cardano-foundation/cardano-wallet\ncd cardano-wallet\n
    "},{"location":"Build/wallet/#build-cardano-wallet","title":"Build Cardano Wallet","text":"

    You can use the instructions below to build the latest release of cardano-wallet.

Note

Note that the latest release of cardano-wallet may not work with the latest release of cardano-node. Please check the compatibility of each cardano-wallet release yourself in the official docs, e.g. https://github.com/cardano-foundation/cardano-wallet/releases/latest.

    git fetch --tags --all\ngit pull\n# Replace tag against checkout if you do not want to build the latest released version\ngit checkout $(curl -s https://api.github.com/repos/cardano-foundation/cardano-wallet/releases/latest | jq -r .tag_name)\n$CNODE_HOME/scripts/cabal-build-all.sh\n

    The above would copy the binaries into ~/.local/bin folder.

    "},{"location":"Build/wallet/#start-the-wallet","title":"Start the wallet","text":"

    You can run the below to connect to a cardano-node instance that is expected to be already running and the wallet will start syncing.

# Note: if using the testnet flag, you also need to specify the testnet shelley-genesis.json file\ncardano-wallet serve \\\n    --node-socket $CNODE_HOME/sockets/node.socket \\\n    --mainnet \\\n    --database $CNODE_HOME/priv/wallet\n

    "},{"location":"Build/wallet/#verify-the-wallet-is-handling-requests","title":"Verify the wallet is handling requests","text":"

    cardano-wallet network information\n
    Expected output should be similar to the following
    Ok.\n{\n\"network_tip\": {\n\"time\": \"2021-06-01T17:31:05Z\",\n\"epoch_number\": 269,\n\"absolute_slot_number\": 31002374,\n\"slot_number\": 157574\n},\n\"node_era\": \"mary\",\n\"node_tip\": {\n\"height\": {\n\"quantity\": 5795127,\n\"unit\": \"block\"\n},\n\"time\": \"2021-06-01T17:31:00Z\",\n\"epoch_number\": 269,\n\"absolute_slot_number\": 31002369,\n\"slot_number\": 157569\n},\n\"sync_progress\": {\n\"status\": \"ready\"\n},\n\"next_epoch\": {\n\"epoch_start_time\": \"2021-06-04T21:44:51Z\",\n\"epoch_number\": 270\n}\n}\n

    "},{"location":"Build/wallet/#creatingrestoring-wallet","title":"Creating/Restoring Wallet","text":"

    If you're creating a new wallet, you'd first want to generate a mnemonic for use (see below):

    cardano-wallet recovery-phrase generate\n# false brother typical saddle settle phrase foster sauce ask sunset firm gate service render burger\n
    You can use the above mnemonic to then restore a wallet as per below:
    cardano-wallet wallet create from-recovery-phrase MyWalletName\n

    "},{"location":"Build/wallet/#expected-output","title":"Expected output:","text":"
    Please enter a 15\u201324 word recovery phrase: false brother typical saddle settle phrase foster sauce ask sunset firm gate service render burger\n(Enter a blank line if you do not wish to use a second factor.)\nPlease enter a 9\u201312 word second factor:\nPlease enter a passphrase: **********\nEnter the passphrase a second time: **********\nOk.\n{\n    ...\n}\n
    "},{"location":"Mithril/mithril-overview/","title":"Mithril Overview","text":"

Mithril networks provide the ability to download and bootstrap cardano nodes via snapshots of the Cardano blockchain. This is a great way to speed up the process of syncing a new node, especially for stake pool operators. The tools provided by Guild Operators are designed to facilitate ease of use in setting up and managing these components.

The env file contains a new environment variable MITHRIL_DOWNLOAD that, when enabled, allows the cnode.sh script to automatically download the latest Mithril snapshot if the local db directory is empty. This is useful for new nodes that need to be bootstrapped with the latest snapshot to avoid synchronising the entire blockchain from scratch, while also providing a high level of trust that the snapshot is valid, since it is signed by multiple pool operators.
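
A sketch of the relevant line in $CNODE_HOME/scripts/env - the Y value is an assumption following the Y/N convention used by other toggles in that file:

MITHRIL_DOWNLOAD=\"Y\"   # Bootstrap an empty db directory from a Mithril snapshot on node start\n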

    "},{"location":"Mithril/mithril-overview/#architecture","title":"Architecture","text":"

The architecture for Mithril networks is described in detail in Mithril network architecture by CF/IOHK. However, the architecture suggested and supported by the Guild Operators tools is not identical to the upstream documentation: we provide a more simplified approach to the setup and management of the Mithril network components, with tools that allow setting up Squid-based Mithril relays and an Nginx load balancer (aka sidecar) local to the Mithril signer. The Nginx sidecar provides the ability to load balance requests to multiple Squid-based Mithril relays running on each of the SPO's Cardano relay nodes.

    "},{"location":"Mithril/mithril-overview/#single-relay-architecture","title":"Single Relay Architecture","text":"

For SPOs who only have a single Cardano relay node, a Squid-based Mithril relay can be run on the same node as the Cardano relay. This can be used by the Mithril signer to submit the snapshot signatures to the Mithril aggregator.

    "},{"location":"Mithril/mithril-overview/#multi-relay-architecture","title":"Multi Relay Architecture","text":"

For SPOs who have multiple Cardano relay nodes, an Nginx relay sidecar can be run on the block producer to load balance requests over multiple Cardano relay nodes, each running its own Squid-based Mithril relay to pass the signature along to the Mithril aggregator. This avoids a single point of failure in case a relay server is offline for any reason, and provides high availability for the Mithril signer through multiple relays, as long as the local Nginx Mithril relay is running on the same server as the Cardano block producer node.

    "},{"location":"Mithril/mithril-overview/#installation","title":"Installation","text":"

The installation of the Mithril tools is automated via guild-deploy.sh. To participate in a Mithril network, include the -s m flag, which will install the Mithril client and Mithril signer release binaries to \"${HOME}\"/.local/bin.

    guild-deploy.sh -s m\n
    "},{"location":"Mithril/mithril-overview/#bootstrapping-a-node-using-mithril-client","title":"Bootstrapping a node using Mithril Client","text":"

    The Mithril client is used to download a snapshot of the Cardano blockchain from a Mithril Aggregator. The snapshot is then used to bootstrap a new Cardano node. The Mithril client can be used to download the latest snapshot, list all available snapshots, or show details of a specific snapshot.

    To bootstrap a Cardano node using the Mithril client, follow these steps:

    1. Setup the Cardano Node: Use the guild tools to setup the Cardano node, either by building the binaries or using pre-compiled binaries. Follow the instructions in the guild-operators documentation.

    2. Create the Mithril environment file: Run the script with the environment setup command. This will create a new mithril.env file with all the necessary environment variables for the Mithril client.

    ./mithril-client.sh environment setup\n
3. Download the latest Mithril snapshot: Once the environment file is set up, you can download the latest Mithril snapshot by running the script with the snapshot download command. This snapshot contains the latest state of the Cardano blockchain db from a Mithril Aggregator.
    ./mithril-client.sh snapshot download\n
    "},{"location":"Mithril/mithril-overview/#participating-in-mithril-network","title":"Participating in Mithril Network","text":"

The Mithril signer is used to participate in the creation of stake-based signatures of snapshots. It can be used to sign snapshots, and the signed snapshot is then submitted to a Mithril aggregator via a Squid-based Mithril relay.

    The first step to participate in the Mithril network is to deploy your Squid based Mithril Relays. The Mithril relay is used to provide a private and highly available network for submitting the snapshots to a Mithril Aggregator.

    "},{"location":"Mithril/mithril-overview/#deploying-the-squid-mithril-relay","title":"Deploying the Squid Mithril Relay","text":"

    To deploy your Squid based Mithril Relays with your Cardano relay node, follow these steps:

    1. Deploy the Squid Mithril Relay: Run the mithril-relay.sh script:

    2. Use the -d flag to deploy the Squid Mithril Relay.

    3. Provide the IP address of your Block Producer when prompted to secure the Mithril Relay to only accept traffic from your Block Producer.
    4. Optionally provide the relays listening port when prompted to use a port other than the default 3132, or just press enter to use the default.
    5. Create the appropriate firewall rule to allow traffic from your Block Producer to the Mithril Relay.
    ./mithril-relay.sh -d\n\nInstalling squid proxy\nEnter the IP address of your Block Producer: 1.2.3.4\nEnter the relays listening port (press Enter to use default 3132):\nUsing port 3132 for relays listening port.\nCreate the appropriate firewall rule: sudo ufw allow from 1.2.3.4 to any port 3132 proto tcp\n
6. Enable the Systemd Squid Mithril Relay service to start on boot.
       sudo systemctl enable --now squid\n
7. Repeat the process for each of your Cardano relay nodes.
    "},{"location":"Mithril/mithril-overview/#deploying-the-mithril-signer","title":"Deploying the Mithril Signer","text":""},{"location":"Mithril/mithril-overview/#mithril-signer-with-single-relay","title":"Mithril Signer with Single Relay","text":"
    1. Deploy the Mithril Signer: Run the mithril-signer.sh script:

    2. Use the -u flag to update the mithril.env file with the Mithril Signer environment variables.

    3. Provide the IP address of your Mithril Relay when prompted.
    4. Optionally provide the relays listening port when prompted to use a port.

        ./mithril-signer.sh -u\n  Enter the IP address of the relay endpoint: 4.5.6.7\n  Enter the port of the relay endpoint (press Enter to use default 3132):\n  Using RELAY_ENDPOINT=4.5.6.7:3132 for the Mithril signer relay endpoint.\n
    5. Use the -d flag to deploy the Mithril Signer.

        ./mithril-signer.sh -d\n  Creating cnode-mithril-signer systemd service environment file..\n  Mithril signer service successfully deployed\n
    6. Enable the Systemd service to start the Mithril Signer on boot.

        sudo systemctl enable cnode-mithril-signer\n
    "},{"location":"Mithril/mithril-overview/#mithril-signer-with-multi-relay","title":"Mithril Signer with Multi Relay","text":"
    1. Deploy the Nginx sidecar loadbalancer: Run the mithril-relay.sh script:

    2. Use the -l flag to deploy the Nginx Mithril Relay.

    3. Provide the IP address of your Block Producer when prompted to secure the Mithril Relay to only accept traffic from your Block Producer.
    4. Optionally provide the relays listening port when prompted to use a port other than the default 3132, or just press enter to use the default.
    5. Create the appropriate firewall rule to allow traffic from your Block Producer to the Mithril Relay.

./mithril-relay.sh -l\n\nInstalling nginx load balancer\nEnter the IP address of a relay: 4.5.6.7\nAre there more relays? (y/n) y\nEnter the IP address of a relay: 8.9.10.11\nAre there more relays? (y/n) n\nEnter the IP address of the load balancer (press Enter to use default 127.0.0.1):\nUsing IP address 127.0.0.1 for the load balancer configuration.\nEnter the relays listening port (press Enter to use default 3132):\nUsing port 3132 for relays listening port.\nStarting Mithril relay sidecar (nginx load balancer)\n
    6. Enable the Systemd Nginx Mithril Relay service to start on boot.

      sudo systemctl enable --now nginx\n
    7. Deploy the Mithril Signer: Run the mithril-signer.sh script:

    8. Use the -u flag to update the mithril.env file with the Mithril Signer environment variables.

    9. Provide the IP address of your Mithril Relay when prompted.
    10. Optionally provide the relays listening port when prompted to use a port.

          ./mithril-signer.sh -u\n    Enter the IP address of the relay endpoint: 127.0.0.1\n    Enter the port of the relay endpoint (press Enter to use default 3132):\n    Using RELAY_ENDPOINT=127.0.0.1:3132 for the Mithril signer relay endpoint.\n
    11. Use the -d flag to deploy the Mithril Signer.

          ./mithril-signer.sh -d\n    Creating cnode-mithril-signer systemd service environment file..\n    Mithril signer service successfully deployed\n
    12. Enable the Systemd service to start the Mithril Signer on boot.

          sudo systemctl enable cnode-mithril-signer\n
    "},{"location":"Scripts/blockperf/","title":"BlockPerf","text":"

    Reminder !!

    Ensure the Pre-Requisites are in place before you proceed.

    blockPerf.sh is a script to monitor the network propagation of new blocks as seen by the local cardano-node.

    "},{"location":"Scripts/blockperf/#block-propagation-traces","title":"Block propagation traces","text":"

    Although blockPerf can also run on the block producer, it makes the most sense to run it on the upstream relays. There it waits for each new block announced to the relay over the network by its remote peers.

It looks for the delay times that result at each step of a block's journey: header announcement, block request, block download, and adoption.

You can view this data locally as a console stream, or run it as a systemd service in the background.

BlockPerf also sends this data to the TopologyUpdater server, so that the data can be compared across nodes (similar to sendtip to pooltool). As a contributing operator, you get the ability to see how your own relays compare to other nodes regarding receive quality, delay times and thus performance.

    There is no connection or constraint between the TopologyUpdater Relay subscription and the BlockPerf analysis. BlockPerf is even designed to work outside the cnTools suite.

These data are a good basis for making optimisations and evaluating which changes were useful or might be required to improve performance compared to other relay nodes.

    "},{"location":"Scripts/blockperf/#installation","title":"Installation","text":"

The script is best run as a background process. This can be accomplished in many ways, but the preferred method is to run it as a systemd service. A terminal multiplexer like tmux or screen could also be used, but is not covered here.

    "},{"location":"Scripts/blockperf/#run-as-service","title":"Run as service","text":"

Use the deploy-as-systemd.sh script to create a systemd unit file. In this setup the script is started in \"service\" mode. Error/Warn level log output is handled by syslog and ends up in the system's standard syslog file, normally /var/log/syslog. journalctl -f -u cnode-tu-blockperf.service can be used to check service output (follow mode).

Outside the cnTools environment, call blockPerf.sh -d to install it as a systemd service.

    "},{"location":"Scripts/blockperf/#console-view","title":"Console view","text":"

If you run blockPerf locally in the console (scripts/blockPerf.sh), then immediately after the appearance of a new block it shows where it came from, how many slots away from the previous block it was, and how many milliseconds the individual steps took.

    Block:.... 6860534\n Slot..... 52833850 (+59s)\n ......... 2022-02-09 09:49:01\n Header... 2022-02-09 09:49:02,780 (+1780 ms)\n Request.. 2022-02-09 09:49:02,780 (+0 ms)\n Block.... 2022-02-09 09:49:02,830 (+50 ms)\n Adopted.. 2022-02-09 09:49:02,900 (+70 ms)\n Size..... 79976 bytes\n delay.... 1.819971868 sec\n From..... 104.xxx.xxx.61:3001\n\nBlock:.... 6860535\n Slot..... 52833857 (+7s)\n ......... 2022-02-09 09:49:08\n Header... 2022-02-09 09:49:08,960 (+960 ms)\n Request.. 2022-02-09 09:49:08,970 (+10 ms)\n Block.... 2022-02-09 09:49:09,020 (+50 ms)\n Adopted.. 2022-02-09 09:49:09,090 (+70 ms)\n Size..... 64950 bytes\n delay.... 1.028341023 sec\n From..... 34.xxx.xxx.15:4001\n
    "},{"location":"Scripts/blockperf/#collaborative-web-view","title":"Collaborative web view","text":"

    A further aim of the blockPerf project is to use the data that individual nodes send to the central TopologyUpdater database to produce graphical visualisations and evaluations that provide the participating node operators with useful insights into their performance compared to all others.

    "},{"location":"Scripts/cncli/","title":"CNCLI","text":"

    Reminder !!

    Ensure the Pre-Requisites are in place before you proceed.

cncli.sh is a script to download and deploy CNCLI, created and maintained by Andrew Westberg. It's a community-based CLI tool written in Rust for low-level cardano-node communication. Usage is optional and no script is dependent on it. The main features include:

    "},{"location":"Scripts/cncli/#installation","title":"Installation","text":"

The cncli.sh script's main functions - sync, leaderlog, validate and PoolTool sendslots/sendtip - are not meant to be run manually, but instead deployed as systemd services that run in the background to do the block scraping and validation automatically. Additional commands exist for manual execution to initialise the sqlite db, fill the blocklog DB with all blocks created by the pool known to the blockchain, migrate an old cntoolsBlockCollector JSON blocklog, and re-validate blocks and leaderlogs. See the usage output below for a complete list of available commands.

The script works in tandem with Log Monitor to provide faster adopted status, but mainly to catch slots the node is leader for but is unable to create a block for. These are marked as invalid. Blocklog will however work fine without the logMonitor service, and CNCLI is able to handle everything except catching invalid blocks.

1. Run the latest version of guild-deploy.sh with guild-deploy.sh -s c to download and install Rust and CNCLI. The IOG fork of libsodium required by CNCLI is automatically compiled by the CNCLI build process. If a previous installation is found, Rust and CNCLI will be updated to the latest version.
2. Run deploy-as-systemd.sh to deploy the systemd services that handle all the work in the background. Six systemd services in total are deployed, of which four are related to CNCLI. See above for the different purposes they serve.
    3. If you want to disable some of the deployed services, run sudo systemctl disable <service>

    4. cnode.service (main cardano-node launcher)

    5. cnode-cncli-sync.service
    6. cnode-cncli-leaderlog.service
    7. cnode-cncli-validate.service
    8. cnode-cncli-ptsendtip.service
    9. cnode-cncli-ptsendslots.service
    10. cnode-logmonitor.service (see Log Monitor)
    "},{"location":"Scripts/cncli/#configuration","title":"Configuration","text":"

You can override the values in the script in the User Variables section shown below. POOL_ID, POOL_VRF_SKEY and POOL_VRF_VKEY should be automatically detected if POOL_NAME is set in the common env file, and can be left commented. PT_API_KEY and POOL_TICKER need to be set in the script before starting the services if PoolTool sendtip/sendslots are to be used. For the rest of the commented values, if the defaults do not provide the right values, uncomment and make adjustments.

    #POOL_ID=\"\"                               # Automatically detected if POOL_NAME is set in env. Required for leaderlog calculation & pooltool sendtip, lower-case hex pool id\n#POOL_VRF_SKEY=\"\"                         # Automatically detected if POOL_NAME is set in env. Required for leaderlog calculation, path to pool's vrf.skey file\n#POOL_VRF_VKEY=\"\"                         # Automatically detected if POOL_NAME is set in env. Required for block validation, path to pool's vrf.vkey file\n#PT_API_KEY=\"\"                            # POOLTOOL sendtip: set API key, e.g \"a47811d3-0008-4ecd-9f3e-9c22bdb7c82d\"\n#POOL_TICKER=\"\"                           # POOLTOOL sendtip: set the pools ticker, e.g. \"TCKR\"\n#PT_HOST=\"127.0.0.1\"                      # POOLTOOL sendtip: connect to a remote node, preferably block producer (default localhost)\n#PT_PORT=\"${CNODE_PORT}\"                  # POOLTOOL sendtip: port of node to connect to (default is CNODE_PORT from the env file)\n#CNCLI_DIR=\"${CNODE_HOME}/guild-db/cncli\" # path to the directory for cncli sqlite db\n#SLEEP_RATE=60                            # CNCLI leaderlog/validate: time to wait until next check (in seconds)\n#CONFIRM_SLOT_CNT=600                     # CNCLI validate: require at least these many slots to have passed before validating\n#CONFIRM_BLOCK_CNT=15                     # CNCLI validate: require at least these many blocks on top of minted before validating\n#TIMEOUT_LEDGER_STATE=300                 # CNCLI leaderlog: timeout in seconds for ledger-state query\n#BATCH_AUTO_UPDATE=N                      # Set to Y to automatically update the script if a new version is available without user interaction\n
    "},{"location":"Scripts/cncli/#run","title":"Run","text":"

Services are controlled by sudo systemctl <status|start|stop|restart> <service name>. All services are configured as child services of cnode.service and as such, when an action is taken against this service, it is replicated to all child services. E.g. running sudo systemctl start cnode.service will also start all child services.

    Log output is handled by syslog and ends up in the system's standard syslog file, normally /var/log/syslog. journalctl -f -u <service> can be used to check service output (follow mode). Other logging configurations are not covered here.
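
    For example, to follow the output of the CNCLI sync service in real time:

        journalctl -f -u cnode-cncli-sync.service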

    Recommended workflow to get started with CNCLI blocklog:

    1. Install and deploy services according to the Installation section.
    2. Set required user variables according to the Configuration section.
    3. (optional) If a previous blocklog db created by cntoolsBlockCollector exists, run this command to migrate the json storage to the new SQLite DB: $CNODE_HOME/scripts/cncli.sh migrate <path>, where <path> is the location of the directory containing all blocks_.json files.
    4. Start the deployed services with:
       - sudo systemctl start cnode-cncli-sync.service (starts leaderlog, validate & ptsendslots automatically)
       - sudo systemctl start cnode-logmonitor.service
       - sudo systemctl start cnode-cncli-ptsendtip.service (optional but recommended)
       - alternatively, restart the main service, which will trigger a start of all child services: sudo systemctl restart cnode.service
    5. Run the init command to fill the db with all blocks made by your pool known to the blockchain: $CNODE_HOME/scripts/cncli.sh init
    6. Enjoy full blocklog automation and visit the View Blocklog section for instructions on how to show blocks from the blocklog DB.

    Usage: cncli.sh [operation <sub arg>]\nScript to run CNCLI, best launched through systemd deployed by 'deploy-as-systemd.sh'\n\nsync        Start CNCLI chainsync process that connects to cardano-node to sync blocks stored in SQLite DB (deployed as service)\nleaderlog   One-time leader schedule calculation for current epoch, then continuously monitors and calculates schedule for coming epochs, 1.5 days before epoch boundary on the mainnet (deployed as service)\n  force     Manually force leaderlog calculation and overwrite even if already done, exits after leaderlog is calculated\nvalidate    Continuously monitor and confirm that the blocks made actually was accepted and adopted by chain (deployed as service)\n  all       One-time re-validation of all blocks in blocklog db\n  epoch     One-time re-validation of blocks in blocklog db for the specified epoch \nptsendtip   Send node tip to PoolTool for network analysis and to show that your node is alive and well with a green badge (deployed as service)\nptsendslots Securely sends PoolTool the number of slots you have assigned for an epoch and validates the correctness of your past epochs (deployed as service)\ninit        One-time initialization adding all minted and confirmed blocks to blocklog\nmigrate     One-time migration from old blocklog (cntoolsBlockCollector) to new format (post cncli)\n  path      Path to the old cntoolsBlockCollector blocklog folder holding json files with blocks created\n
      "},{"location":"Scripts/cncli/#view-blocklog","title":"View Blocklog","text":"

      The blocklog is best and most easily viewed in CNTools and gLiveView, but as it is stored in a SQLite DB, the sqlite3 command can be used to query it directly if you are comfortable with SQL.
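
      For example, a per-epoch status summary straight from the DB (a minimal sketch; the default DB path from these scripts is assumed, and the blocklog table name is an assumption based on cncli.sh, adjust to your deployment):

          sqlite3 "${CNODE_HOME}/guild-db/blocklog/blocklog.db" \
            'SELECT epoch, status, COUNT(*) FROM blocklog GROUP BY epoch, status ORDER BY epoch DESC;'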

      Block status

      - Leader    : Scheduled to make block at this slot\n- Ideal     : Expected/Ideal number of blocks assigned based on active stake (sigma)\n- Luck      : Leader slots assigned vs ideal slots for this epoch\n- Adopted   : Block created successfully\n- Confirmed : Block created validated to be on-chain with the certainty set in `cncli.sh` for `CONFIRM_BLOCK_CNT`\n- Missed    : Scheduled at slot but no record of it in CNCLI DB and no other pool has made a block for this slot\n- Ghosted   : Block created but marked as orphaned and no other pool has made a valid block for this slot -> height battle or block propagation issue\n- Stolen    : Another pool has a valid block registered on-chain for the same slot\n- Invalid   : Pool failed to create block, base64 encoded error message can be decoded with `echo <base64 hash> | base64 -d | jq -r`\n
      CNTools

      Open CNTools and select [b] Blocks to open the block viewer. Either select Epoch and enter the epoch you want a detailed view for, or choose Summary to display blocks for the last x epochs.

      If the node was elected to create blocks in the selected epoch, it could look something like this:

      Summary
       >> BLOCKS\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nCurrent epoch: 96\n\n+--------+---------------------------+----------------------+--------------------------------------+\n| Epoch  | Leader | Ideal | Luck     | Adopted | Confirmed  | Missed | Ghosted | Stolen | Invalid  |\n+--------+---------------------------+----------------------+--------------------------------------+\n| 96     | 34     | 31.66 | 107.39%  | 18      | 18         | 0      | 0       | 0      | 0        |\n| 95     | 32     | 30.57 | 104.68%  | 32      | 32         | 0      | 0       | 0      | 0        |\n+--------+---------------------------+----------------------+--------------------------------------+\n\n[h] Home | [b] Block View | [i] Info | [*] Refresh\n
      Epoch
       >> BLOCKS\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nCurrent epoch: 96\n\n+---------------------------+----------------------+--------------------------------------+\n| Leader | Ideal | Luck     | Adopted | Confirmed  | Missed | Ghosted | Stolen | Invalid  |\n+---------------------------+----------------------+--------------------------------------+\n| 34     | 31.66 | 107.39%  | 18      | 18         | 0      | 0       | 0      | 0        |\n+---------------------------+----------------------+--------------------------------------+\n\n+-----+------------+----------+---------------------+--------------------------+-------+-------------------------------------------------------------------+\n| #   | Status     | Block    | Slot | SlotInEpoch  | Scheduled At             | Size  | Hash                                                              |\n+-----+------------+----------+---------------------+--------------------------+-------+-------------------------------------------------------------------+\n| 1   | confirmed  | 2043444  | 11142827 | 40427    | 2020-11-16 08:34:03 CET  | 3     | ec216d3fb01e4a3cc3e85305145a31875d9561fa3bbcc6d0ee8297236dbb4115  |\n| 2   | confirmed  | 2044321  | 11165082 | 62682    | 2020-11-16 14:44:58 CET  | 3     | b75c33a5bbe49a74e4b4cc5df4474398bfb10ed39531fc65ec2acc51f89ddce5  |\n| 3   | confirmed  | 2044397  | 11166970 | 64570    | 2020-11-16 15:16:26 CET  | 3     | c1ea37fd72543779b6dab46e3e29e0e422784b5fd6188f828ace9eabcc87088f  |\n| 4   | confirmed  | 2044879  | 11178909 | 76509    | 2020-11-16 18:35:25 CET  | 3     | 35a116cec80c5dc295415e4fc8e6435c562b14a5d6833027006c988706c60307  |\n| 5   | confirmed  | 2046965  | 11232557 | 130157   | 2020-11-17 09:29:33 CET  | 3     | d566e5a1f6a3d78811acab4ae3bdcee6aa42717364f9afecd6cac5093559f466  |\n| 6   | confirmed  | 2047101  | 11235675 | 133275   | 2020-11-17 10:21:31 CET  | 3     | 3a638e01f70ea1c4a660fe4e6333272e6c61b11cf84dc8a5a107b414d1e057eb  |\n| 7   | confirmed  | 2047221  | 11238453 | 136053   | 2020-11-17 11:07:49 CET  | 3     | 843336f132961b94276603707751cdb9a1c2528b97100819ce47bc317af0a2d6  |\n| 8   | confirmed  | 2048692  | 11273507 | 171107   | 2020-11-17 20:52:03 CET  | 3     | 9b3eb79fe07e8ebae163870c21ba30460e689b23768d2e5f8e7118c572c4df36  |\n| 9   | confirmed  | 2049058  | 11282619 | 180219   | 2020-11-17 23:23:55 CET  | 3     | 643396ea9a1a2b6c66bb83bdc589fa19c8ae728d1f1181aab82e8dfe508d430a  |\n| 10  | confirmed  | 2049321  | 11289237 | 186837   | 2020-11-18 01:14:13 CET  | 3     | d93d305a955f40b2298247d44e4bc27fe9e3d1486ef3ef3e73b235b25247ccd7  |\n| 11  | confirmed  | 2049747  | 11299205 | 196805   | 2020-11-18 04:00:21 CET  | 3     | 19a43deb5014b14760c3e564b41027c5ee50e0a252abddbfcac90c8f56dc0245  |\n| 12  | confirmed  | 2050415  | 11316075 | 213675   | 2020-11-18 08:41:31 CET  | 3     | dd2cb47653f3bfb3ccc8ffe76906e07d96f1384bafd57a872ddbab3b352403e3  |\n| 13  | confirmed  | 2050505  | 11318274 | 215874   | 2020-11-18 09:18:10 CET  | 3     | deb834bc42360f8d39eefc5856bb6d7cabb6b04170c842dcbe7e9efdf9dbd2e1  |\n| 14  | confirmed  | 2050613  | 11320754 | 218354   | 2020-11-18 09:59:30 CET  | 3     | bf094f6fde8e8c29f568a253201e4b92b078e9a1cad60706285e236a91ec95ff  |\n| 15  | confirmed  | 2050807  | 11325239 | 222839   | 2020-11-18 11:14:15 CET  | 3     | 21f904346ba0fd2bb41afaae7d35977cb929d1d9727887f541782576fc6a62c9  |\n| 16  | confirmed  | 2050997  | 11330062 | 227662   | 2020-11-18 12:34:38 CET  | 3     | 
109799d686fe3cad13b156a2d446a544fde2bf5d0e8f157f688f1dc30f35e912  |\n| 17  | confirmed  | 2051286  | 11336791 | 234391   | 2020-11-18 14:26:47 CET  | 3     | bb1beca7a1d849059110e3d7dc49ecf07b47970af2294fe73555ddfefb9561a8  |\n| 18  | confirmed  | 2051734  | 11348498 | 246098   | 2020-11-18 17:41:54 CET  | 3     | 87940b53c2342999c1ba4e185038cda3d8382891a16878a865f5114f540683de  |\n| 19  | leader     | -        | 11382001 | 279601   | 2020-11-19 03:00:17 CET  | -     | -                                                                 |\n| 20  | leader     | -        | 11419959 | 317559   | 2020-11-19 13:32:55 CET  | -     | -                                                                 |\n| 21  | leader     | -        | 11433174 | 330774   | 2020-11-19 17:13:10 CET  | -     | -                                                                 |\n| 22  | leader     | -        | 11434241 | 331841   | 2020-11-19 17:30:57 CET  | -     | -                                                                 |\n| 23  | leader     | -        | 11435289 | 332889   | 2020-11-19 17:48:25 CET  | -     | -                                                                 |\n| 24  | leader     | -        | 11440314 | 337914   | 2020-11-19 19:12:10 CET  | -     | -                                                                 |\n| 25  | leader     | -        | 11442361 | 339961   | 2020-11-19 19:46:17 CET  | -     | -                                                                 |\n| 26  | leader     | -        | 11443861 | 341461   | 2020-11-19 20:11:17 CET  | -     | -                                                                 |\n| 27  | leader     | -        | 11446997 | 344597   | 2020-11-19 21:03:33 CET  | -     | -                                                                 |\n| 28  | leader     | -        | 11453110 | 350710   | 2020-11-19 22:45:26 CET  | -     | -                                                                 |\n| 29  | leader     | -        | 11455323 | 352923   | 2020-11-19 23:22:19 CET  | -     | -                                                                 |\n| 30  | leader     | -        | 11505987 | 403587   | 2020-11-20 13:26:43 CET  | -     | -                                                                 |\n| 31  | leader     | -        | 11514983 | 412583   | 2020-11-20 15:56:39 CET  | -     | -                                                                 |\n| 32  | leader     | -        | 11516010 | 413610   | 2020-11-20 16:13:46 CET  | -     | -                                                                 |\n| 33  | leader     | -        | 11518958 | 416558   | 2020-11-20 17:02:54 CET  | -     | -                                                                 |\n| 34  | leader     | -        | 11533254 | 430854   | 2020-11-20 21:01:10 CET  | -     | -                                                                 |\n+-----+------------+----------+---------------------+--------------------------+-------+-------------------------------------------------------------------+\n
      gLiveView

      Currently shows a block summary for the current epoch. For full block details, use CNTools for now. Invalid, missed, ghosted and stolen blocks are only shown in case of a non-zero value.

      \u2502--------------------------------------------------------------\u2502\n\u2502 BLOCKS   Leader  | Ideal  | Luck    | Adopted | Confirmed    \u2502\n\u2502          24        27.42    87.53%    1         1            \u2502\n\u2502          08:07:57 until leader XXXXXXXXX.....................\u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n
      "},{"location":"Scripts/cntools-changelog/","title":"Changelog","text":"

      All notable changes to this tool will be documented in this file.

      Whenever you're updating between versions where the format/hash of keys has changed, or you're changing networks, it is recommended to back up your Wallet and Pool folders before you proceed with launching CNTools on a fresh network.

      The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.

      "},{"location":"Scripts/cntools-changelog/#1210-2024-01-19","title":"[12.1.0] - 2024-01-19","text":""},{"location":"Scripts/cntools-changelog/#changed","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#added","title":"Added","text":""},{"location":"Scripts/cntools-changelog/#1202-2024-02-11","title":"[12.0.2] - 2024-02-11","text":""},{"location":"Scripts/cntools-changelog/#fixed","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#1201-2024-01-26","title":"[12.0.1] - 2024-01-26","text":""},{"location":"Scripts/cntools-changelog/#fixed_1","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#1200-2024-01-19","title":"[12.0.0] - 2024-01-19","text":""},{"location":"Scripts/cntools-changelog/#changed_1","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#fixed_2","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#1102-2023-10-30","title":"[11.0.2] - 2023-10-30","text":""},{"location":"Scripts/cntools-changelog/#fixed_3","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#1101-2023-10-25","title":"[11.0.1] - 2023-10-25","text":""},{"location":"Scripts/cntools-changelog/#fixed_4","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#1100-2023-07-05","title":"[11.0.0] - 2023-07-05","text":""},{"location":"Scripts/cntools-changelog/#changed_2","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#1040-2023-06-19","title":"[10.4.0] - 2023-06-19","text":""},{"location":"Scripts/cntools-changelog/#added_1","title":"Added","text":""},{"location":"Scripts/cntools-changelog/#1031-2023-06-03","title":"[10.3.1] - 2023-06-03","text":""},{"location":"Scripts/cntools-changelog/#fixed_5","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#1030-2023-05-18","title":"[10.3.0] - 2023-05-18","text":""},{"location":"Scripts/cntools-changelog/#added_2","title":"Added","text":""},{"location":"Scripts/cntools-changelog/#1023-2023-04-28","title":"[10.2.3] - 2023-04-28","text":""},{"location":"Scripts/cntools-changelog/#fixed_6","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#1022-2023-04-24","title":"[10.2.2] - 2023-04-24","text":""},{"location":"Scripts/cntools-changelog/#fixed_7","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#1021-2023-04-04","title":"[10.2.1] - 2023-04-04","text":""},{"location":"Scripts/cntools-changelog/#fixed_8","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#1020-2023-03-13","title":"[10.2.0] - 2023-03-13","text":""},{"location":"Scripts/cntools-changelog/#fixed_9","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#changed_3","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#1011-2023-02-07","title":"[10.1.1] - 2023-02-07","text":""},{"location":"Scripts/cntools-changelog/#fixed_10","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#1010-2023-01-17","title":"[10.1.0] - 2023-01-17","text":""},{"location":"Scripts/cntools-changelog/#added_3","title":"Added","text":""},{"location":"Scripts/cntools-changelog/#changed_4","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#fixed_11","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#1005-2022-11-07","title":"[10.0.5] - 2022-11-07","text":""},{"location":"Scripts/cntools-changelog/#changed_5","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#1004-2022-08-26","title":"[10.0.4] - 
2022-08-26","text":""},{"location":"Scripts/cntools-changelog/#changed_6","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#fixed_12","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#1003-2022-08-16","title":"[10.0.3] - 2022-08-16","text":""},{"location":"Scripts/cntools-changelog/#fixed_13","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#1002-2022-08-13","title":"[10.0.2] - 2022-08-13","text":""},{"location":"Scripts/cntools-changelog/#fixed_14","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#1001-2022-07-14","title":"[10.0.1] - 2022-07-14","text":""},{"location":"Scripts/cntools-changelog/#changed_7","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#fixed_15","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#1000-2022-06-28","title":"[10.0.0] - 2022-06-28","text":""},{"location":"Scripts/cntools-changelog/#added_4","title":"Added","text":""},{"location":"Scripts/cntools-changelog/#changed_8","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#910-2022-05-11","title":"[9.1.0] - 2022-05-11","text":""},{"location":"Scripts/cntools-changelog/#changed_9","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#9010-2022-05-03","title":"[9.0.10] - 2022-05-03","text":""},{"location":"Scripts/cntools-changelog/#fixed_16","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#909-2022-03-14","title":"[9.0.9] - 2022-03-14","text":""},{"location":"Scripts/cntools-changelog/#changed_10","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#908-2022-03-07","title":"[9.0.8] - 2022-03-07","text":""},{"location":"Scripts/cntools-changelog/#changed_11","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#907-2022-03-02","title":"[9.0.7] - 2022-03-02","text":""},{"location":"Scripts/cntools-changelog/#fixed_17","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#906-2022-02-20","title":"[9.0.6] - 2022-02-20","text":""},{"location":"Scripts/cntools-changelog/#fixed_18","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#905-2022-02-16","title":"[9.0.5] - 2022-02-16","text":""},{"location":"Scripts/cntools-changelog/#fixed_19","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#904-2022-02-14","title":"[9.0.4] - 2022-02-14","text":""},{"location":"Scripts/cntools-changelog/#fixed_20","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#903-2022-02-01","title":"[9.0.3] - 2022-02-01","text":""},{"location":"Scripts/cntools-changelog/#added_5","title":"Added","text":""},{"location":"Scripts/cntools-changelog/#902-2022-01-22","title":"[9.0.2] - 2022-01-22","text":""},{"location":"Scripts/cntools-changelog/#changed_12","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#901-2022-01-17","title":"[9.0.1] - 2022-01-17","text":""},{"location":"Scripts/cntools-changelog/#changed_13","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#900-2022-01-10","title":"[9.0.0] - 2022-01-10","text":""},{"location":"Scripts/cntools-changelog/#changed_14","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#882-2021-12-28","title":"[8.8.2] - 2021-12-28","text":""},{"location":"Scripts/cntools-changelog/#fixed_21","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#881-2021-12-18","title":"[8.8.1] - 
2021-12-18","text":""},{"location":"Scripts/cntools-changelog/#fixed_22","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#880-2021-12-15","title":"[8.8.0] - 2021-12-15","text":""},{"location":"Scripts/cntools-changelog/#fixed_23","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#873-2021-11-30","title":"[8.7.3] - 2021-11-30","text":""},{"location":"Scripts/cntools-changelog/#fixed_24","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#872-2021-11-08","title":"[8.7.2] - 2021-11-08","text":""},{"location":"Scripts/cntools-changelog/#changed_15","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#871-2021-11-04","title":"[8.7.1] - 2021-11-04","text":""},{"location":"Scripts/cntools-changelog/#fixed_25","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#870-2021-10-05","title":"[8.7.0] - 2021-10-05","text":""},{"location":"Scripts/cntools-changelog/#changed_16","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#866-2021-09-26","title":"[8.6.6] - 2021-09-26","text":""},{"location":"Scripts/cntools-changelog/#fixed_26","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#865-2021-09-15","title":"[8.6.5] - 2021-09-15","text":""},{"location":"Scripts/cntools-changelog/#fixed_27","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#864-2021-09-14","title":"[8.6.4] - 2021-09-14","text":""},{"location":"Scripts/cntools-changelog/#fixed_28","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#863-2021-08-31","title":"[8.6.3] - 2021-08-31","text":""},{"location":"Scripts/cntools-changelog/#fixed_29","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#862-2021-08-30","title":"[8.6.2] - 2021-08-30","text":""},{"location":"Scripts/cntools-changelog/#fixed_30","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#861-2021-08-27","title":"[8.6.1] - 2021-08-27","text":""},{"location":"Scripts/cntools-changelog/#changed_17","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#860-2021-08-27","title":"[8.6.0] - 2021-08-27","text":""},{"location":"Scripts/cntools-changelog/#changed_18","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#8415-2021-07-15","title":"[8.4.15] - 2021-07-15","text":""},{"location":"Scripts/cntools-changelog/#changed_19","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#8414-2021-07-14","title":"[8.4.14] - 2021-07-14","text":""},{"location":"Scripts/cntools-changelog/#fixed_31","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#8413-2021-07-08","title":"[8.4.13] - 2021-07-08","text":""},{"location":"Scripts/cntools-changelog/#changed_20","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#8412-2021-06-28","title":"[8.4.12] - 2021-06-28","text":""},{"location":"Scripts/cntools-changelog/#fixed_32","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#8411-2021-06-25","title":"[8.4.11] - 2021-06-25","text":""},{"location":"Scripts/cntools-changelog/#changed_21","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#8410-2021-06-15","title":"[8.4.10] - 2021-06-15","text":""},{"location":"Scripts/cntools-changelog/#fixed_33","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#849-2021-06-15","title":"[8.4.9] - 2021-06-15","text":""},{"location":"Scripts/cntools-changelog/#changed_22","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#846-2021-06-04","title":"[8.4.6] - 
2021-06-04","text":""},{"location":"Scripts/cntools-changelog/#fixed_34","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#845-2021-05-31","title":"[8.4.5] - 2021-05-31","text":""},{"location":"Scripts/cntools-changelog/#fixed_35","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#844-2021-05-19","title":"[8.4.4] - 2021-05-19","text":""},{"location":"Scripts/cntools-changelog/#fixed_36","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#843-2021-05-17","title":"[8.4.3] - 2021-05-17","text":""},{"location":"Scripts/cntools-changelog/#fixed_37","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#842-2021-05-16","title":"[8.4.2] - 2021-05-16","text":""},{"location":"Scripts/cntools-changelog/#fixed_38","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#841-2021-05-16","title":"[8.4.1] - 2021-05-16","text":""},{"location":"Scripts/cntools-changelog/#changed_23","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#840-2021-05-16","title":"[8.4.0] - 2021-05-16","text":""},{"location":"Scripts/cntools-changelog/#added_6","title":"Added","text":""},{"location":"Scripts/cntools-changelog/#830-2021-05-15","title":"[8.3.0] - 2021-05-15","text":""},{"location":"Scripts/cntools-changelog/#added_7","title":"Added","text":""},{"location":"Scripts/cntools-changelog/#changed_24","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#fixed_39","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#822-2021-05-02","title":"[8.2.2] - 2021-05-02","text":""},{"location":"Scripts/cntools-changelog/#fixed_40","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#821-2021-04-26","title":"[8.2.1] - 2021-04-26","text":""},{"location":"Scripts/cntools-changelog/#changed_25","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#820-2021-04-18","title":"[8.2.0] - 2021-04-18","text":""},{"location":"Scripts/cntools-changelog/#added_8","title":"Added","text":""},{"location":"Scripts/cntools-changelog/#changed_26","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#816-2021-04-14","title":"[8.1.6] - 2021-04-14","text":""},{"location":"Scripts/cntools-changelog/#changed_27","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#fixed_41","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#815-2021-04-09","title":"[8.1.5] - 2021-04-09","text":""},{"location":"Scripts/cntools-changelog/#fixed_42","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#814-2021-04-05","title":"[8.1.4] - 2021-04-05","text":""},{"location":"Scripts/cntools-changelog/#changed_28","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#813-2021-04-01","title":"[8.1.3] - 2021-04-01","text":""},{"location":"Scripts/cntools-changelog/#fixed_43","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#812-2021-03-31","title":"[8.1.2] - 2021-03-31","text":""},{"location":"Scripts/cntools-changelog/#changed_29","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#811-2021-03-30","title":"[8.1.1] - 2021-03-30","text":""},{"location":"Scripts/cntools-changelog/#fixed_44","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#810-2021-03-26","title":"[8.1.0] - 
2021-03-26","text":""},{"location":"Scripts/cntools-changelog/#added_9","title":"Added","text":""},{"location":"Scripts/cntools-changelog/#changed_30","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#fixed_45","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#802-2021-03-15","title":"[8.0.2] - 2021-03-15","text":""},{"location":"Scripts/cntools-changelog/#fixed_46","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#801-2021-03-05","title":"[8.0.1] - 2021-03-05","text":""},{"location":"Scripts/cntools-changelog/#fixed_47","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#800-2021-02-28","title":"[8.0.0] - 2021-02-28","text":""},{"location":"Scripts/cntools-changelog/#added_10","title":"Added","text":""},{"location":"Scripts/cntools-changelog/#changed_31","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#fixed_48","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#716-2021-02-10","title":"[7.1.6] - 2021-02-10","text":""},{"location":"Scripts/cntools-changelog/#715-2021-02-03","title":"[7.1.5] - 2021-02-03","text":""},{"location":"Scripts/cntools-changelog/#changed_32","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#fixed_49","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#714-2021-02-01","title":"[7.1.4] - 2021-02-01","text":""},{"location":"Scripts/cntools-changelog/#fixed_50","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#713-2021-01-30","title":"[7.1.3] - 2021-01-30","text":""},{"location":"Scripts/cntools-changelog/#fixed_51","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#711-2021-01-29","title":"[7.1.1] - 2021-01-29","text":""},{"location":"Scripts/cntools-changelog/#changed_33","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#710-2021-01-29","title":"[7.1.0] - 2021-01-29","text":""},{"location":"Scripts/cntools-changelog/#changed_34","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#702-2021-01-17","title":"[7.0.2] - 2021-01-17","text":""},{"location":"Scripts/cntools-changelog/#changed_35","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#fixed_52","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#701-2021-01-13","title":"[7.0.1] - 2021-01-13","text":""},{"location":"Scripts/cntools-changelog/#changed_36","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#700-2021-01-11","title":"[7.0.0] - 2021-01-11","text":"

      Though mostly unchanged in the user interface, this is a major update with most of the back-end code re-written/touched. Only the most noticeable changes are added to the changelog.

      "},{"location":"Scripts/cntools-changelog/#added_11","title":"Added","text":""},{"location":"Scripts/cntools-changelog/#changed_37","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#fixed_53","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#631-2020-12-14","title":"[6.3.1] - 2020-12-14","text":""},{"location":"Scripts/cntools-changelog/#fixed_54","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#630-2020-12-03","title":"[6.3.0] - 2020-12-03","text":""},{"location":"Scripts/cntools-changelog/#changed_38","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#fixed_55","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#621-2020-11-28","title":"[6.2.1] - 2020-11-28","text":""},{"location":"Scripts/cntools-changelog/#changed_39","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#620-alpha-branch","title":"[6.2.0] - (alpha branch)","text":""},{"location":"Scripts/cntools-changelog/#added_12","title":"Added","text":""},{"location":"Scripts/cntools-changelog/#changed_40","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#fixed_56","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#610-2020-10-22","title":"[6.1.0] - 2020-10-22","text":""},{"location":"Scripts/cntools-changelog/#added_13","title":"Added","text":""},{"location":"Scripts/cntools-changelog/#changed_41","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#fixed_57","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#603-2020-10-16","title":"[6.0.3] - 2020-10-16","text":""},{"location":"Scripts/cntools-changelog/#fixed_58","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#602-2020-10-16","title":"[6.0.2] - 2020-10-16","text":""},{"location":"Scripts/cntools-changelog/#fixed_59","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#601-2020-10-16","title":"[6.0.1] - 2020-10-16","text":""},{"location":"Scripts/cntools-changelog/#fixed_60","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#600-2020-10-15","title":"[6.0.0] - 2020-10-15","text":"

      This is a major release with a lot of changes. It is highly recommended that you familiarise yourself with the usage of Hybrid or Online vs Offline mode in a testnet environment before doing it in production. Please visit https://cardano-community.github.io/guild-operators/upgrade for details.

      "},{"location":"Scripts/cntools-changelog/#added_14","title":"Added","text":""},{"location":"Scripts/cntools-changelog/#changed_42","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#removed","title":"Removed","text":""},{"location":"Scripts/cntools-changelog/#fixed_61","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#500-2020-07-20","title":"[5.0.0] - 2020-07-20","text":""},{"location":"Scripts/cntools-changelog/#added_15","title":"Added","text":""},{"location":"Scripts/cntools-changelog/#changed_43","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#removed_1","title":"Removed","text":""},{"location":"Scripts/cntools-changelog/#fixed_62","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#400-2020-07-13","title":"[4.0.0] - 2020-07-13","text":""},{"location":"Scripts/cntools-changelog/#added_16","title":"Added","text":""},{"location":"Scripts/cntools-changelog/#changed_44","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#300-2020-07-12","title":"[3.0.0] - 2020-07-12","text":""},{"location":"Scripts/cntools-changelog/#added_17","title":"Added","text":""},{"location":"Scripts/cntools-changelog/#changed_45","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#fixed_63","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#200-2020-07-12","title":"[2.0.0] - 2020-07-12","text":""},{"location":"Scripts/cntools-changelog/#added_18","title":"Added","text":""},{"location":"Scripts/cntools-changelog/#changed_46","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#removed_2","title":"Removed","text":""},{"location":"Scripts/cntools-changelog/#fixed_64","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#100-2020-07-07","title":"[1.0.0] - 2020-07-07","text":""},{"location":"Scripts/cntools-common/","title":"Common Tasks","text":"

      Important

      Familiarize yourself with the Online workflow of creating wallets and pools on the Preview/Preprod/Guild network first. You can then move on to test the Offline Workflow. The Offline workflow means that the private keys never touch the Online node. When comfortable with both the online and offline CNTools workflow, it's time to deploy what you learnt on the mainnet.

      This chapter describes some common use-cases for wallet and pool creation when running CNTools in Online mode. CNTools contains much more functionality not described here.

      Create Wallet

      A wallet is needed for pledge and to pay the pool registration fee.

      1. Choose [w] Wallet and you will be presented with the following menu:
        ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n >> WALLET\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n Wallet Management\n\n ) New         - create a new wallet\n ) Import      - import a Daedalus/Yoroi 24/25 mnemonic or Ledger/Trezor HW wallet\n ) Register    - register a wallet on chain\n ) De-Register - De-Register (retire) a registered wallet\n ) List        - list all available wallets in a compact view\n ) Show        - show detailed view of a specific wallet\n ) Remove      - remove a wallet\n ) Decrypt     - remove write protection and decrypt wallet\n ) Encrypt     - encrypt wallet keys and make all files immutable\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n Select Wallet Operation\n\n  [n] New\n  [i] Import\n  [r] Register\n  [z] De-Register\n  [l] List\n  [s] Show\n  [x] Remove\n  [d] Decrypt\n  [e] Encrypt\n  [h] Home\n
      2. Choose [n] New to create a new wallet. [i] Import can also be used to import a Daedalus/Yoroi based 15 or 24 word wallet seed.
      3. Give the wallet a name
      4. CNTools will give you the wallet address. For example:
        ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n >> WALLET >> NEW\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nName of new wallet: Test\n\nNew Wallet         : Test\nAddress            : addr_test1qpq5qjr774cyc6kxcwp060k4t4hwp42q43v35lmcg3gcycu5uwdwld5yr8m8fgn7su955zf5qahtrgljqfjfa4nr8jfsj4alxk\nEnterprise Address : addr_test1vpq5qjr774cyc6kxcwp060k4t4hwp42q43v35lmcg3gcyccuxhdka\n\nYou can now send and receive Ada using the above addresses.\nNote that Enterprise Address will not take part in staking.\nWallet will be automatically registered on chain if you\nchoose to delegate or pledge wallet when registering a stake pool.\n
      5. Send some money to this wallet, either through the faucet or by having a friend send you some.
      Import Daedalus/Yoroi/HW Wallet

      The Import feature of CNTools is originally based on this guide from Ilap.

      If you would like to use the Import function to import a Daedalus/Yoroi based 15 or 24 word wallet seed, please ensure that the cardano-address and bech32 binaries are available in your $PATH environment variable:

      bech32 --version\n1.1.0\n\ncardano-address --version\n3.5.0\n

      If the versions do not match the above, please run the latest guild-deploy.sh from here and rebuild cardano-node as instructed here.

      To import a Daedalus/Yoroi wallet to CNTools, open CNTools, select the [w] Wallet option, and then select [i] Import; the following menu will appear:

      ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n >> WALLET >> IMPORT\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n Wallet Import\n\n ) Mnemonic  - Daedalus/Yoroi 24 or 25 word mnemonic\n ) HW Wallet - Ledger/Trezor hardware wallet\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n Select Wallet operation\n\n  [m] Mnemonic\n  [w] HW Wallet\n  [h] Home\n

      Note

      You can import a hardware wallet using [w] HW Wallet above, but please note that before you are able to use a hardware wallet in CNTools, you need to ensure your hardware device can be detected at the OS level using cardano-hw-cli.
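
      For example, a quick detection check (a minimal sketch; assumes cardano-hw-cli is already in your $PATH, e.g. after running guild-deploy.sh -s w, and that your cardano-hw-cli version provides this subcommand):

          # should print version/model information for the connected Ledger/Trezor device
          cardano-hw-cli device version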

      Select the type of wallet you want to import; for Daedalus / Yoroi wallets select [m] Mnemonic:

      ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n >> WALLET >> IMPORT >> MNEMONIC\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nName of imported wallet: TEST\n\n24 or 15 word mnemonic(space separated):\n
      Give your wallet a name (in this case 'TEST'), and enter your mnemonic phrase. Please ensure that you READ through the complete notes presented by CNTools before proceeding.

      Create Pool

      Create the necessary pool keys.

      1. From the main menu select [p] Pool
        ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n >> POOL\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n Pool Management\n\n ) New      - create a new pool\n ) Register - register created pool on chain using a stake wallet (pledge wallet)\n ) Modify   - change pool parameters and register updated pool values on chain\n ) Retire   - de-register stake pool from chain in specified epoch\n ) List     - a compact list view of available local pools\n ) Show     - detailed view of specified pool\n ) Rotate   - rotate pool KES keys\n ) Decrypt  - remove write protection and decrypt pool\n ) Encrypt  - encrypt pool cold keys and make all files immutable\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n Select Pool Operation\n\n  [n] New\n  [r] Register\n  [m] Modify\n  [x] Retire\n  [l] List\n  [s] Show\n  [o] Rotate\n  [d] Decrypt\n  [e] Encrypt\n  [h] Home\n
      2. Select [n] New to create a new pool
      3. Give the pool a name. In our case, we call it TEST. The result should look something like this:
        ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n >> POOL >> NEW\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nPool Name: TEST\n\nPool: TEST\nID (hex)    : 8d5a3510f18ce241115da38a1b2419ed82d308599c16e98caea1b4c0\nID (bech32) : pool134dr2y833n3yzy2a5w9pkfqeakpdxzzenstwnr9w5x6vqtnclue\n
      Register Pool

      Register the pool on-chain.

      1. From the main menu select [p] Pool
      2. Select [r] Register
      3. Select the pool you just created
      4. CNTools will give you prompts to set pledge, margin, cost, metadata, and relays. Enter values that are useful to you.

      Make sure you set your pledge low enough to ensure the funds in your wallet will cover the pledge plus pool registration fees.

      5. Select the wallet to use as pledge wallet, Test in our case. As this is a newly created wallet, you will be prompted to continue with wallet registration. When complete, and if successful, both wallet and pool will be registered on-chain.

      It will look something like this:

      ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n >> POOL >> REGISTER\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nOnline mode  -  The default mode to use if all keys are available\n\nHybrid mode  -  1) Go through the steps to build a transaction file\n                2) Copy the built tx file to an offline node\n                3) Sign it using 'Sign Tx' with keys on offline node\n                   (CNTools started in offline mode '-o' without node connection)\n                4) Copy the signed tx file back to the online node and submit using 'Submit Tx'\n\nSelected value: [o] Online\n\n# Select pool\nSelected pool: TEST\n\n# Pool Parameters\npress enter to use default value\n\nPledge (in Ada, default: 50,000):\nMargin (in %, default: 7.5):\nCost (in Ada, minimum: 340, default: 340):\n\n# Pool Metadata\n\nEnter Pool's JSON URL to host metadata file - URL length should be less than 64 chars (default: https://foo.bat/poolmeta.json):\n\nEnter Pool's Name (default: TEST):\nEnter Pool's Ticker , should be between 3-5 characters (default: TEST):\nEnter Pool's Description (default: No Description):\nEnter Pool's Homepage (default: https://foo.com):\n\nOptionally set an extended metadata URL?\nSelected value: [n] No\n{\n  \"name\": \"TEST\",\n  \"ticker\": \"TEST\",\n  \"description\": \"No Description\",\n  \"homepage\": \"https://foo.com\",\n  \"nonce\": \"1613146429\"\n}\n\nPlease host file /opt/cardano/guild/priv/pool/TEST/poolmeta.json as-is at https://foo.bat/poolmeta.json\n\n# Pool Relay Registration\nSelected value: [d] A or AAAA DNS record (single)\nEnter relays's DNS record, only A or AAAA DNS records: relay.foo.com\nEnter relays's port: 6000\nAdd more relay entries?\nSelected value: [n] No\n\n# Select main owner/pledge wallet (normal CLI wallet)\nSelected wallet: Test (100,000.000000 Ada)\nWallet Test3 not registered on chain\n\nWaiting for new block to be created (timeout = 600 slots, 600s)\nINFO: press any key to cancel and return (won't stop transaction)\n\nOwner #1 : Test added!\n\nRegister a multi-owner pool (you need to have stake.vkey of any additional owner in a seperate wallet folder under $CNODE_HOME/priv/wallet)?\nSelected value: [n] No\n\nUse a separate rewards wallet from main owner?\nSelected value: [n] No\n\nWaiting for new block to be created (timeout = 600 slots, 600s)\nINFO: press any key to cancel and return (won't stop transaction)\n\nPool TEST successfully registered!\nOwner #1      : Test\nReward Wallet : Test\nPledge        : 50,000 Ada\nMargin        : 7.5 %\nCost          : 340 Ada\n\nUncomment and set value for POOL_NAME in ./env with 'TEST'\n\nINFO: Total balance in 1 owner/pledge wallet(s) are: 99,497.996518 Ada\n

      As mentioned in the above output, uncomment and set the value for POOL_NAME in ./env to 'TEST' (in our case, the POOL_NAME is TEST). The cnode.sh script will automatically detect whether the files required to run as a block producing node are present in the $CNODE_HOME/priv/pool/<POOL_NAME> directory.
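
      A minimal sketch of the corresponding edit in $CNODE_HOME/scripts/env (uncomment the line and set it to your own pool folder name):

          POOL_NAME="TEST"    # the folder name under $CNODE_HOME/priv/pool, NOT the ticker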
      Rotate KES Keys

      The node runs with an operational certificate, generated using the KES hot key. For security reasons, the protocol requires you to re-generate (or rotate) your KES key once it reaches expiry. On mainnet, this expiry is after 62 KES periods of 36 hours each (roughly 93 days, hence the advice to rotate quarterly), after which your node will not be able to forge valid blocks until the key is rotated. To be able to rotate KES keys, your cold key files (cold.skey, cold.vkey and cold.counter) need to be present on the machine where you run CNTools to rotate your KES key.
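
      The quarterly figure follows from the mainnet Shelley genesis parameters (a worked example; 129600 slots per KES period and 62 maximum evolutions are the published mainnet values):

          129600 slots/period x 1 s/slot = 36 hours per KES period
          62 evolutions x 36 hours = 2232 hours, roughly 93 days (about one quarter)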

      To rotate KES keys and generate the operational certificate (op.cert):

      1. From the main menu select [p] Pool

      2. Select [o] Rotate
      3. Select the pool you just created

      The output should look like:

      ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n >> POOL >> ROTATE KES\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nSelect pool to rotate KES keys on\nSelected pool: TEST\n\nPool KES keys successfully updated\nNew KES start period  : 240\nKES keys will expire  : 302 - 2021-09-04 11:24:31 UTC\n\nRestart your pool node for changes to take effect\n\npress any key to return to home menu\n
      4. Start or restart your cardano-node. If deployed as a systemd service as shown here, you can run sudo systemctl restart cnode.
      5. Ensure the node is running as a block producing (core) node.

      You can use gLiveView - the output at the top should say > Cardano Node - (Core - Guild).

      Alternatively, you can check the node logs in $CNODE_HOME/logs/ to see whether the node is performing leadership checks (TraceStartLeadershipCheck, TraceNodeIsNotLeader, etc.)
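
      For example (a minimal sketch; the log file name node0.json is an assumption based on a default config.json, adjust it to your logging setup):

          grep -E 'TraceStartLeadershipCheck|TraceNodeIsNotLeader' "${CNODE_HOME}/logs/node0.json" | tail -n 5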

      "},{"location":"Scripts/cntools/","title":"Overview","text":"

      Important

      Koios CNTools is like a Swiss Army knife for pool operators, simplifying typical operations regarding their wallet keys and pool management. Please note that this tool only aims to simplify usual tasks for its users, but it should NOT act as an excuse to skip understanding how to manually work through things or the basics of Linux operations. The skills highlighted on the home page are paramount for a stake pool operator, and so is the understanding of configuration files and network. Please ensure you've read and understood the disclaimers before proceeding.

      Visit the Changelog section to see progress and current release.

      "},{"location":"Scripts/cntools/#overview","title":"Overview","text":"

      The tool consists of three files.

      In addition to the above files, there is also a dependency on the common env file. CNTools connects to your node through the configuration in the env file located in the same directory as the script. Customize env and cntools.sh files to your needs.

      Additionally, CNTools can integrate and enable optional functionalities based on external components:

      See CNCLI and Log Monitor sections for more details.

      Koios CNTools can operate in the following modes:

      "},{"location":"Scripts/cntools/#download-and-update","title":"Download and Update","text":"

      The update functionality is provided from within CNTools. In case of breaking changes, please follow the prompts post-upgrade. If stuck, it's always best to re-run the latest guild-deploy.sh before proceeding.

      If you have not updated in a while, it is possible that you might come from a release with breaking changes. If so, please be sure to check out the upgrade instructions.

      "},{"location":"Scripts/cntools/#navigation","title":"Navigation","text":"

      The scripts menu supports both arrow-key navigation and shortcut-key selection. The character within the square brackets is the shortcut to press for quick navigation. For other selections, like the wallet and pool menus that don't contain shortcuts, there is a third way to navigate: the key pressed is compared to the first character of each menu option, and if there is a match the selection jumps to that location - a handy way to quickly navigate a large menu.

      "},{"location":"Scripts/cntools/#hardware-wallet","title":"Hardware Wallet","text":"

      CNTools includes hardware wallet support since version 7.0.0 through the Vacuumlabs cardano-hw-cli application. Initialize the device and update its firmware/app to the latest version before usage, following the manufacturer's instructions.

      To enable hardware support run guild-deploy.sh -s w. This downloads and installs Vacuumlabs cardano-hw-cli including udev configuration. When a new version of Vacuumlabs cardano-hw-cli is released, run guild-deploy.sh -s w again to update. For additional runtime options, run guild-deploy.sh -h.

      "},{"location":"Scripts/cntools/#offline-workflow","title":"Offline Workflow","text":"

      CNTools can be run in online and offline mode. At a very high level, when working with offline devices, remember that you need to use CNTools on an online node to generate a staging transaction for the desired type of transaction, then move the staging transaction to an offline node to sign (authorize) it using the signing keys on your offline node, and then bring the signed transaction back to the online node for submission to the chain.

      For the offline workflow, all the wallet and pool keys should be kept on the offline node. The backup function in CNTools has an option to create a backup without private keys (sensitive signing keys) for transfer to the online node. All other files are included in the backup to be transferred to the online node.

      Keys excluded from a backup created without private keys:

      - Wallet: payment.skey, stake.skey
      - Pool: cold.skey

      Note that setting up an offline server requires a good SysOps background (you need to be aware of how to set up your server with an offline mirror repository, how to transfer files across, and be fairly familiar with the disk layout presented in the documentation). The guild-deploy.sh script in its current state is not expected to run on an offline machine. Essentially, you simply need the cardano-cli, bech32 and cardano-address binaries in your $PATH, the OS-level dependency packages [jq, coreutils, pkgconfig, gcc-c++ and bc], and perhaps a copy of your online cnode directory (to ensure you have the right genesis/config files on your offline server). We strongly recommend that you familiarise yourself with the workflow on the preview / preprod / guild networks first, before attempting it on mainnet.
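
      A quick sanity check on the offline machine could look like this (a minimal sketch; it prints the resolved path for each required binary and flags any that are missing):

          for bin in cardano-cli bech32 cardano-address; do
            command -v "${bin}" || echo "${bin} is missing from PATH"
          done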

      Example workflow for creating a wallet and pool:

      sequenceDiagram Note over Offline: Create/Import a wallet Note over Offline: Create a new pool Note over Offline: Rotate KES keys to generate op.cert Note over Offline: Create a backup w/o private keys Offline->>Online: Transfer backup to online node Note over Online: Fund the wallet base address with enough Ada Note over Online: Register wallet using ' Wallet \u00bb Register ' in hybrid mode Online->>Offline: Transfer built tx file back to offline node Note over Offline: Use ' Transaction >> Sign ' with payment.skey from wallet to sign transaction Offline->>Online: Transfer signed tx back to online node Note over Online: Use ' Transaction >> Submit ' to send signed transaction to blockchain Note over Online: Register pool in hybrid mode loop Offline-->Online: Repeat steps to sign and submit built pool registration transaction end Note over Online: Verify that pool was successfully registered with ' Pool \u00bb Show '
      Online mode

      To start CNTools in Online (advanced) Mode, execute the script from the $CNODE_HOME/scripts/ directory:

      cd $CNODE_HOME/scripts\n./cntools.sh -a\n

      You should get a screen that looks something like this:

      ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n >> Koios CNTools vX.X.X - Guild - CONNECTED <<\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n Main Menu    Telegram Announcement / Support channel: t.me/CardanoKoios/9759\n\n ) Wallet      - create, show, remove and protect wallets\n ) Funds       - send, withdraw and delegate\n ) Pool        - pool creation and management\n ) Transaction - Sign and Submit a cold transaction (hybrid/offline mode)\n ) Blocks      - show core node leader schedule & block production statistics\n ) Backup      - backup & restore of wallet/pool/config\n ) Advanced    - Developer and advanced features: metadata, multi-assets, ...\n ) Refresh     - reload home screen content\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n                                                  Epoch 276 - 3d 19:08:27 until next\n What would you like to do?                                         Node Sync: 12 :)\n\n  [w] Wallet\n  [f] Funds\n  [p] Pool\n  [t] Transaction\n  [b] Blocks\n  [u] Update\n  [z] Backup & Restore\n  [a] Advanced\n  [r] Refresh\n  [q] Quit\n
      Offline mode

      To start CNTools in Offline Mode, execute the script from the $CNODE_HOME/scripts/ directory using the -o flag:

      cd $CNODE_HOME/scripts\n./cntools.sh -o\n

      The main menu header should let you know that CNTools was started in offline mode:

      ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n >> Koios CNTools vX.X.X - Guild - OFFLINE <<\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n Main Menu    Telegram Announcement / Support channel: t.me/CardanoKoios/9759\n\n ) Wallet      - create, show, remove and protect wallets\n ) Funds       - send, withdraw and delegate\n ) Pool        - pool creation and management\n ) Transaction - Sign and Submit a cold transaction (hybrid/offline mode)\n\n ) Backup      - backup & restore of wallet/pool/config\n\n ) Refresh     - reload home screen content\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n                                                  Epoch 276 - 3d 19:03:46 until next\n What would you like to do?\n\n  [w] Wallet\n  [f] Funds\n  [p] Pool\n  [t] Transaction\n  [z] Backup & Restore\n  [r] Refresh\n  [q] Quit\n

      "},{"location":"Scripts/env/","title":"Common env","text":"

      A common environment file called env is sourced by most scripts in the Guild Operators repository. This file holds common variables and functions needed by other scripts. There are several benefits to this: duplicate settings do not have to be specified, and functions can be reused, decreasing the risk of misconfiguration and inconsistency.
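
      A minimal sketch of how a custom helper script could reuse it (assuming a default $CNODE_HOME deployment; CONFIG and SOCKET are variables provided by env, as listed in the Configuration section below):

          #!/usr/bin/env bash
          # source the common env file to inherit its variables and functions
          . "${CNODE_HOME}/scripts/env" || exit 1
          echo "node config: ${CONFIG}, socket: ${SOCKET}"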

      "},{"location":"Scripts/env/#installation","title":"Installation","text":"

      The env file is downloaded together with the rest of the scripts when the Pre-Requisites are followed, and is located in the $CNODE_HOME/scripts/ directory. The file is also automatically downloaded/updated by some of the individual scripts if missing, like cntools.sh, gLiveView.sh and topologyUpdater.sh. All custom changes in the User Variables section are left untouched on updates unless a forced overwrite is selected when running guild-deploy.sh.

      "},{"location":"Scripts/env/#configuration","title":"Configuration","text":"

      Most variables can be left commented to use the automatically detected or default value, but there are some that need to be set as explained below.

      Take your time and look through the different variables and their explanations, and decide if you need/want to change the default setting. For a default deployment using guild-deploy.sh, CNODE_PORT (all installs) and POOL_NAME (only block producer) should be the only variables that need to be set.

######################################\n# User Variables - Change as desired #\n# Leave as is if unsure              #\n######################################\n\n#CCLI=\"${HOME}/.local/bin/cardano-cli\"                  # Override automatic detection of path to cardano-cli executable\n#CNCLI=\"${HOME}/.local/bin/cncli\"                       # Override automatic detection of path to cncli executable (https://github.com/AndrewWestberg/cncli)\n#CNODE_HOME=\"/opt/cardano/cnode\"                        # Override default CNODE_HOME path (defaults to /opt/cardano/cnode)\nCNODE_PORT=6000                                         # Set node port\n#CONFIG=\"${CNODE_HOME}/files/config.json\"               # Override automatic detection of node config path\n#SOCKET=\"${CNODE_HOME}/sockets/node.socket\"             # Override automatic detection of path to socket\n#TOPOLOGY=\"${CNODE_HOME}/files/topology.json\"           # Override default topology.json path\n#LOG_DIR=\"${CNODE_HOME}/logs\"                           # Folder where your logs will be sent to (must pre-exist)\n#DB_DIR=\"${CNODE_HOME}/db\"                              # Folder to store the cardano-node blockchain db\n#UPDATE_CHECK=\"Y\"                                       # Check for updates to scripts, you will still be prompted before proceeding (Y|N).\n#TMP_DIR=\"/tmp/cnode\"                                   # Folder to hold temporary files in the various scripts, each script might create additional subfolders\n#EKG_HOST=127.0.0.1                                     # Set node EKG host IP\n#EKG_PORT=12788                                         # Override automatic detection of node EKG port\n#PROM_HOST=127.0.0.1                                    # Set node Prometheus host IP\n#PROM_PORT=12798                                        # Override automatic detection of node Prometheus port\n#EKG_TIMEOUT=3                                          # Maximum time in seconds that you allow EKG request to take before aborting (node metrics)\n#CURL_TIMEOUT=10                                        # Maximum time in seconds that you allow curl file download to take before aborting (GitHub update process)\n#BLOCKLOG_DIR=\"${CNODE_HOME}/guild-db/blocklog\"         # Override default directory used to store block data for core node\n#BLOCKLOG_TZ=\"UTC\"                                      # TimeZone to use when displaying blocklog - https://en.wikipedia.org/wiki/List_of_tz_database_time_zones\n#SHELLEY_TRANS_EPOCH=208                                # Override automatic detection of shelley epoch start, e.g. 208 for mainnet\n#TG_BOT_TOKEN=\"\"                                        # Uncomment and set to enable telegramSend function. To create your own BOT-token and Chat-Id follow guide at:\n#TG_CHAT_ID=\"\"                                          # https://cardano-community.github.io/guild-operators/Scripts/sendalerts\n#USE_EKG=\"N\"                                            # Use EKG metrics from the node instead of Prometheus. Prometheus metrics (default) should yield slightly better performance\n#TIMEOUT_LEDGER_STATE=300                               # Timeout in seconds for querying and dumping ledger-state\n#IP_VERSION=4                                           # The IP version to use for push and fetch, valid options: 4 | 6 | mix (Default: 4)\n\n#WALLET_FOLDER=\"${CNODE_HOME}/priv/wallet\"              # Root folder for Wallets\n#POOL_FOLDER=\"${CNODE_HOME}/priv/pool\"                  # Root folder for Pools\n# Each wallet and pool has a friendly name and subfolder containing all related keys, certificates, ...\n#POOL_NAME=\"\"                                           # Set the pool's name to run node as a core node (the name, NOT the ticker, ie folder name)\n\n#WALLET_PAY_VK_FILENAME=\"payment.vkey\"                  # Standardized names for all wallet related files\n#WALLET_PAY_SK_FILENAME=\"payment.skey\"\n#WALLET_HW_PAY_SK_FILENAME=\"payment.hwsfile\"\n#WALLET_PAY_ADDR_FILENAME=\"payment.addr\"\n#WALLET_BASE_ADDR_FILENAME=\"base.addr\"\n#WALLET_STAKE_VK_FILENAME=\"stake.vkey\"\n#WALLET_STAKE_SK_FILENAME=\"stake.skey\"\n#WALLET_HW_STAKE_SK_FILENAME=\"stake.hwsfile\"\n#WALLET_STAKE_ADDR_FILENAME=\"reward.addr\"\n#WALLET_STAKE_CERT_FILENAME=\"stake.cert\"\n#WALLET_STAKE_DEREG_FILENAME=\"stake.dereg\"\n#WALLET_DELEGCERT_FILENAME=\"delegation.cert\"\n\n#POOL_ID_FILENAME=\"pool.id\"                             # Standardized names for all pool related files\n#POOL_HOTKEY_VK_FILENAME=\"hot.vkey\"\n#POOL_HOTKEY_SK_FILENAME=\"hot.skey\"\n#POOL_COLDKEY_VK_FILENAME=\"cold.vkey\"\n#POOL_COLDKEY_SK_FILENAME=\"cold.skey\"\n#POOL_OPCERT_COUNTER_FILENAME=\"cold.counter\"\n#POOL_OPCERT_FILENAME=\"op.cert\"\n#POOL_VRF_VK_FILENAME=\"vrf.vkey\"\n#POOL_VRF_SK_FILENAME=\"vrf.skey\"\n#POOL_CONFIG_FILENAME=\"pool.config\"\n#POOL_REGCERT_FILENAME=\"pool.cert\"\n#POOL_CURRENT_KES_START=\"kes.start\"\n#POOL_DEREGCERT_FILENAME=\"pool.dereg\"\n\n#ASSET_FOLDER=\"${CNODE_HOME}/priv/asset\"                # Root folder for Multi-Assets containing minted assets and subfolders for Policy IDs\n#ASSET_POLICY_VK_FILENAME=\"policy.vkey\"                 # Standardized names for all multi-asset related files\n#ASSET_POLICY_SK_FILENAME=\"policy.skey\"\n#ASSET_POLICY_SCRIPT_FILENAME=\"policy.script\"           # File extension '.script' mandatory\n#ASSET_POLICY_ID_FILENAME=\"policy.id\"\n
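
As a minimal example, a block producer deployment could leave everything else commented out and set only the two variables below (the pool name shown is a placeholder - it must match the folder name under ${CNODE_HOME}/priv/pool holding your pool files):

CNODE_PORT=6000                                         # Set node port\nPOOL_NAME=\"GUILD\"                                      # Example only - your pool folder name, NOT the ticker\n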
      "},{"location":"Scripts/gliveview/","title":"gLiveView","text":"

      Reminder !!

      Ensure the Pre-Requisites are in place before you proceed.

      Koios gLiveView is a local monitoring tool to use in addition to remote monitoring tools like Prometheus/Grafana, Zabbix or IOG's RTView. This is especially useful when moving to a systemd deployment - if you haven't done so already - as it offers an intuitive UI to monitor the node status.

      "},{"location":"Scripts/gliveview/#configuration-startup","title":"Configuration & Startup","text":"

For most setups, it's enough to set CNODE_PORT in the env file. The rest of the variables should be detected automatically. If required, modify the User Variables in env and gLiveView.sh to suit your environment (if the environment is customised). You can then start running ./gLiveView.sh from the folder you downloaded the script to (the default location being $CNODE_HOME/scripts). Note that the script is smart enough to automatically detect when you're running as a Core or Relay and will show fields accordingly.
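
For example, on a default deployment this is all it takes to launch the dashboard:

cd \"${CNODE_HOME}/scripts\"\n./gLiveView.sh\n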

The tool can be run in legacy mode with only standard ASCII characters for terminals that have trouble displaying the box-drawing characters. Run ./gLiveView.sh -h to show the available command-line parameters, or set the mode permanently in the script itself.

      Note !!

Keeping gLiveView to its intent of being a dashboard and not a full-fledged monitoring tool, we intend to keep the most relevant information for a node operator in a minimalistic dashboard. Accordingly, gLiveView runs in compact mode by default. One can enable verbose mode by pressing 'v' to unhide additional fields.

      A sample output from both core and relay together with peer analysis:

      Core

      Relay

      Peer Analysis

      "},{"location":"Scripts/gliveview/#upper-main-section","title":"Upper main section","text":"

      Displays live metrics from cardano-node gathered through the nodes EKG/Prometheus(env setting) endpoint.

      "},{"location":"Scripts/gliveview/#core-section","title":"Core section","text":"

      If the node is run as a core, identified by the 'forge-about-to-lead' parameter, a second core section is displayed.

      "},{"location":"Scripts/gliveview/#peer-analysis","title":"Peer analysis","text":"

A manual peer analysis can be triggered by pressing p. A latency test will be done on incoming and outgoing connections to the node.

      Note

Note that with P2P enabled, an incoming/outgoing connection can be reused for bi-directional traffic. There isn't a way to distinctly identify the P2P peer's direction yet for a given IP.

Outgoing connections (peers in the topology file) are pinged with the first available method, in this order: 1. cncli - If available, this gives the most accurate measure as it checks the entire handshake process against the remote peer. 2. ss - Sends a TCP SYN packet to ping the remote peer on the cardano-node port. Should give ~100% success rate. 3. tcptraceroute - Same as ss. 4. ping - Fallback method using ICMP ping against the IP. Will only work if the firewall of the remote peer accepts ICMP traffic.

For incoming connections, only ICMP ping is used, as the remote peer's port is unknown. It's not uncommon to see many undetermined peers for incoming connections, as it's good security practice to disable ICMP in the firewall.

Once the analysis is finished, it will display the RTTs (round-trip times) for the peers and group them into the ranges 0-50, 50-100, 100-200 and >200 ms. The analysis is NOT live. Press [h] Home to go back to the default view or [i] Info to show the in-script help text. The Up and Down arrow keys are used to select the incoming or outgoing detailed list of IPs and their RTT values. The Left (<) and Right (>) arrow keys can be used to navigate the pages in the selected list.

      "},{"location":"Scripts/gliveview/#troubleshootingcustomisations","title":"Troubleshooting/Customisations","text":"

In case you run into trouble while running the script, you might want to edit env & gLiveView.sh and look at the User Variables section. You can override the values if the automatic detection does not provide the right information, but we would appreciate it if you could also notify us by raising an issue against the GitHub repository:

      gLiveView.sh

      ######################################\n# User Variables - Change as desired #\n######################################\n\nNODE_NAME=\"Cardano Node\"                  # Change your node's name prefix here, keep at or below 19 characters!\nREFRESH_RATE=2                            # How often (in seconds) to refresh the view (additional time for processing and output may slow it down)\nLEGACY_MODE=false                         # (true|false) If enabled unicode box-drawing characters will be replaced by standard ASCII characters\nRETRIES=3                                 # How many attempts to connect to running Cardano node before erroring out and quitting\nPEER_LIST_CNT=6                           # Number of peers to show on each in/out page in peer analysis view\nTHEME=\"dark\"                              # dark  = suited for terminals with a dark background\n# light = suited for terminals with a bright background\nENABLE_IP_GEOLOCATION=\"Y\"                 # Enable IP geolocation on outgoing and incoming connections using ip-api.com\n

      "},{"location":"Scripts/itnrewards/","title":"Itnrewards","text":""},{"location":"Scripts/itnrewards/#concept","title":"Concept","text":"

To claim rewards earned during the Incentivized TestNet, the private and public keys from ITN must be converted to Shelley stake keys. A script called itnRewards.sh has been created to guide you through the process of converting the keys and creating a CNTools compatible wallet from where the rewards can be withdrawn.
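
Under the hood, the conversion relies on cardano-cli, as the diagram below shows. A minimal sketch of the key-conversion step, assuming your ITN key files are named itn_owner.skey/itn_owner.vkey (the script drives this for you, and on older cardano-cli versions the command sits under the shelley group):

cardano-cli key convert-itn-key --itn-signing-key-file itn_owner.skey --out-file stake.skey\ncardano-cli key convert-itn-key --itn-verification-key-file itn_owner.vkey --out-file stake.vkey\n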

      graph TB A([\"itnRewards.sh\"]) A --x B([\"ITN Owner skey (ed25519[e]_sk)..\"]) --x D([\"cardano-cli shelley key convert-itn-key ..\"]) A --x C([\"ITN Owner vkey (ed25519_pk)..\"]) --x D D --x E([\"Stake skey/vkey\"]) --x L A --x F([\"cardano-cli shelley ..\"]) F --x G([\"Payment skey/vkey/addr\"]) --x L F --x H([\"Reward addr\"]) --x L F --x I([\"Base addr\"]) --x L L[CNTools Wallet] ;"},{"location":"Scripts/itnrewards/#steps","title":"Steps","text":""},{"location":"Scripts/itnwitness/","title":"Itnwitness","text":"

      Disclaimer

Currently, this is to protect the existing pools from the ITN - those who already have a delegator base - against spoofing, to avoid scammers building on the ITN results of known pools. A solution for Mainnet nodes may follow in the future, but in its current form this doesn't apply to those.

      "},{"location":"Scripts/itnwitness/#concept","title":"Concept","text":"

      Due to the expected ticker spoofing attack for pools that were famous during ITN, some of the community members have proposed an interim solution to verify the legitimacy of a pool for delegators. You can check the high-level workflow below:

      graph TB A(\"ITN Owner skey (ed25519/ed25519e) ..\") --x C([\"jcli key sign ..\"]) B(\"Haskell Pool ID (pool.id) ..\") --x C C --x D(\"Signature key, (pool.sig) ..\") E(\"ITN Owner vkey (ed25519_pk) ..\") --x F(\"Extended Metadata JSON (poolmeta_extended.json) ..\") D --x F F --x G(\"Pool Meta JSON (poolmeta.json) ..\") ;"},{"location":"Scripts/itnwitness/#steps","title":"Steps","text":"

The actual implementation is pretty straightforward; we will keep it brisk, as we assume those participating are fairly familiar with jcli usage.

If the process is approved to appear for wallets, we may consider providing easier alternatives. If you have any queries about the process, or any additions, please create a git issue/PR against the guild repository - to capture common queries and update instructions/help text where appropriate.

      "},{"location":"Scripts/itnwitness/#sample-output-of-json-files-generated","title":"Sample output of JSON files generated","text":"
      {\n\"itn\": {\n\"owner\": \"ed25519_pk1...\",\n\"witness\": \"ed25519_sig1...\"\n}\n}\n
      "},{"location":"Scripts/logmonitor/","title":"Log Monitor","text":"

      Reminder !!

      Ensure the Pre-Requisites are in place before you proceed.

      logMonitor.sh is a general purpose JSON log monitoring script for traces created by cardano-node. Currently, it looks for traces related to leader slots and block creation but other uses could be added in the future.

      "},{"location":"Scripts/logmonitor/#block-traces","title":"Block traces","text":"

      For the core node (block producer) the logMonitor.sh script can be run to monitor the JSON log file created by cardano-node for traces related to leader slots and block creation.

      For optimal coverage, it's best run together with CNCLI scripts as they provide different functionalities. Together, they create a complete picture of blocks assigned, created, validated or invalidated due to node issues.

      "},{"location":"Scripts/logmonitor/#installation","title":"Installation","text":"

The script is best run as a background process. This can be accomplished in many ways, but the preferred method is to run it as a systemd service. A terminal multiplexer like tmux or screen could also be used, but is not covered here.

Use the deploy-as-systemd.sh script to create a systemd unit file (deployed together with CNCLI). Log output is handled by syslog and ends up in the system's standard syslog file, normally /var/log/syslog. journalctl -f -u cnode-logmonitor.service can be used to check the service output (follow mode). Other logging configurations are not covered here.
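
For example, after deployment you can confirm the service is active and follow its output (standard systemd commands, using the unit name mentioned above):

sudo systemctl status cnode-logmonitor.service\nsudo journalctl -f -u cnode-logmonitor.service\n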

      "},{"location":"Scripts/logmonitor/#view-blocklog","title":"View Blocklog","text":"

      Best viewed in CNTools or gLiveView. See CNCLI for example output.

      "},{"location":"Scripts/mithril-client/","title":"Client","text":"

mithril-client.sh is a script to manage the Mithril client, a tool used to set up the Mithril client environment and to download Mithril snapshots and stake distributions. The main features include:

      "},{"location":"Scripts/mithril-client/#preparing-a-relay-or-block-producer-node","title":"Preparing a Relay or Block Producer Node","text":"

      To prepare a relay or block producer node, you should follow these steps:

      1. Create the Mithril environment file: Run the script with the environment setup command. This will create a new mithril.env file with all the necessary environment variables for the Mithril client.
      ./mithril-client.sh environment setup\n
2. Download the latest Mithril snapshot: Once the environment file is set up, you can download the latest Mithril snapshot by running the script with the snapshot download command. This snapshot contains the latest state of the Cardano blockchain db from a Mithril Aggregator.
      ./mithril-client.sh snapshot download\n
      "},{"location":"Scripts/mithril-client/#investigating-available-snapshots","title":"Investigating Available Snapshots","text":"

      You can investigate the available snapshots by using the snapshot list and snapshot show commands:

      ./mithril-client.sh snapshot list\n./mithril-client.sh snapshot list json\n
      ./mithril-client.sh snapshot show <DIGEST>\n./mithril-client.sh snapshot show <DIGEST> json\n./mithril-client.sh snapshot show json <DIGEST>\n
      "},{"location":"Scripts/mithril-client/#managing-stake-distributions","title":"Managing Stake Distributions","text":"

      You can manage stake distributions by using the stake-distribution download and stake-distribution list commands:

      ./mithril-client.sh stake-distribution download\n
      ./mithril-client.sh stake-distribution list\n./mithril-client.sh stake-distribution list json\n
      "},{"location":"Scripts/mithril-relay/","title":"Relay","text":"

mithril-relay.sh is a bash script for the deployment of Squid Mithril Relays and an Nginx load balancer. It provides functionalities such as:

      "},{"location":"Scripts/mithril-relay/#usage","title":"Usage","text":"
      Usage: mithril-relay.sh [-d] [-l]\n\nOptions:\n    -d  Install squid and configure as a relay\n    -l  Install nginx and configure as a load balancer\n    -h  Show this help text\n
      "},{"location":"Scripts/mithril-relay/#description","title":"Description","text":"

The mithril-relay.sh script is a bash script for managing the Mithril Relay Server. It provides functionalities such as installing and configuring Squid as a relay, and installing and configuring Nginx as a load balancer.

      "},{"location":"Scripts/mithril-relay/#environment-variables","title":"Environment Variables","text":"

      The script uses the following environment variable:

      "},{"location":"Scripts/mithril-relay/#execution","title":"Execution","text":"

      The script parses command line options and performs the corresponding actions based on the options provided. If the -d option is provided, it installs Squid and configures it as a relay. If the -l option is provided, it installs Nginx and configures it as a load balancer. If no options are provided, it displays the usage message.
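
For example, to set up a Squid relay on one host, and an Nginx load balancer on another (flags as per the usage above):

./mithril-relay.sh -d\n./mithril-relay.sh -l\n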

      "},{"location":"Scripts/mithril-signer/","title":"Signer","text":"

      mithril-signer.sh is a bash script for managing the Mithril Signer Server. It provides functionalities such as deploying the server as a systemd service and updating the environment file to contain variables specific to the Mithril Signer.

      "},{"location":"Scripts/mithril-signer/#usage","title":"Usage","text":"
      Usage: mithril-signer.sh [-d] [-u]\n\nOptions:\n    -d    Deploy mithril-signer as a systemd service\n    -u    Update mithril environment file\n    -h    Show this help text\n
      "},{"location":"Scripts/mithril-signer/#description","title":"Description","text":"

      This script is a bash script for managing the Mithril Signer Server. It provides functionalities such as deploying the server as a systemd service, updating the environment file, and running the server.

      "},{"location":"Scripts/mithril-signer/#environment-variables","title":"Environment Variables","text":"

      The script uses several environment variables, some of which are:

      "},{"location":"Scripts/mithril-signer/#execution","title":"Execution","text":"

      The script parses command line options, sources the environment file, sets default values, and performs basic sanity checks. It then checks if the -d or -u options were specified and performs the corresponding actions. If no options were specified, it runs the Mithril Signer Server.
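
For example, to update the Mithril variables in the environment file and then deploy the signer as a systemd service (flags as per the usage above):

./mithril-signer.sh -u\n./mithril-signer.sh -d\n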

      "},{"location":"Scripts/sendalerts/","title":"Sendalerts","text":"

Ensure the Pre-Requisites are in place before you proceed.

      This section describes the ways in which CNTools can send important messages to the operator.

      "},{"location":"Scripts/sendalerts/#telegram-alerts","title":"Telegram alerts","text":"

If known but unwanted errors occur on your node, or if characteristic values indicate an unusual status, CNTools can send you Telegram alert messages.

      To do this, you first have to activate your own bot and link it to your own Telegram user. Here is an explanation of how this works:

      1. Open Telegram and search for \"botfather\".

2. Send it your wish: /newbot.

      3. Define a name for your bot, such as cntools_[POOLNAME]_alerts.

      4. Botfather will confirm the creation of your bot by giving you the unique bot access token. Keep it safe and private.

      5. Now send at least one direct message to your new bot.

6. Open this URL in your browser, using the bot access token you just created:

      https://api.telegram.org/bot<your-access-token>/getUpdates\n
7. The result is a JSON document. Look for the value of result.message.chat.id. This chat ID should be a large integer number.

This is all you need to enable Telegram alerts in the scripts - uncomment the TG_CHAT_ID user variable in the env file and add your chat ID:

      ...\nTG_CHAT_ID=\"<YOUR_TG_CHAT_ID>\"\n...  \n
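
To verify that the token and chat ID work before relying on the scripts, you can send a test message directly through Telegram's Bot API (sendMessage is a standard Bot API method; substitute your own values):

curl -s \"https://api.telegram.org/bot<your-access-token>/sendMessage\" -d chat_id=\"<YOUR_TG_CHAT_ID>\" -d text=\"test alert\"\n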

      "},{"location":"Scripts/topologyupdater/","title":"Topology Updater","text":"

      Reminder !!

The topologyUpdater shell script must be executed on the relay node as a cronjob exactly every 60 minutes. After 4 consecutive requests (3 hours), the node is considered a new relay node and listed in the topology file. If the node is turned off, it's automatically delisted after 3 hours.

      "},{"location":"Scripts/topologyupdater/#download","title":"Download and Configure","text":"

If you have run guild-deploy.sh, this should already be available in your scripts folder, making this step unnecessary.

      Before the updater can make a valid request to the central topology service, it must query the current tip/blockNo from the well-synced local node. It connects to your node through the configuration in the script as well as the common env configuration file. Customize these files for your needs.

To download topologyUpdater.sh manually, you can execute the commands below and test executing Topology Updater once (it's OK if the first execution returns an error):

      cd $CNODE_HOME/scripts\ncurl -s -o topologyUpdater.sh https://raw.githubusercontent.com/cardano-community/guild-operators/master/scripts/cnode-helper-scripts/topologyUpdater.sh\ncurl -s -o env https://raw.githubusercontent.com/cardano-community/guild-operators/master/scripts/cnode-helper-scripts/env\nchmod 750 topologyUpdater.sh\n./topologyUpdater.sh\n

      "},{"location":"Scripts/topologyupdater/#modify","title":"Examine and modify the variables within topologyUpdater.sh script","text":"

Out of the box, the scripts might come with some assumptions that may or may not be valid for your environment. One of the common changes as an SPO would be to complete the CUSTOM_PEERS section as below to include your local relays/BP nodes (described in the How do I add my own nodes section), and any additional peers you'd like to be always available at minimum. Please do take time to update the variables in the User Variables section in env & topologyUpdater.sh:

### topologyUpdater.sh\n\n######################################\n# User Variables - Change as desired #\n######################################\n\nCNODE_HOSTNAME=\"CHANGE ME\"                                # (Optional) Must resolve to the IP you are requesting from\nCNODE_VALENCY=1                                           # (Optional) for multi-IP hostnames\nMAX_PEERS=15                                              # Maximum number of peers to return on successful fetch\n#CUSTOM_PEERS=\"None\"                                      # Additional custom peers (IP,port[,valency]) to add to your target topology.json\n# eg: \"10.0.0.1,3001|10.0.0.2,3002|relays.mydomain.com,3003,3\"\n#BATCH_AUTO_UPDATE=N                                      # Set to Y to automatically update the script if a new version is available without user interaction\n

Any customisations you add above will be preserved across future guild-deploy.sh executions, unless you specify the -f flag to overwrite completely.

      "},{"location":"Scripts/topologyupdater/#deploy","title":"Deploy the script","text":"

systemd service The script can be deployed as a background service in different ways, but the recommended and easiest way - if guild-deploy.sh was used - is to utilise the deploy-as-systemd.sh script to set up and schedule the execution. This will deploy both push & fetch service files, as well as timers for a scheduled 60 min node alive message and a cnode restart at the user-set interval (default: 24 hours) when running the deploy script.

systemctl list-timers can be used to check the push and restart service schedule.

crontab job Another way to deploy the topologyUpdater.sh script is as a crontab job. Add the script to be executed once per hour at a minute of your choice (e.g. at minute 25 in the example below). The example below will handle both the fetch and push in a single call to the script once an hour. In addition to the below crontab job for topologyUpdater, it's expected that you also add a scheduled restart of the relay node to pick up the fresh topology file fetched by the topologyUpdater script, with relays that are alive and well (see the restart sketch after the crontab entry below).

      25 * * * * /opt/cardano/cnode/scripts/topologyUpdater.sh\n
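
A matching scheduled restart could look like the below - a sketch only, assuming your node runs as the cnode systemd service (the default name in guild deployments) and the crontab belongs to a user with passwordless sudo rights for systemctl:

33 3 * * * /usr/bin/sudo /bin/systemctl restart cnode.service\n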
      "},{"location":"Scripts/topologyupdater/#logs","title":"Logs","text":"

You can check the last result of the push message in logs/topologyUpdater_lastresult.json. If deployed as a systemd service, use sudo journalctl -u <service> to check the output from the service.
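
For example, assuming the default LOG_DIR location:

cat \"${CNODE_HOME}/logs/topologyUpdater_lastresult.json\"\n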

If one of the parameters is outside the allowed ranges, invalid or missing, the returned JSON will tell you what needs to be fixed.

      Don't try to execute the script more often than once per hour. It's completely useless and may lead to a temporary blacklisting.

      "},{"location":"Scripts/topologyupdater/#why-does-my-topology-file-only-contain-iog-peers","title":"Why does my topology file only contain IOG peers?","text":"

Each subscribed node (4 consecutive requests) is allowed to fetch a subset of other nodes to prove the loyalty/stability of the relay. Until reaching this point, your fetch calls will only return IOG peers, combined with any custom peers added in the USER VARIABLES section of the topologyUpdater.sh script.

The engineers of the cardano-node network stack suggested using around 20 peers. More peers create unnecessary and unwanted system load and delays.

      In its default setting, topologyUpdater returns a list of 15 remote peers.

      Note that the change in topology is only effective upon restart of your node. Make sure you account for some scheduled restarts on your relays, to help onboard newer relays onto the network (as described in the systemd section).

      "},{"location":"Scripts/topologyupdater/#how-do-i-add-my-own-relaysstatic-nodes-in-addition-to-dynamic-list-generated-by-topologyupdater","title":"How do I add my own relays/static nodes in addition to dynamic list generated by topologyUpdater?","text":"

Most Stake Pool Operators may have a few preferences (own relays, close friends, etc.) that they would like to add to their topology by default. This is where the CUSTOM_PEERS variable in topologyUpdater.sh comes in. You can add a list of peers in the format hostname/IP,port[,valency] here, and the output topology.json formed will already include the custom peers that you supplied. Every custom peer is defined in the form [address],[port] and optionally ,[valency] (if not specified, the valency defaults to 1). Multiple custom peers are separated by |. An example of a valid CUSTOM_PEERS variable would be:

      CUSTOM_PEERS=\"foo.bar.io,3001,2|198.175.21.197,6001|36.233.3.89,6000\n
      The list above would add three custom peers with the specified addresses and ports, with the first one additionally specifying the optional valency parameter (in this case 2).

      "},{"location":"Scripts/topologyupdater/#how-are-the-peers-for-my-topology-file-selected","title":"How are the peers for my topology file selected?","text":"

      We calculate the distance on the Earth's surface from your node's IP to all subscribed peers. We then order the peers by distance (closest first) and start by selecting one peer. We then skip some, pick the next, skip, pick, skip, pick ... until we reach the end of the list (furthest away). The number of skipped records is calculated in a way to have the desired number of peers at the end.
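
As a rough illustration of that skip-selection - a sketch only, not the service's actual code - assuming a peers.txt sorted by distance (closest first) and a target of 15 peers:

total=$(wc -l < peers.txt)                          # subscribed peers, sorted by distance\ndesired=15\nstep=$(( total / desired )); [ \"${step}\" -lt 1 ] && step=1\nawk -v s=\"${step}\" '(NR-1) % s == 0' peers.txt | head -n \"${desired}\"   # keep every step-th peer, closest first\n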

      Every requesting node has its personal distance to all other nodes.

      We assume this should result in a well-distributed and interconnected peering network.

      "},{"location":"docker/build/","title":"Build","text":""},{"location":"docker/build/#intro","title":"Intro","text":"

      \ud83d\udca1 Docker containers are the fastest way to run a Cardano node in both \"Relay\" and \"Block-Producing\" (Pool) mode.

      "},{"location":"docker/build/#how-to-build","title":"How to build","text":"
      docker build -t cardanocommunity/cardano-node:latest - < dockerfile_bin\n
      "},{"location":"docker/build/#for-windows-users","title":"For Windows Users","text":"

      With Powershell on Windows, you can run docker by typing the following command:

      Get-Content dockerfile_bin  | docker build -t guild-operators/cardano-node:latest -\n
      "},{"location":"docker/build/#see-also","title":"See also","text":"

      Docker Tips

      Docker Official Docs

      "},{"location":"docker/docker/","title":"Overview","text":"

      Running your own Cardano node has never been so fast and easy.

But first, a kind reminder about the security aspects of running docker containers.

      "},{"location":"docker/docker/#external-resources","title":"External resources","text":""},{"location":"docker/docker/#built-in-cardano-software","title":"\ud83d\udd14 Built-in Cardano software","text":""},{"location":"docker/docker/#mithril","title":"Mithril","text":""},{"location":"docker/docker/#built-in-tools","title":"\ud83d\udd14 Built-in tools","text":""},{"location":"docker/docker/#docker-splash-screen","title":"Docker Splash screen","text":""},{"location":"docker/docker/#cntools","title":"Cntools","text":""},{"location":"docker/docker/#gliveview","title":"gLiveView","text":""},{"location":"docker/docker/#gliveview-peers-analyzer","title":"gLiveView Peers analyzer","text":""},{"location":"docker/docker/#cncli","title":"CNCLI","text":""},{"location":"docker/docker/#strategy","title":"Guild Operators Docker strategy ( mainnet/ preview / preprod / guild)","text":"

      Modular docker images based on Debian.

Based on the Guild's work, the Cardano Node image is built in a single stage: -> dockerfile_bin

      "},{"location":"docker/docker/#additional-docs","title":"Additional docs","text":"

If you prefer to build the images on your own, you can check:

      "},{"location":"docker/docker/#port-mapping","title":"Port mapping","text":"

      The dockerfiles are located in ./files/docker/

Node Ports: Node (6000), Prometheus (12798), EKG (12781) | Wallet Ports: Wallet (8090), Prometheus (12798) | Flavor: Debian"},{"location":"docker/run/","title":"Run","text":""},{"location":"docker/run/#os-requirements","title":"OS Requirements","text":" Private mode Public mode

      Note

1) --entrypoint=bash # This option won't start the node's container but only the OS (the node software won't actually start; you'll need to manually execute entrypoint.sh), ready to get in (through the command docker exec -it < container name or hash > /bin/bash) and play/explore around with it in command line mode. 2) All guild tools' env variables can be used to start a new container using custom values with the \"-e\" option. 3) CPU, RAM and Shared Memory allocation options for the container can be set when you start the container (i.e. --shm-size or --memory or --cpus, official docker resource docs) 4) --env MITHRIL_DOWNLOAD=Y # This option will allow the Mithril client to download the latest Mithril snapshot of the blockchain when the container starts and does not have a copy of the blockchain yet. This is useful when you want to start a new node from scratch and don't want to wait for the node to sync from the network. This option is only available for the mainnet, preprod, and preview networks.

      "},{"location":"docker/run/#use-cases","title":"Use Cases","text":"
      docker run --init -dit\n--name <YourCName>\n--security-opt=no-new-privileges\n-e NETWORK=mainnet\n-v <your_custom_path>:/opt/cardano/cnode/priv\n-v <your_custom_db_path>:/opt/cardano/cnode/db\ncardanocommunity/cardano-node\n
      "},{"location":"docker/run/#use-cases_1","title":"Use Cases:","text":"
      docker run --init -dit\n--name <YourCName>\n--security-opt=no-new-privileges\n-e NETWORK=mainnet\n-p 6000:6000\n-v <your_custom_path>:/opt/cardano/cnode/priv\n-v <your_custom_db_path>:/opt/cardano/cnode/db\ncardanocommunity/cardano-node\n
      docker run --init -dit\n--name <YourCName>\n--security-opt=no-new-privileges\n-e NETWORK=mainnet\n-e CONFIG=/opt/cardano/cnode/priv/<your own configuration files>.yml\n-p 6000:6000\n-v <your_custom_path>:/opt/cardano/cnode/priv\n-v <your_custom_db_path>:/opt/cardano/cnode/db\ncardanocommunity/cardano-node\n
      "},{"location":"docker/security/","title":"Security","text":""},{"location":"docker/security/#docker-security-best-practices","title":"Docker Security best practices","text":""},{"location":"docker/security/#intro","title":"Intro","text":"

      On the security front, Docker developers are faced with different types of security attacks such as:

Docker containers are now being exploited to covertly mine for cryptocurrency, marking a shift from ransomware to cryptocurrency malware. As with all things in security, Docker security is a moving target - so it's helpful to have access to up-to-date information, including experience-based best practices, for securing your containerized environments.

      "},{"location":"docker/security/#here-below-some-key-concepts","title":"Here below some key concepts:","text":"
1. Use a Third-Party Security Tool Docker allows you to use containers from untrusted public repositories, which increases the need to scrutinize whether the container was created securely and whether it is free of any corrupt or malicious files. For this, use a multi-purpose security tool that gives extensive dev-to-production security controls. (keep reading below)

      2. Manage Vulnerability It is best to have a sound vulnerability management program that has multiple checks throughout the container lifecycle. Vulnerability management should incorporate quality gates to detect access issues and weaknesses for a potential exploit from dev-to-production environments.

      3. Monitor and Audit Container Activity It is vital to monitor the container ecosystem and detect suspicious activity. Container monitoring activities provide real-time reports that can help you react promptly to a security breach.

4. Enable Docker Content Trust Docker Content Trust is a new feature incorporated into Docker 1.8. It is disabled by default, but once enabled, it allows you to verify the integrity, authenticity, and publication date of all Docker images from the Docker Hub Registry.

      5. Use Docker Bench for Security You should consider Docker Bench for Security as your must-use script. Once the script is run, you will notice a lot of information regarding configuration best practices for deploying Docker containers that can be used to further secure your Docker server and containers.

6. Resource Utilization To reduce performance impacts and denial-of-service attacks, it is a good practice to implement limits on the system resources that the containers can consume. If, for example, a web server is compromised, it helps to limit the impact to the other processes that are running on a host (see the example after this list).

7. RBAC RBAC is role-based access control. If you have multiple users accessing your environment, this is a must-have. It can be quite expensive to implement, but Portainer makes it super easy.
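
As an example for point 6 above, resource limits can be applied directly at container start using standard docker run flags (the values below are arbitrary examples):

docker run --init -dit\n--name <YourCName>\n--memory=8g                                   # Example: cap the container at 8 GiB RAM\n--cpus=2                                      # Example: cap the container at 2 CPUs\n-e NETWORK=mainnet\ncardanocommunity/cardano-node\n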

      "},{"location":"docker/security/#security-docker-best-practices","title":"Security Docker best practices:","text":""},{"location":"docker/security/#the-guild-docker-images-are-not-using-all-the-following-tips-due-to-functional-purpose","title":"The Guild Docker images are not using all the following tips due to functional purpose","text":"

      Guild tips:

      Some more general tips:

      "},{"location":"docker/security/#notes","title":"Notes:","text":""},{"location":"docker/tips/","title":"Tips","text":""},{"location":"docker/tips/#how-to-run-a-cardano-node-with-docker","title":"How to run a Cardano Node with Docker","text":"

With this quick guide, you will be able to run a cardano node in seconds and also have the powerful Koios SPO scripts built-in.

      "},{"location":"docker/tips/#how-to-operate-interactively-within-the-container","title":"How to operate interactively within the container","text":"

Once you have executed the container as a daemon with an attached tty (using the -dit flags), you are then able to enter the container.

If you want to hook into the running container's console, use the following command (replace CN with your container name):

      docker exec -it CN bash 

This command will bring you into the container's bash environment, ready to use the Koios tools.

      "},{"location":"docker/tips/#docker-flags-explained","title":"Docker flags explained","text":"
      \"docker build\" options explained:\n -t : option is to \"tag\" the image you can name the image as you prefer as long as you maintain the references between dockerfiles.\n\n\"docker run\" options explained:\n -d : for detach the container\n -i : interactive enabled -t : terminal session enabled\n -e : set an Env Variable\n -p : set exposed ports (by default if not specified the ports will be reachable only internally)\n--hostname : Container's hostname\n --name : Container's name\n
      "},{"location":"docker/tips/#custom-container-with-your-own-cfg","title":"Custom container with your own cfg","text":"
docker run --init -itd  \n--name Relay                                  # Optional (recommended for quick access): set a name for your newly created container.\n-p 9000:6000                                  # Optional: to expose the internal container's port (6000) to the host <IP> port 9000\n-e NETWORK=mainnet                            # Mandatory: mainnet / preprod / guild-mainnet / guild\n--security-opt=no-new-privileges              # Option to prevent privilege escalations\n-v <YourNetPath>:/opt/cardano/cnode/sockets   # Optional: useful to share the node socket with other containers\n-v <YourCfgPath>:/opt/cardano/cnode/priv      # Optional: if used, it has to contain all the sensitive keys needed to run a node as core\n-v <YourDBbk>:/opt/cardano/cnode/db           # Optional: if not set, a fresh DB will be downloaded from scratch\ncardanocommunity/cardano-node:latest          # Mandatory: image to run\n

      Note

To be able to use the CNTools encryption key feature, you need to manually change ENABLE_CHATTR to \"true\" in \"cntools.config\" and not use the --security-opt=no-new-privileges docker run option.

      "},{"location":"docker/tips/#docker-cli-managment","title":"Docker CLI managment","text":""},{"location":"docker/tips/#official","title":"Official","text":""},{"location":"docker/tips/#un-official-docker-managment-cli-tool","title":"Un-Official Docker managment cli tool","text":""},{"location":"docker/tips/#docker-backups-and-restores","title":"Docker backups and restores","text":"

The docker container has optional backup and restore functionality that can be used to back up the /opt/cardano/cnode/db directory. To have the backup persist longer than the container, the backup directory should be mounted as a volume.

Note: The backup and restore functionality is disabled by default.

Warning: Make sure adequate space exists on the host, as the backup will double the space consumed by the database.

      "},{"location":"docker/tips/#creating-a-backup","title":"Creating a Backup","text":"

When the container is started with the ENABLE_BACKUP environment variable set to Y, the container will automatically create a backup in the /opt/cardano/cnode/backup/$NETWORK-db directory. The backup is created when the container starts, and only if the backup directory is smaller than the db directory.
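
For example, a run command enabling backups might look like the below sketch (the host paths are placeholders; note the backup volume mount so backups outlive the container):

docker run --init -dit\n--name <YourCName>\n-e NETWORK=mainnet\n-e ENABLE_BACKUP=Y\n-v <your_backup_path>:/opt/cardano/cnode/backup\n-v <your_custom_db_path>:/opt/cardano/cnode/db\ncardanocommunity/cardano-node\n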

      "},{"location":"docker/tips/#restoring-from-a-backup","title":"Restoring from a Backup","text":"

When the container is started with the ENABLE_RESTORE environment variable set to Y, the container will automatically restore the latest backup from the /opt/cardano/cnode/backup/$NETWORK-db directory. The database is restored when the container starts, and only if the backup directory is larger than the db directory.

      "}]} \ No newline at end of file +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Home","text":"

This documentation site (rather, the repository itself) is created by some of the well-known and experienced community members and contains instructions/information about various guild tools which simplify various stake-ops (setting up, managing and monitoring pools) for operators. Note that the guides are present to help you simplify your tasks - but as an entity responsible for creating blocks on a financial platform, we expect some basic pre-requisite skill sets - at a professional level - before entering the portal:

Everyone is welcome to contribute to the repository (via documentation, testing, code, videos, etc). Our aim is to work together and reduce confusion, rather than hosting 100 versions of documentation - each marketing their pool in their own way.

      "},{"location":"#support","title":"Support","text":"

      The Telegram Support channel is used to announce new releases and changes to the code base. This is also the place to ask general questions regarding the documentation and scripts on this site.

      To report bugs and issues with scripts and documentation please open a GitHub Issue. Feature requests are best opened as a discussion thread.

      "},{"location":"#getting-started","title":"Getting Started","text":"

      Use the sidebar to navigate through the topics. Note that the instructions assume the folder structure as per here.

Again, feedback/contribution and ownership of tasks is always welcome. If you're interested in collaborating regularly, make a start - and you should be part of the guild already.

      "},{"location":"basics/","title":"Basics","text":""},{"location":"basics/#architecture","title":"Architecture","text":"

The architecture for various components is already described at docs.cardano.org by CF/IOHK. We will not reinvent the wheel.

      "},{"location":"basics/#manual-software-pre-requirements","title":"Manual Software Pre-Requirements","text":"

While we do not intend to hand out step-by-step instructions, the tools are often misused as a shortcut to avoid ensuring the base skill sets mentioned on the home page. Some of the common gotchas that we often find SPOs missing out on:

      "},{"location":"basics/#pre-requisites","title":"Pre-Requisites","text":"

      Reminder !!

You're expected to run the commands below from the same session, using the same working directories as indicated, and using a non-root user with sudo access. You are expected to be familiar with this as part of the pre-requisite skill sets for stake pool operators.

      "},{"location":"basics/#os-prereqs","title":"Set up OS packages, folder structure and fetch files from repo","text":"

The pre-requisites for Linux systems are automated to be executed as a single script. To download the pre-requisites script, execute the below:

      mkdir \"$HOME/tmp\";cd \"$HOME/tmp\"\n# Install curl\n# CentOS / RedHat - sudo dnf -y install curl\n# Ubuntu / Debian - sudo apt -y install curl\ncurl -sS -o guild-deploy.sh https://raw.githubusercontent.com/cardano-community/guild-operators/master/scripts/cnode-helper-scripts/guild-deploy.sh\nchmod 755 guild-deploy.sh\n

Please familiarise yourself with the syntax of guild-deploy.sh before proceeding. The usage syntax can be checked using ./guild-deploy.sh -h, sample output below:

Usage: guild-deploy.sh [-n <mainnet|preprod|guild|preview>] [-p path] [-t <name>] [-b <branch>] [-u] [-s [p][b][l][m][f][d][c][o][w][x]]\nSet up dependencies for building/using common tools across cardano ecosystem.\nThe script will always update dynamic content from existing scripts retaining existing user variables\n\n-n    Connect to specified network instead of mainnet network (Default: connect to cardano mainnet network) eg: -n guild\n-p    Parent folder path underneath which the top-level folder will be created (Default: /opt/cardano)\n-t    Alternate name for top level folder - only alpha-numeric chars allowed (Default: cnode)\n-b    Use alternate branch of scripts to download - only recommended for testing/development (Default: master)\n-u    Skip update check for script itself\n-s    Selective Install, only deploy specific components as below:\n  p   Install common pre-requisite OS-level Dependencies for most tools on this repo (Default: skip)\n  b   Install OS level dependencies for tools required while building cardano-node/cardano-db-sync components (Default: skip)\n  l   Build and Install libsodium fork from IO repositories (Default: skip)\n  m   Download latest (released) binaries for mithril-signer, mithril-client (Default: skip)\n  f   Force overwrite entire content of scripts and config files (backups of existing ones will be created) (Default: skip)\n  d   Download latest (released) binaries for bech32, cardano-address, cardano-node, cardano-cli, cardano-db-sync and cardano-submit-api binaries (Default: skip)\n  c   Install/Upgrade CNCLI binary (Default: skip) # (1)!\n  o   Install/Upgrade Ogmios Server binary (Default: skip)\n  w   Install/Upgrade Cardano Hardware CLI (Default: skip)\n  x   Install/Upgrade Cardano Signer binary (Default: skip)\n
      1. If you receive an error for glibc, it would likely be due to the build mismatch between pre-compiled binary and your OS, which is not uncommon. You may need to compile cncli manually on your OS as per instructions here - make sure to copy the output binary to \"${HOME}/.local/bin\" folder.

This script uses an opt-in election of what you'd like it to do (as opposed to the previous version, which used to try and auto-detect versions). The defaults without any arguments will only update the static part of the script contents for you. A typical example to install most components, but not overwrite the static part of existing files, for the preview network would be:

      ./guild-deploy.sh -b master -n preview -t cnode -s pdlcowx\n. \"${HOME}/.bashrc\"\n

If, instead of downloading, you'd want to build the components yourself, you could use:

      ./guild-deploy.sh -b master -n preview -t cnode -s pblcowx\n. \"${HOME}/.bashrc\"\n

      Lastly, if you'd want to update your scripts but not install any additional dependencies, you may simply run:

      ./guild-deploy.sh -b master -n preview -t cnode\n
      "},{"location":"basics/#folder-structure","title":"Folder structure","text":"

      Running the script above will create the folder structure as per below, for your reference. You can replace the top level folder /opt/cardano/cnode by editing the value of CNODE_HOME in ~/.bashrc and $CNODE_HOME/files/env files:

      /opt/cardano/cnode            # Top-Level Folder\n\u251c\u2500\u2500 ...\n\u251c\u2500\u2500 files                     # Config, genesis and topology files\n\u2502   \u251c\u2500\u2500 ...\n\u2502   \u251c\u2500\u2500 byron-genesis.json    # Byron Genesis file referenced in config.json\n\u2502   \u251c\u2500\u2500 shelley-genesis.json  # Genesis file referenced in config.json\n\u2502   \u251c\u2500\u2500 alonzo-genesis.json    # Alonzo Genesis file referenced in config.json\n\u2502   \u251c\u2500\u2500 config.json           # Config file used by cardano-node\n\u2502   \u2514\u2500\u2500 topology.json         # Map of chain for cardano-node to boot from\n\u251c\u2500\u2500 db                        # DB Store for cardano-node\n\u251c\u2500\u2500 guild-db                  # DB Store for guild-specific tools and additions (eg: cncli, cardano-db-sync's schema)\n\u251c\u2500\u2500 logs                      # Logs for cardano-node\n\u251c\u2500\u2500 priv                      # Folder to store your keys (permission: 600)\n\u251c\u2500\u2500 scripts                   # Scripts to start and interact with cardano-node\n\u2514\u2500\u2500 sockets                   # Socket files created by cardano-node\n
      "},{"location":"build/","title":"Overview","text":"

The documentation here uses instructions from Intersect MBO repositories as a foundation, with additional information which we can contribute to where appropriate. Note that not everyone needs to build each component. You can refer to the architecture section to understand and qualify which of the components built by IO you want to run.

      "},{"location":"build/#components","title":"Components","text":"

      For most Pool Operators, simply building cardano-node should be enough. Use the below to decide whether you need other components:

graph TB A([Interact with HD Wallets locally]) B([Explore blockchain locally]) C([Easy pool-ops and fund management]) D([Create Custom Assets]) E([Monitor node using Terminal UI]) F([Sign/verify any data using crypto keys]) N(Node) O(Ogmios) P(gRest/Koios) Q(DBSync) R(Wallet) S(CNTools) T(Tx Submit API) U(GraphQL) V(OfflineMetadataTools) X(gLiveView) Y(cardano-signer) Z[(PostgreSQL)] N --x C --x S N --x D --x S & V N --x E --x X N --x B B --x U --x Q B --x P --x Q P --x O P --x T F ---x Y N --x A --x R Q --x Z

      Important

      We strongly prefer use of gRest over GraphQL components due to performance, security, simplicity, control and most importantly - consistency benefits. Please refer to official documentations if you're interested in GraphQL or Cardano-Rest components instead.

      Note

The instructions are intentionally limited to stack/cabal to avoid wait times/availability of nix/docker files on a rapidly developing codebase - this also helps us prevent managing multiple versions of instructions.

      "},{"location":"build/#description-for-components-built-by-community","title":"Description for components built by community","text":""},{"location":"build/#cntools","title":"CNTools","text":"

A Swiss army knife for pool operators, primarily built by Ola, to simplify typical operations regarding their wallet keys and pool management. You can read more about it here

      "},{"location":"build/#gliveview","title":"gLiveView","text":"

      A local node monitoring tool, primarily built by Ola, to use in addition to remote monitoring tools like Prometheus/Grafana, Zabbix or IOG's RTView. This is especially useful when moving to a systemd deployment - if you haven't done so already - as it offers an intuitive UI to monitor the node status. You can read more about it here

      "},{"location":"build/#topology-updater","title":"Topology Updater","text":"

A temporary node-to-node discovery solution, run by Markus, that was started initially to bridge the gap created while awaiting completion of P2P on the cardano network, but has since become an important lifeline to the network health - allowing everyone to activate their relay nodes without having to postpone and wait for manual topology completion requests. You can read more about it here

      "},{"location":"build/#koiosgrest","title":"Koios/gRest","text":"

A full-featured local query layer node to explore blockchain data (via dbsync) using standardised pre-built queries served via API as per the standard from Koios - for which users can opt to participate in the elastic query layer. You can read more about the build steps here and reference API endpoints here

      "},{"location":"build/#ogmios","title":"Ogmios","text":"

      A lightweight bridge interface for cardano-node. It offers a WebSockets API that enables local clients to speak Ouroboros' mini-protocols via JSON/RPC. You can read more about it here

      "},{"location":"build/#cncli","title":"CNCLI","text":"

      A CLI tool written in Rust by Andrew Westberg for low-level communication with cardano-node. It is commonly used by SPOs to check their leader logs (integrates with CNTools as well as gLiveView) or to send their pool's health information to https://pooltool.io. You can read more about it here

      "},{"location":"build/#cardano-signer","title":"Cardano Signer","text":"

      A tool written by Martin to sign/verify data (hex, text or binary) using cryptographic keys to generate data as per CIP-8 or CIP-36 standards. You can read more about it here

      "},{"location":"catalystf11/","title":"Catalystf11","text":""},{"location":"catalystf11/#marlowehub-unifying-platform-for-marlowe-smart-contracts-phase-1-smart-contracts","title":"MarloweHub: Unifying Platform for Marlowe Smart Contracts - Phase 1 - Smart Contracts","text":"

      Category: Concept Applicant: mike (pooltool.io) Requested funds: \u20b3100,000.00 Links: Ideascale, Lidonation

      "},{"location":"catalystf11/#adastat-cardano-explorer-open-source-improved-reboot-towards-a-first-class-community-blockchain-explorer","title":"AdaStat Cardano Explorer - Open Source Improved Reboot towards a first-class community blockchain explorer","text":"

      Category: Product Applicant: Dmytro Stashenko (adastat.net) Requested funds: \u20b3180,300.00 Links: Ideascale, Lidonation

      "},{"location":"catalystf11/#adahold-decentralized-price-can-only-go-up-token-solution-for-true-ada-hodlers-smart-contract","title":"AdaHold: Decentralized Price-Can-Only-Go-Up Token, Solution For TRUE Ada Hodlers - Smart Contract","text":"

      Category: Concept Applicant: Dmytro Stashenko (adastat.net) Requested funds: \u20b399,700.00 Links: Ideascale, Lidonation

      "},{"location":"catalystf11/#sundae-labs-next-gen-uplc-debugger-with-aiken-integration","title":"Sundae Labs Next-Gen UPLC Debugger with Aiken Integration","text":"

      Category: Developers Applicant: Dan Gonzalez (sundae.fi) Requested funds: \u20b3140,000.00 Links: Ideascale, Lidonation

      "},{"location":"catalystf11/#margin-pool-enhanced-liquidity-for-margin-trading-on-sundaeswap","title":"Margin Pool: Enhanced Liquidity for Margin Trading on SundaeSwap","text":"

      Category: Solution Applicant: Dan Gonzalez (sundae.fi) Requested funds: \u20b3300,000.00 Links: Ideascale, Lidonation

      "},{"location":"catalystf11/#regulated-and-permissioned-defi-with-sundae-and-kora-labs","title":"Regulated and Permissioned DeFi with Sundae and Kora Labs","text":"

      Category: Concept Applicant: Dan Gonzalez (sundae.fi) Requested funds: \u20b3100,000.00 Links: Ideascale, Lidonation

      "},{"location":"catalystf11/#sundae-labs-comprehensive-specification-development-for-gummiworm-protocol-on-cardano","title":"Sundae Labs Comprehensive Specification Development for Gummiworm Protocol on Cardano","text":"

      Category: Concept Applicant: Dan Gonzalez (sundae.fi) Requested funds: \u20b3100,000.00 Links: Ideascale, Lidonation

      "},{"location":"catalystf11/#nftcdnio-universal-nft-viewer-api-musicvideoweb3d","title":"[nftcdn.io] Universal NFT Viewer API (Music+Video+Web+3D+\u2026)","text":"

      Category: Product Applicant: Smaug (pool.pm) Requested funds: \u20b3299,999.00 Links: Ideascale, Lidonation

      "},{"location":"catalystf11/#nftcdnio-nsfw-nft-detection-for-marketplaces-wallets-explorers","title":"[nftcdn.io] NSFW NFT Detection for Marketplaces, Wallets & Explorers","text":"

      Category: Product Applicant: Smaug (pool.pm) Requested funds: \u20b3149,999.00 Links: Ideascale, Lidonation

      "},{"location":"catalystf11/#cardanoscan-analytics-charts","title":"Cardanoscan Analytics Charts","text":"

      Category: Product Applicant: Strica (cardanoscan.io) Requested funds: \u20b344,000.00 Links: Ideascale, Lidonation

      "},{"location":"catalystf11/#cardanoscan-api-javascript-sdk","title":"Cardanoscan API Javascript SDK","text":"

      Category: Developers Applicant: Strica (cardanoscan.io) Requested funds: \u20b364,000.00 Links: Ideascale, Lidonation

      "},{"location":"catalystf11/#integrate-keystone-hardware-wallet-into-typhon","title":"Integrate Keystone Hardware Wallet into Typhon","text":"

      Category: Product Applicant: Strica (cardanoscan.io) Requested funds: \u20b384,000.00 Links: Ideascale, Lidonation

      "},{"location":"catalystf11/#add-support-for-marlowe-on-cardanoscan","title":"Add support for Marlowe on Cardanoscan","text":"

      Category: Product Applicant: Strica (cardanoscan.io) Requested funds: \u20b3133,000.00 Links: Ideascale, Lidonation

      "},{"location":"catalystf11/#cardanoscan-data-info-bubbles","title":"Cardanoscan data info bubbles","text":"

      Category: Ecosystem Applicant: Strica (cardanoscan.io) Requested funds: \u20b335,000.00 Links: Ideascale, Lidonation

      "},{"location":"catalystf11/#create-a-cnt-marketplace-on-norwegian-block-exchange-nbxcom","title":"Create a CNT marketplace on Norwegian Block Exchange (NBX.COM)","text":"

      Category: Product Applicant: Eystein Hansen (nbx.com) Requested funds: \u20b3145,000.00 Links: Ideascale, Lidonation

      "},{"location":"contributors/","title":"Contributors","text":"

      Everyone is welcome to contribute to the guide, as well as the repository. Below is just a thank you to people who have been contributing consistently:

      Adam Chris Damjan Homer Markus OCG Ola Ahlman Pal Dorogi Papacarp PegasusPool Psychomb RdLrT RedOracle SmaugPool

To start contributing, simply hit the GitHub repository and raise an Issue/Pull Request

      "},{"location":"grest-meets/","title":"GRest Meeting summaries","text":"

      Thank you all for joining and contributing to the project

      Below you can find a short summary of every GRest meeting held, both for logging purposes and for those who were not able to attend.

      "},{"location":"grest-meets/#participants","title":"Participants:","text":"Participant 16Sep2021 02Sep2021 26Aug2021 19Aug2021 12Aug2021 29Jul2021 22Jul2021 15Jul2021 09Jul2021 02Jul2021 25Jun2021 Damjan Homer Markus Ola RdLrT Red Papacarp Paddy GimbaLabs 16Sep2021 02Sep2021 26Aug2021 19Aug2021 12Aug2021 29Jul2021 22Jul2021 15Jul2021 09Jul2021 02Jul2021

      After the initial stand-up updates from participants, we went through the entire Trello board, updating/deleting existing tickets and creating some new ones.

      25Jun2021"},{"location":"grest-meets/#scheduling-running-update-queries","title":"Scheduling running update queries","text":""},{"location":"grest-meets/#refactor-of-queries","title":"Refactor of queries","text":""},{"location":"grest-meets/#postgres-tuning","title":"postgres tuning","text":""},{"location":"grest-meets/#updates","title":"Updates","text":""},{"location":"grest-meets/#queries","title":"Queries","text":""},{"location":"grest-meets/#problems","title":"Problems","text":""},{"location":"grest-meets/#actions","title":"Actions","text":""},{"location":"grest-meets/#queries_1","title":"Queries","text":""},{"location":"grest-meets/#transaction-submission-feature","title":"Transaction submission feature","text":""},{"location":"grest-meets/#db-replication-presentation-by-redoracle","title":"DB replication presentation by Redoracle","text":""},{"location":"grest-meets/#process-for-upgrading-our-instances","title":"Process for upgrading our instances:","text":""},{"location":"grest-meets/#queries_2","title":"Queries:","text":""},{"location":"grest-meets/#stake-distribution","title":"Stake distribution","text":""},{"location":"grest-meets/#tx-history","title":"Tx History","text":""},{"location":"grest-meets/#problems_1","title":"PROBLEMS","text":""},{"location":"grest-meets/#actions_1","title":"ACTIONS","text":""},{"location":"grest-meets/#problems_2","title":"PROBLEMS","text":""},{"location":"grest-meets/#actions_2","title":"ACTIONS","text":""},{"location":"grest-meets/#problems_3","title":"PROBLEMS","text":""},{"location":"grest-meets/#actions_3","title":"ACTIONS","text":"
      1. Team

        • catch live stake distributions in a separate table (in our grest schema)
          • these queries can run on a schedule
          • response comes from the instance with the latest data
        • other approaches:
          • possibly distribute pools between instances (complex approach)
          • run full query once and only check for new/leaving delegators (probably impossible because of existing delegator UTXO movements)
        • implement monitoring of execution times for all the queries
        • come up with a timeline for launch (next call)
        • stress test before launch
        • start building queries listed on Trello board
      2. Individual

        • sync db-sync instances to commit 84226d33eed66be8e61d50b7e1dacebdc095cee9 on release/10.1.x
        • update setups to reflect recent directory restructuring and updated instructions
      "},{"location":"grest-meets/#introduction-for-new-joiner-paddy","title":"Introduction for new joiner - Paddy","text":""},{"location":"grest-meets/#problems_4","title":"Problems","text":""},{"location":"grest-meets/#action-items","title":"Action Items","text":""},{"location":"grest-meets/#deployment-scripts","title":"Deployment scripts","text":"

Ola added automatic deployment of services to the scripts last week. We added new tasks on the Trello ticket, including flags for multiple networks (guild, testnet, mainnet), the haproxy service dynamically creating hosts, and doc updates. Overall, the script works well, with some manual interaction still required at the moment.

      "},{"location":"grest-meets/#supported-networks","title":"Supported Networks","text":"

      Just for the record here, a 16GB (or even 8GB) instance is enough to support both testnet and guild networks.

      "},{"location":"grest-meets/#db-sync-versioning","title":"db-sync versioning","text":"

      We agreed to use the release/10.1.x branch which is not yet released but built to include Alonzo migrations to avoid rework later. This version does require Alonzo config and hash to be in the node's config.json. This has to be done manually and the files are available here. Once fully released, all members should rebuild the released version to ensure each instance is running the same code.

      "},{"location":"grest-meets/#dns-naming","title":"DNS naming","text":"

For the DNS setup ticket, we started to think about the instance names for the 2 DNS instances (orange in the graph). Submissions for names will be made in the Telegram group, and we will probably make a poll once we have the entries finalised.

      "},{"location":"grest-meets/#monitoring-system","title":"Monitoring System","text":"

      Priyank started setting up the monitoring on his instance which can then easily be switched to a separate monitoring instance. We agreed to use Prometheus / Grafana combo for data source / visualisation. We'll probably need to create some custom archiving of data to keep it long term as Prometheus stores only the last 30 days of data.

      "},{"location":"grest-meets/#next-meeting","title":"Next meeting","text":"

We would like to make Friday @ 07:00 UTC the standard time and keep meetings at a weekly frequency. A poll will still be created for the next weeks, but if there are no objections / requests for switching the time around (which we have not had so far), we can go ahead with making Friday the standard, with polls no longer required and only reminders / Google invites sent every week.

      "},{"location":"grest-meets/#deployment-scripts_1","title":"Deployment scripts","text":"

      During the last week, work has been done on deployment scripts for all services (db-sync, gRest and haproxy) -> this is now in testing with updated instructions on trello. Everybody can put their name down on the ticket to signify when the setup is complete and note down any comments for bugs/improvements. This is the main priority at the moment as it would allow us to start transferring our setups to mainnet.

      "},{"location":"grest-meets/#switch-to-mainnet","title":"Switch to Mainnet","text":"

Following on from that, we created a ticket for starting to set up mainnet instances -> we can use 32GB RAM to start and increase later. While making sure everything works against the guild network is the priority, people are free to start on this as well, as we anticipate we are almost ready for the switch.

      "},{"location":"grest-meets/#supported-networks_1","title":"Supported Networks","text":"

This brings me to another discussion point, which is on which networks are to be supported. After some discussion, it was agreed to keep beefy servers for mainnet, and have small independent instances for testnet maintained by those interested, while the guild instance is pretty lightweight and useful to keep.

      "},{"location":"grest-meets/#monitoring-system_1","title":"Monitoring System","text":"

The ticket for creating a centralised monitoring system was discussed and updated. I would say it would be good to have at least a basic version of the system in place around the time we switch to mainnet. The system could eventually serve for: analysis of instance performances and subsequent tuning, endpoints usage, anticipation of system requirements increases, etc.

      I would say that this should be an important topic of the next meeting to come up with an approach on how we will structure this system so that we can start building it in time for mainnet switch.

      "},{"location":"grest-meets/#handling-ssl","title":"Handling SSL","text":"

Enabling SSL was agreed to not be required for each instance, but is optional, and documentation should be created on how to automate the process of renewing SSL certificates for those wishing to add it to their instance. The end-user facing "Instance Checker" endpoints will of course be SSL-enabled.

      "},{"location":"grest-meets/#next-meeting_1","title":"Next meeting","text":"

We somewhat agreed to another meeting next week, again at the same time, but some participants aren't 100% sure of their availability. Friday at 07:00 UTC might be a good standard time to hold on to, but I will make a poll like last time so that we can get more info before confirming the meeting.

      "},{"location":"grest-meets/#meeting-structure","title":"Meeting Structure","text":"

As this was the first meeting, at the start we discussed the meeting structure. In general, we agreed to something like what's listed below, but this can definitely change in the future:

1) 2-liner (60s) round-the-table stand-ups by everyone to sync up on what they were doing / are planning to do / mention struggles etc. This itself often sparks discussions. 2) Going through the Trello board tasks with the intention of discussing and possibly assigning them to individuals / smaller groups (maybe 1-2-3 people choose to work together on a single task)

      "},{"location":"grest-meets/#stand-ups","title":"Stand-ups","text":"

      We then proceeded to give a status of where we are individually in terms of what's been done, a summary below:

      "},{"location":"grest-meets/#main-discussion-points","title":"Main discussion points","text":"
1. Directory structure on the repo -> General agreement is to have anything related to db-sync/postgREST separated from the current cnode-helper-scripts directory. We can finalise the end locations of files a bit later; for now, the intent should be to simply add them all to the /files/dbsync folder. The prereqs.sh addendum can be done once artifacts are finalised (added a Trello ticket for tracking).
2. DNS/haproxy configurations: We have two options: a. a controlled approach for endpoints - wherein there is a layer of haproxy that will load balance and ensure tip being in sync for individual providers (individuals can provide haproxy OR gRest instances); b. completely decentralised - each client maintains a haproxy endpoint, and fails over to another node if it's not up to a recent tip. I think that in general, it was agreed to use a hybrid approach. Details are captured in the diagram here. The DNS endpoint can be reserved post initial testing of haproxy-agent against mainnet nodes.
3. Internal monitoring system: This would be important and useful, and had not been mentioned before this meeting (as far as I know). Basically, a system for monitoring all of our instances together and also handling alerts. Not only for ensuring good quality of service, but also for logging and inspection of short- and long-term trends to better understand what's happening. A ticket has been added to the Trello board.
      "},{"location":"grest-meets/#next-meeting_2","title":"Next meeting","text":"

All in all, I think we saw that there is a need for these meetings, as there are a lot of things to discuss and new ideas come up (like the monitoring system). We went for over an hour (~1h15min) and still didn't have enough time to go through the board; we basically only touched the DNS/haproxy part. This tells me that we are in a stage where more frequent meetings are required - weekly instead of biweekly - as we are in the initial stage and it's important to build things right from the start rather than having to refactor later on. With that, the participants in general agreed to another meeting next week, but this will be confirmed in the TG chat and the times can be discussed then.

      "},{"location":"sidebar/","title":"Tree","text":""},{"location":"upgrade/","title":"Upgrade","text":"One-Time major upgrade for Koios Scripts from 20-Jan-2023 (expand for details)

The scripts on the guild-operators repository have gone through quite a few changes to accommodate the below:

Some of the above required us to add breaking changes to some scripts, but hopefully the above explains the premise for those changes. To ease this one-time upgrade process for existing deployments, we have tried to come up with the guide below; feel free to edit this file to improve the documents based on your experience. Again, apologies in advance to those who do not agree with the above changes (the old code would of course remain unimpacted at tag legacy-scripts, so if you'd like to stick to the old scripts, you can use -b legacy-scripts for your tools to switch back).

      "},{"location":"upgrade/#steps-for-ugrading","title":"Steps for Ugrading","text":"

      Warning

      Make sure you go through upgrade steps for your setup in a non-mainnet environment first!

      Remember

      Please add any environment-specific parameters (eg: custom top level folder, network flag, etc) to the execution command below, similar to prereqs.sh (check new syntax using guild-deploy.sh -h)

      mkdir \"$HOME/tmp\";cd \"$HOME/tmp\"\ncurl -sS -o guild-deploy.sh https://raw.githubusercontent.com/cardano-community/guild-operators/master/scripts/cnode-helper-scripts/guild-deploy.sh\nchmod 700 guild-deploy.sh\n./guild-deploy.sh -s f -b master\n
      source \"${HOME}\"/.bashrc\necho \"${PATH}\"\n

You can move the binaries by using the mv command (for example, if you don't have any other files in these folders, you can use the command below):

      Note

Ideally, you should shut down services (eg: cnode, cnode-dbsync, etc) prior to running the below to ensure they run from the new location (you can also re-deploy them if you haven't done so in a while, eg: ./cnode.sh -d). At the end of the guide, you can start them back up.

      mv -t \"${HOME}\"/.local/bin/ \"${HOME}\"/.cabal/bin/* \"${HOME}\"/.cargo/bin/* \"${HOME}\"/bin/*\n
      whereis bech32 cardano-address cardano-cli cardano-db-sync cardano-hw-cli cardano-node cardano-submit-api cncli ogmios\n

The above might result in some lines having more than one entry (eg: you might have cardano-cli in "${HOME}"/.cabal/bin and "${HOME}"/.local/bin) - for which you'd want to delete the reference(s) not in "${HOME}"/.local/bin - while for other cases, you might have no values (eg: you may not use cardano-db-sync, cncli, ogmios and/or cardano-hw-cli). You need not take any actions for the binaries you do not use.
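
For example (illustrative only - double-check the paths via the whereis output above before deleting anything), removing a stale duplicate of cardano-cli could look like:

rm \"${HOME}\"/.cabal/bin/cardano-cli\n# re-run whereis to confirm only the \"${HOME}\"/.local/bin entry remains\nwhereis cardano-cli\n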

      "},{"location":"upgrade/#supportimprovements","title":"Support/Improvements","text":"

Hope the guide above helps you with the migration, but again - we could've missed some edge cases. If so, please report via chat in the Koios Discussions channel only. Please DO NOT make edits to the script content based on forum/alternate guides/channels; while done with the best intentions, there have been solutions put online that modify files unnecessarily instead of correcting configs and disabling updates, and such actions will only cause trouble for future updates.

      "},{"location":"Appendix/RecoverByronWallet/","title":"Unofficial Instructions for recovering your Byron Era funds on the new Incentivized Shelley Testnet","text":""},{"location":"Appendix/RecoverByronWallet/#1-grab-and-install-haskell","title":"1. Grab and install Haskell","text":"
      curl -sSL https://get.haskellstack.org/ | sh\n
      "},{"location":"Appendix/RecoverByronWallet/#2-get-the-wallet","title":"2. Get the wallet","text":"

note: you must build from source as of today, as there are changes you need that have only just made it into master

      git clone https://github.com/cardano-foundation/cardano-wallet.git\n

      "},{"location":"Appendix/RecoverByronWallet/#3-go-into-the-wallet-directory","title":"3. Go into the wallet directory","text":"
      cd cardano-wallet\n
      "},{"location":"Appendix/RecoverByronWallet/#4-build-the-wallet","title":"4. Build the wallet","text":"

      stack build --test --no-run-tests\n
If it fails, there are a few reasons we have found: - The cardano build instructions reference a few things that may be missing. Check those. - Or maybe one of these would help:

      "},{"location":"Appendix/RecoverByronWallet/#libssl","title":"Libssl:","text":"
      sudo apt install libssl-dev\n
      "},{"location":"Appendix/RecoverByronWallet/#sqlite","title":"Sqlite :","text":"
      sudo apt-get install sqlite3 libsqlite3-dev \n
      "},{"location":"Appendix/RecoverByronWallet/#gmp","title":"gmp:","text":"
      sudo apt-get install libgmp3-dev \n
      "},{"location":"Appendix/RecoverByronWallet/#systemd-dev","title":"systemd dev:","text":"
      sudo apt install libsystemd-dev\n

get coffee... it takes a while

      "},{"location":"Appendix/RecoverByronWallet/#5-when-its-done-install-executables-to-your-path","title":"5. When its done, install executables to your path","text":"
      stack install\n
      "},{"location":"Appendix/RecoverByronWallet/#6-test-to-make-sure-cardano-wallet-jormungandr-works-fine","title":"6. Test to make sure cardano-wallet-jormungandr works fine.","text":"

Generate the new mnemonics you will need below. Note that this generates 15 words, as opposed to your Byron-era mnemonics which were only 12 words.

      cardano-wallet-jormungandr mnemonic generate\n
      "},{"location":"Appendix/RecoverByronWallet/#7-launch-the-wallet-as-a-service","title":"7. Launch the wallet as a service.","text":"

You can either open another terminal window or use screen or something. Wherever you run this next command, you won't be able to use that terminal anymore until you stop the wallet.

Change --node-port 3001 to wherever you have your jormungandr REST interface running. For me it was 5001.

Change --port 3002 to wherever you want to access the wallet interface. If you have other things running, avoid those ports. For most, 3002 should be free.

Just to future-proof these instructions: genesis should be whatever genesis you are on.

      cardano-wallet-jormungandr serve --node-port 3001 --port 3002 --genesis-block-hash e03547a7effaf05021b40dd762d5c4cf944b991144f1ad507ef792ae54603197\n
      "},{"location":"Appendix/RecoverByronWallet/#8-restore-your-byron-wallet","title":"8. Restore your byron wallet:","text":"

---> in another window

Replace foo, foo, foo with all your mnemonics from the Byron wallet you are restoring.

Also, if you put your wallet on a different port than 3002, fix that too.

      curl -X POST -H \"Content-Type: application/json\" -d '{ \"name\": \"legacy_wallet\", \"mnemonic_sentence\": [\"foo\",\"foo\",\"foo\",\"foo\",\"foo\",\"foo\",\"foo\",\"foo\",\"foo\",\"foo\",\"foo\",\"foo\"], \"passphrase\": \"areallylongpassword\"}' http://localhost:3002/v2/byron-wallets\n
That's going to spit out some information about the wallet it creates; you should see the value of your wallet - hopefully it's not zero. And you need the wallet ID for the next step.

      "},{"location":"Appendix/RecoverByronWallet/#9-create-your-shelley-wallet","title":"9. Create your shelley wallet:","text":"

Remember all those mnemonics you made above? Put them here instead of all the foo's.

      curl -X POST -H \"Content-Type: application/json\" -d '{ \"name\": \"pool_wallet\", \"mnemonic_sentence\": [\"foo\",\"foo\",\"foo\",\"foo\",\"foo\",\"foo\",\"foo\",\"foo\",\"foo\",\"foo\",\"foo\",\"foo\",\"foo\",\"foo\",\"foo\"], \"passphrase\": \"areallylongpasswordagain\"}' http://localhost:3002/v2/wallets\n
The important thing to get is the wallet id from this command.

      "},{"location":"Appendix/RecoverByronWallet/#10-migrate-your-funds","title":"10. Migrate your funds","text":"

Now you are ready to migrate your wallet. Replace the <old wallet id> and <new wallet id> with the values you got above.

      curl -X POST -H \"Content-Type: application/json\" -d '{\"passphrase\": \"areallylongpassword\"}' http://localhost:3002/v2/byron-wallets/<old wallet id>/migrations/<new wallet id>\n
      "},{"location":"Appendix/RecoverByronWallet/#11-congratulations-your-funds-are-now-in-your-new-wallet","title":"11. Congratulations. your funds are now in your new wallet.","text":"

      From here we recommend you send them to a new address entirely owned and created by jcli or whatever method you have been using for the testnet process.

This technically may not be required, but a lot of us did it and we know it works for setting up pools and stuff.

Send a small amount first, just to make sure you are in control of the transaction and don't send your funds to la la land.

      If you want to send to another address use the command below, but replace the address that you want to send it to, the amount, and your <new wallet id>

      curl -X POST -H \"Content-Type: application/json\" -d '{\"payments\": [ { \"address\": \"<address to send to>\"\", \"amount\": { \"quantity\": 83333330000000, \"unit\": \"lovelace\" } } ], \"passphrase\": \"areallylongpasswordagain\"}' http://localhost:3002/v2/wallets/<new wallet id>/transactions\n

      "},{"location":"Appendix/monitoring/","title":"Monitoring","text":"

      Ensure the Pre-Requisites are in place before you proceed.

This is an easy-to-use script to automate setting up of monitoring tools. It automates the following tasks: - Installs Prometheus, Node Exporter and Grafana servers for your respective Linux architecture. - Configures Prometheus to connect to the cardano-node and node exporter jobs. - Provisions the installed Prometheus server to be automatically available as a data source in Grafana. - Provisions two of the common Grafana dashboards used to monitor cardano-node (by SkyLight and IOHK) to be readily consumed from Grafana. - Deploys prometheus, node_exporter and grafana-server as systemd services on Linux. - Starts and enables those services.

Note that securing prometheus/grafana servers via TLS encryption and other security best practices are out of scope for this document; it's mainly aimed at helping you get started with monitoring without much fuss.

      !> Ensure that you've opened the firewall port for grafana server (default used in this script is 5000)
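
As a hedged example - assuming you use ufw as your firewall manager (an assumption; adapt to whatever firewall tooling you actually use) - opening the default grafana port could look like:

sudo ufw allow 5000/tcp\nsudo ufw status | grep 5000\n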

      "},{"location":"Appendix/monitoring/#download-setup_monsh","title":"Download setup_mon.sh","text":"

If you have run guild-deploy.sh, you can skip this step. To download the monitoring script, you can execute the commands below:

      cd $CNODE_HOME/scripts\nwget https://raw.githubusercontent.com/cardano-community/guild-operators/master/scripts/cnode-helper-scripts/setup_mon.sh\nchmod 750 setup_mon.sh\n

      "},{"location":"Appendix/monitoring/#customise-any-environment-variables","title":"Customise any Environment Variables","text":"

The default selection may not suit everyone. You can customise further environment variable settings by opening the script in an editor (eg: vi setup_mon.sh), and updating the variables below to your liking:

#!/usr/bin/env bash\n# shellcheck disable=SC2209,SC2164\n\n######################################################################\n#### Environment Variables\n######################################################################\nCNODE_IP=127.0.0.1\nCNODE_PORT=12798\nGRAFANA_HOST=0.0.0.0\nGRAFANA_PORT=5000\nPROJ_PATH=/opt/cardano/monitoring\nPROM_HOST=127.0.0.1\nPROM_PORT=9090\nNEXP_PORT=$(( PROM_PORT + 1 ))\n
"},{"location":"Appendix/monitoring/#set-up-monitoring","title":"Set up Monitoring","text":"

Execute setup_mon.sh with the full path to the destination folder you want to set up monitoring in. If you're following the guild folder structure, you do not need to specify -d. Read the usage comments below before you run the actual script.

Note that to deploy services as systemd, the script expects sudo access to be available to the user running the script.

cd $CNODE_HOME/scripts\n# To check Usage parameters:\n# ./setup_mon.sh -h\n#Usage: setup_mon.sh [-d directory] [-i IP/hostname] [-p port]\n#Setup monitoring using Prometheus and Grafana for Cardano Node\n#-d directory      Directory where you'd like to deploy the packages for prometheus, node exporter and grafana\n#-i IP/hostname    IPv4 address or a FQDN/DNS name where your cardano-node (relay) is running (check for hasPrometheus in config.json; eg: 127.0.0.1 if same machine as cardano-node)\n#-p port           Port at which your cardano-node is exporting stats (check for hasPrometheus in config.json; eg: 12798)\n./setup_mon.sh\n# \n# Downloading prometheus v2.18.1...\n# Downloading grafana v7.0.0...\n# Downloading exporter v0.18.1...\n# Downloading grafana dashboard(s)...\n#   - SKYLight Monitoring Dashboard\n#   - IOHK Monitoring Dashboard\n# \n# NOTE: Could not create directory as rdlrt, attempting sudo ..\n# NOTE: No worries, sudo worked !! Moving on ..\n# Configuring components\n# Registering Prometheus as datasource in Grafana..\n# Creating service files as root..\n# \n# =====================================================\n# Installation is completed\n# =====================================================\n# \n# - Prometheus (default): http://127.0.0.1:9090/metrics\n#     Node metrics:       http://127.0.0.1:12798\n#     Node exp metrics:   http://127.0.0.1:9091\n# - Grafana (default):    http://0.0.0.0:5000\n# \n# \n# You need to do the following to configure grafana:\n# 0. The services should already be started, verify if you can login to grafana, and prometheus. If using 127.0.0.1 as IP, you can check via curl\n# 1. Login to grafana as admin/admin (http://0.0.0.0:5000)\n# 2. Add \"prometheus\" (all lowercase) datasource (http://127.0.0.1:9090)\n# 3. Create a new dashboard by importing dashboards (left plus sign).\n#   - Sometimes, the individual panel's \"prometheus\" datasource needs to be refreshed.\n# \n# Enjoy...\n# \n# Cleaning up...\n
      "},{"location":"Appendix/monitoring/#view-dashboards","title":"View Dashboards","text":"

You should now be able to log in to the Grafana dashboard using the public IP of your server, at port 5000. The initial credentials to log in would be admin/admin, and you will be asked to update your password upon first login. Once logged on, you should be able to go to Manage > Dashboards and select the dashboard you'd like to view. Note that if you've just started the server, you might see empty graphs, as the initial interval for dashboards is 12 hours. You can change it to 5 minutes via the top-right section of the page.
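
If you'd like a quick sanity check from the terminal first (a minimal sketch, assuming the default ports from the script above), you can poll the Grafana and Prometheus health endpoints:

curl -s http://127.0.0.1:5000/api/health\ncurl -s http://127.0.0.1:9090/-/healthy\n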

Thanks to Pal Dorogi for the original setup instructions these were adapted from.

      "},{"location":"Appendix/postgres/","title":"Sample Postgres Setup","text":"

These deployment instructions were used for reference while building the cardano-db-sync tool, with the scope being ease of setup and some tuning baselines for those who are new to Postgres DB. It is recommended to customise these as per your needs for production builds.

      Important

      You'd find it pretty useful to set up ZFS on your system prior to setting up Postgres, to help with your IOPs throughput requirements. You can find sample install instructions here. You can set up your entire root mount to be on ZFS, or you can opt to mount a file as ZFS on \"${CNODE_HOME}\"
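
As a minimal sketch of the dedicated-disk route (assumptions: a spare disk at /dev/sdb and a hypothetical pool name cnodepool - refer to the linked instructions for a proper setup):

sudo zpool create -o ashift=12 cnodepool /dev/sdb\n# lz4 compression typically helps both space and IO for dbsync workloads\nsudo zfs create -o compression=lz4 -o mountpoint=/opt/cardano/cnode cnodepool/cnode\n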

      "},{"location":"Appendix/postgres/#install-postgresql-server","title":"Install PostgreSQL Server","text":"

      Execute commands below to set up Postgres Server

      # Determine OS platform\nOS_ID=$( (grep -i ^ID_LIKE= /etc/os-release || grep -i ^ID= /etc/os-release) | cut -d= -f 2)\nDISTRO=$(grep -i ^NAME= /etc/os-release | cut -d= -f 2)\n\nif [ -z \"${OS_ID##*debian*}\" ]; then\n#Debian/Ubuntu\nwget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -\n  RELEASE=$(lsb_release -cs)\necho \"deb [arch=amd64] http://apt.postgresql.org/pub/repos/apt/ ${RELEASE}\"-pgdg main | sudo tee  /etc/apt/sources.list.d/pgdg.list\n  sudo apt-get update\n  sudo apt-get -y install postgresql-15 postgresql-server-dev-15 postgresql-contrib libghc-hdbc-postgresql-dev\n  sudo systemctl restart postgresql\n  sudo systemctl enable postgresql\nelse\necho \"We have no automated procedures for this ${DISTRO} system\"\nfi\n
      "},{"location":"Appendix/postgres/#create-user-in-postgres","title":"Create User in Postgres","text":"

      Login to Postgres instance as superuser:

      echo $(whoami)\n# <user>\nsudo su postgres\npsql\n

Note the <user> returned as the output of the echo $(whoami) command. Replace all instances of <user> in the documentation below. Execute the below at the psql prompt, replacing <user> and PasswordYouWant with your OS user (the output of the echo $(whoami) command executed above) and a password you'd like to authenticate to Postgres with:

      CREATE ROLE <user> SUPERUSER LOGIN;\nALTER USER <user> PASSWORD 'PasswordYouWant';\n\\q\n
Type exit at the shell to return to your user from postgres.

      "},{"location":"Appendix/postgres/#verify-login-to-postgres-instance","title":"Verify Login to postgres instance","text":"
      export PGPASSFILE=$CNODE_HOME/priv/.pgpass\necho \"/var/run/postgresql:5432:cexplorer:*:*\" > $PGPASSFILE\nchmod 0600 $PGPASSFILE\npsql postgres\n# psql (15.0)\n# Type \"help\" for help.\n# \n# postgres=#\n
      "},{"location":"Appendix/postgres/#tuning-your-instance","title":"Tuning your instance","text":"

Before you start populating your DB instance using dbsync data, now might be a good time to put some thought into the baseline configuration of your postgres instance by editing /etc/postgresql/15/main/postgresql.conf. Typically, you might find a lot of common standard-practice parameters available in tuning guides. For our consideration, it would be nice to start with some baselines - for which we will use inputs from the example here, which would need to be customised further to your environment and resources.

In a typical Koios [gRest] setup, based on minimum viable specs (i.e. 64GB RAM, > 8 CPUs, >16K IOPs for ioping -q -S512M -L -c 10 -s8k . output when the postgres data directory is on ZFS configured with max ARC of 4GB), we find the below configuration to be the best common setup:

Parameter | Value | Comment\ndata_directory | '/opt/cardano/cnode/guild-db/pgdb/15' | Move postgres data directory to ZFS mount at /opt/cardano/cnode, ensure it's writable by postgres user\neffective_cache_size | 8GB | Be conservative as Node and DBSync by themselves will need ~32-40GB of RAM if ledger-state is enabled\neffective_io_concurrency | 4 | Can go higher if you have substantially higher IOPs/IO throughputs\nlc_time | 'en_US.UTF-8' | Just to use standard server-side time formatting between instances, can adapt to your preferences\nlog_timezone | 'UTC' | For consistency, to avoid timezone confusions\nmaintenance_work_mem | 512MB | Helps with vacuum/index/foreign key maintenance (with 4 workers, it's set to max 2GB)\nmax_connections | 200 | Allow maximum of 200 connections, the koios connections are still controlled via postgrest db-pool\nmax_parallel_maintenance_workers | 4 | Max workers postgres will use for maintenance\nmax_parallel_workers | 4 | Max workers postgres will use across the system\nmax_parallel_workers_per_gather | 2 | Parallel threads per query, do not increase to higher values as it will multiply memory usage\nmax_wal_size | 4GB | Used for WAL automatic checkpoints (disabled later)\nmax_worker_processes | 4 | Maximum number of background processes system can support\nmin_wal_size | 1GB | Used for WAL automatic checkpoints (disabled later)\nrandom_page_cost | 1.1 | Use higher value if IOPs has trouble catching up (you can use 4 instead of 1.1)\nshared_buffers | 4GB | Conservative limit to allow for node/dbsync/zfs memory usage\ntimezone | 'UTC' | For consistency, to avoid timezone confusions\nwal_buffers | 16MB | WAL consumption in shared buffer (disabled later)\nwork_mem | 16MB | Base memory size before writing to temporary disk files

In addition to the above, due to the nature of usage by dbsync (syncing from node and, on restart, traversing back to the last saved ledger-state snapshot), we leverage the data retention of the blockchain itself - as we're not affected by loss of volatile information upon a restart of the instance. Thus, we can relax some of the data retention and corruption-protection settings, as those cost IOPs/CPU load average that the instance does not need to spend. We'd recommend setting the 3 below in your /etc/postgresql/15/main/postgresql.conf:

Parameter | Value\nwal_level | minimal\nmax_wal_senders | 0\nsynchronous_commit | off
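
Applied to the config file, the above three settings (taken directly from the table) would simply appear as the lines below in /etc/postgresql/15/main/postgresql.conf:

wal_level = minimal\nmax_wal_senders = 0\nsynchronous_commit = off\n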

      Once your changes are done, ensure to restart postgres service using sudo systemctl restart postgresql.

      "},{"location":"Build/dbsync/","title":"DBSync","text":"

      Important

      An average pool operator may not require cardano-db-sync at all. Please verify if it is required for your use as mentioned here.

      "},{"location":"Build/dbsync/#build-instructions","title":"Build Instructions","text":""},{"location":"Build/dbsync/#clone-the-repository","title":"Clone the repository","text":"

      Execute the below to clone the cardano-db-sync repository to $HOME/git folder on your system:

      cd ~/git\ngit clone https://github.com/intersectmbo/cardano-db-sync\ncd cardano-db-sync\n
      "},{"location":"Build/dbsync/#build-cardano-db-sync","title":"Build Cardano DB Sync","text":"

      You can use the instructions below to build the latest release of cardano-db-sync.

      git fetch --tags --all\ngit pull\n# Include the cardano-crypto-praos and libsodium components for db-sync\n# On CentOS 7 (GCC 4.8.5) we should also do\n# echo -e \"package cryptonite\\n  flags: -use_target_attributes\" >> cabal.project.local\n# Replace tag against checkout if you do not want to build the latest released version\ngit checkout $(curl -sLf https://api.github.com/repos/intersectmbo/cardano-db-sync/releases/latest | jq -r .tag_name)\n# Use `-l` argument if you'd like to use system libsodium instead of IOG fork of libsodium while compiling\n$CNODE_HOME/scripts/cabal-build-all.sh\n
      The above would copy the cardano-db-sync binary into ~/.local/bin folder.

      "},{"location":"Build/dbsync/#prepare-db-for-sync","title":"Prepare DB for sync","text":"

Now that the binaries are available, let's create our database (when going through breaking changes, you may need to use --recreatedb instead of the --createdb used the first time). Again, we expect that the PGPASSFILE environment variable is already set (refer to the top of this guide for sample instructions):

      cd ~/git/cardano-db-sync\n# scripts/postgresql-setup.sh --dropdb #if exists already, will fail if it doesnt - thats OK\nscripts/postgresql-setup.sh --createdb\n# Password:\n# Password:\n# All good!\n

      Verify you can see \"All good!\" as above!

      "},{"location":"Build/dbsync/#create-symlink-to-schema-folder","title":"Create Symlink to schema folder","text":"

The DBSync instance requires the schema files from the git repository to be present and available to the dbsync instance. You can either copy the ~/git/cardano-db-sync/schema folder OR create a symlink to the folder and make it available to the startup command we will be using. We will use the latter in the sample below:

      ln -s ~/git/cardano-db-sync/schema $CNODE_HOME/guild-db/schema\n
      "},{"location":"Build/dbsync/#restore-using-snapshot","title":"Restore using Snapshot","text":"

If you're running a mainnet/preview/preprod instance of dbsync, you might want to consider use of dbsync snapshots as documented here. The snapshot files as of a recent epoch are available via links in the release notes.

At a high level, this would involve the steps below (read and update paths as per your environment):

      # Replace the actual link below with the latest one from release notes\nwget https://update-cardano-mainnet.iohk.io/cardano-db-sync/13/db-sync-snapshot-schema-13-block-7622755-x86_64.tgz\nrm -rf ${CNODE_HOME}/guild-db/ledger-state ; mkdir -p ${CNODE_HOME}/guild-db/ledger-state\ncd -; cd ~/git/cardano-db-sync\nscripts/postgresql-setup.sh --restore-snapshot /tmp/dbsyncsnap.tgz ${CNODE_HOME}/guild-db/ledger-state\n# The restore may take a while, please be patient and do not interrupt the restore process. Once restore is successful, you may delete the downloaded snapshot as below:\n#   rm -f /tmp/dbsyncsnap.tgz\n
      "},{"location":"Build/dbsync/#test-running-dbsync-manually-at-terminal","title":"Test running dbsync manually at terminal","text":"

In order to verify that you can run dbsync before making a start, you'd want to ensure that you can run it interactively once. To do so, try the commands below:

      cd $CNODE_HOME/scripts\nexport PGPASSFILE=$CNODE_HOME/priv/.pgpass\n./dbsync.sh\n

You can monitor logs if needed via a parallel session using tail -10f $CNODE_HOME/logs/dbsync.json. If there are no errors, you would want to press Ctrl-C to stop the dbsync.sh execution and deploy it as a systemd service. To do so, use the commands below (the creation of the file is done using sudo permissions, but you can always deploy it manually):

      cd $CNODE_HOME/scripts\n./dbsync.sh -d\n# Deploying cnode-dbsync.service as systemd service..\n# cnode-dbsync.service deployed successfully!!\n

Now, to start the dbsync instance, you can run sudo systemctl start cnode-dbsync
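
To confirm the service came up fine (a quick check, assuming the systemd deployment above), you can inspect its status and recent logs:

sudo systemctl status cnode-dbsync\nsudo journalctl -u cnode-dbsync --since \"10 min ago\"\n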

      Note

Note that while dbsync syncs, it might defer creation of indexes/constraints to speed up the initial catch-up. Once relatively closer to tip, it will initiate creation of indexes - which can take a while in the background. Thus, you might notice that query timings right after reaching tip might not be as good.
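
If you're curious whether such an index build is currently running (a hedged example - the progress view below exists on Postgres 12 and later, including the Postgres 15 used in this guide), you can check from psql:

select * from pg_stat_progress_create_index;\n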

      "},{"location":"Build/dbsync/#update-dbsync","title":"Update DBSync","text":"

Updating dbsync can involve different tasks depending on the versions involved. We attempt to briefly explain them below:

      "},{"location":"Build/dbsync/#validation","title":"Validation","text":"

      To validate, connect to your postgres instance and execute commands as per below:

      export PGPASSFILE=$CNODE_HOME/priv/.pgpass\npsql cexplorer\n

You should now be at the psql prompt; you can check the tables and verify they're populated:

      \\dt\nselect * from meta;\n

      A sample output of the above two commands may look like below (the number of tables and names may vary between versions):

      cexplorer=# \\dt\nList of relations\n Schema |           Name            | Type  | Owner\n--------+---------------------------+-------+-------\n public | ada_pots                  | table | centos\n public | admin_user                | table | centos\n public | block                     | table | centos\n public | delegation                | table | centos\n public | delisted_pool             | table | centos\n public | epoch                     | table | centos\n public | epoch_param               | table | centos\n public | epoch_stake               | table | centos\n public | ma_tx_mint                | table | centos\n public | ma_tx_out                 | table | centos\n public | meta                      | table | centos\n public | orphaned_reward           | table | centos\n public | param_proposal            | table | centos\n public | pool_hash                 | table | centos\n public | pool_meta_data            | table | centos\n public | pool_metadata             | table | centos\n public | pool_metadata_fetch_error | table | centos\n public | pool_metadata_ref         | table | centos\n public | pool_owner                | table | centos\n public | pool_relay                | table | centos\n public | pool_retire               | table | centos\n public | pool_update               | table | centos\n public | pot_transfer              | table | centos\n public | reserve                   | table | centos\n public | reserved_ticker           | table | centos\n public | reward                    | table | centos\n public | schema_version            | table | centos\n public | slot_leader               | table | centos\n public | stake_address             | table | centos\n public | stake_deregistration      | table | centos\n public | stake_registration        | table | centos\n public | treasury                  | table | centos\n public | tx                        | table | centos\n public | tx_in                     | table | centos\n public | tx_metadata               | table | centos\n public | tx_out                    | table | centos\n public | withdrawal                | table | centos\n(37 rows)\n\n\n\nselect * from meta;\n id |     start_time      | network_name\n----+---------------------+--------------\n  1 | 2017-09-23 21:44:51 | mainnet\n(1 row)\n
      "},{"location":"Build/grest-changelog/","title":"Koios gRest Changelog","text":""},{"location":"Build/grest-changelog/#110-for-all-networks","title":"[1.1.0] - For all networks.","text":"

This will be the first major [breaking] release for Koios consumers in a while, and will be rolled out under a new base prefix (/api/v1). The major work with this release was to start making use of newer flags in dbsync which help the performance of queries under new endpoints. Please ensure to check out the release notes for 1.1.0rc below. The list for this section is only a small addendum to 1.1.0rc:

      "},{"location":"Build/grest-changelog/#chores","title":"Chores:","text":""},{"location":"Build/grest-changelog/#110rc-for-all-networks","title":"[1.1.0rc] - For all networks.","text":"

This will be the first major [breaking] release for Koios consumers in a while, and will be rolled out under a new base prefix (/api/v1). The major work with this release was to start making use of newer flags in dbsync which help the performance of queries under new endpoints. Also, you'd see quite a few new endpoint additions below that help with slightly lighter versions of queries. To keep migration paths easier, we will ensure both v0 and v1 versions of the release are up for a month post release, before retiring v0.

      "},{"location":"Build/grest-changelog/#new-endpoints-added","title":"New endpoints added:","text":""},{"location":"Build/grest-changelog/#data-inputoutput-changes","title":"Data Input/Output Changes:","text":""},{"location":"Build/grest-changelog/#deprecations","title":"Deprecations:","text":""},{"location":"Build/grest-changelog/#chores_1","title":"Chores:","text":""},{"location":"Build/grest-changelog/#1010-for-all-networks","title":"[1.0.10] - For all networks.","text":"

The release is effectively the same as 1.0.10rc, except with one minor modification below.

      "},{"location":"Build/grest-changelog/#chores_2","title":"Chores:","text":""},{"location":"Build/grest-changelog/#1010rc-for-non-mainnet-networks","title":"[1.0.10rc] - For non-mainnet networks","text":"

This release primarily focuses on the ability to better support DeFi projects, along with some value addition for existing clients by bringing in 10 new endpoints (paired with 2 deprecations), a few additional optional input parameters, and some additional output columns to existing endpoints. The only breaking change/fix is for the output returned for tx_info.

      Also, dbsync 13.1.x.x has been released and is recommended to be used for this release

      "},{"location":"Build/grest-changelog/#new-endpoints-added_1","title":"New endpoints added","text":""},{"location":"Build/grest-changelog/#data-inputoutput-changes_1","title":"Data Input/Output Changes","text":""},{"location":"Build/grest-changelog/#deprecations_1","title":"Deprecations:","text":""},{"location":"Build/grest-changelog/#chores_3","title":"Chores:","text":""},{"location":"Build/grest-changelog/#109-for-all-networks","title":"[1.0.9] - For all networks","text":"

This release is effectively the same as 1.0.9rc below (please check out the notes accordingly), just with a minor bug fix on setup-grest.sh itself.

      "},{"location":"Build/grest-changelog/#109rc-for-non-mainnet-networks","title":"[1.0.9rc] - For non-mainnet networks","text":"

This release candidate is non-breaking for existing methods and inputs, but breaking for the output objects of endpoints. The aim with the release candidate version is to allow folks a couple of weeks to test and adapt their libraries before applying to mainnet.

      "},{"location":"Build/grest-changelog/#new-endpoints-added_2","title":"New endpoints added","text":""},{"location":"Build/grest-changelog/#data-inputoutput-changes_2","title":"Data Input/Output changes","text":""},{"location":"Build/grest-changelog/#changes-for-instance-providers","title":"Changes for Instance Providers","text":""},{"location":"Build/grest-changelog/#108-for-all-networks","title":"[1.0.8] - For all networks","text":"

This release contains minor bug fixes that were discovered in koios-1.0.7. No major changes to output for this one.

      "},{"location":"Build/grest-changelog/#changes-for-api","title":"Changes for API","text":""},{"location":"Build/grest-changelog/#new-endpoints-added_3","title":"New endpoints added","text":""},{"location":"Build/grest-changelog/#data-inputoutput-changes_3","title":"Data Input/Output changes","text":""},{"location":"Build/grest-changelog/#changes-for-instance-providers_1","title":"Changes for Instance Providers","text":""},{"location":"Build/grest-changelog/#107-for-all-networks","title":"[1.0.7] - For all networks","text":"

This release continues updates from koios-1.0.6 to further utilise stake-snapshot cache tables, which would be useful for SPOs as well as reduce downtime post epoch transition. One largely requested feature - accepting bulk inputs for many block/address/account endpoints - is now complete. Additionally, koios instance providers are now recommended to use cardano-node 1.35.3 with dbsync 13.0.5.

      "},{"location":"Build/grest-changelog/#changes-for-api_1","title":"Changes for API","text":""},{"location":"Build/grest-changelog/#new-endpoints-added_4","title":"New endpoints added","text":""},{"location":"Build/grest-changelog/#data-inputoutput-changes_4","title":"Data Input/Output changes","text":""},{"location":"Build/grest-changelog/#changes-for-instance-providers_2","title":"Changes for Instance Providers","text":""},{"location":"Build/grest-changelog/#106106m-interim-release-for-all-networks-to-upgrade-to-dbsync-v13","title":"[1.0.6/1.0.6m] - Interim release for all networks to upgrade to dbsync v13","text":"

The backlog of items not being added to mainnet had been increasing due to delays with the Vasil HFC event on Mainnet. As such, we had to come up with a split update approach. The mainnet nodes are still not qualified to be Vasil-ready (in our opinion) for 1.35.x, but dbsync 13 can be used against node 1.34.1 fine. In order to cater for this split, we have added an intermediate koios-1.0.6m tag that brings in dbsync updates while maintaining node 1.34.1.

      "},{"location":"Build/grest-changelog/#changes-for-api_2","title":"Changes for API","text":""},{"location":"Build/grest-changelog/#data-output-changes","title":"Data Output Changes","text":""},{"location":"Build/grest-changelog/#changes-for-instance-providers_3","title":"Changes for Instance Providers","text":""},{"location":"Build/grest-changelog/#105-alpha-networks-only","title":"[1.0.5] - alpha networks only","text":"

Since there have been a few deviations wrt Vasil for testnet and mainnet, this version only targets networks other than Mainnet!

      "},{"location":"Build/grest-changelog/#changes-for-api_3","title":"Changes for API","text":""},{"location":"Build/grest-changelog/#data-output-changes_1","title":"Data Output Changes","text":""},{"location":"Build/grest-changelog/#changes-for-instance-providers_4","title":"Changes for Instance Providers","text":""},{"location":"Build/grest-changelog/#101","title":"[1.0.1]","text":""},{"location":"Build/grest-changelog/#100","title":"[1.0.0]","text":""},{"location":"Build/grest-changelog/#100-rc1","title":"[1.0.0-rc1]","text":""},{"location":"Build/grest-changelog/#changes-for-api_4","title":"Changes for API","text":""},{"location":"Build/grest-changelog/#data-output-changes_2","title":"Data Output Changes","text":""},{"location":"Build/grest-changelog/#input-parameter-changes","title":"Input Parameter Changes","text":""},{"location":"Build/grest-changelog/#changes-for-instance-providers_5","title":"Changes for Instance Providers","text":""},{"location":"Build/grest-changelog/#added","title":"Added","text":""},{"location":"Build/grest-changelog/#fixed","title":"Fixed","text":""},{"location":"Build/grest-changelog/#100-rc0-2022-04-29","title":"[1.0.0-rc0] - 2022-04-29","text":""},{"location":"Build/grest/","title":"Koios gRest","text":"

      Important

      "},{"location":"Build/grest/#what-is-grest","title":"What is gRest","text":"

gRest is an open source implementation of a query layer built over dbsync using PostgREST and HAProxy. The package is built as part of the Koios team's efforts to unite the community's individual streams of work, and to give back a more aligned structure for querying dbsync, adopting standardisation of queries utilising open-source tooling and collaboration. In addition, there are also accessibility features to deploy rules for failover, do healthchecks, set up priorities, prevent DDoS attacks, provide timeouts, report tips for analysis over a longer period, etc - which can prove to be really useful when performing any analysis for instances.

      Note

Note that the scripts below do allow for provisioning ogmios integration too, but Ogmios - currently - is not designed to provide advanced session management for a server-client architecture in the absence of a middleware. Thus, the availability of ogmios from the monitoring instance is restricted, to avoid the ability to DDoS an instance.

      "},{"location":"Build/grest/#components","title":"Components","text":"
1. PostgREST: An RPC JSON interface for any PostgreSQL database (in our case, the database served via cardano-db-sync) to provide a RESTful Web Service. The endpoints of PostgREST are essentially the tables/functions defined in the elected schema via the grest config file. You can read more about advanced query syntax using the PostgREST API here, but we will provide a simpler view using examples towards the end of the page. It is an easy alternative - with almost no overhead, as it directly serves the underlying database as an API - compared to the Cardano GraphQL component (which may often have lags). Some of the other advantages of PostgREST over graphql-based projects are performance, being stateless, and support for JWT / native Postgres DB authentication against the REST interface.

2. HAProxy: An easy gateway proxy that automatically provides failover/basic DDoS protection, rules management for load balancing, the ability to set up multiple frontends/backends, an easy means to have TLS enabled for public-facing instances, etc. You may alter the settings for the proxy layer as per your SecOps preferences. This component is optional (eg: if you prefer to expose your PostgREST server itself, you can do so using similar steps below).

      "},{"location":"Build/grest/#setup","title":"Setup gRest services","text":"

To start with, you'd want to ensure your current shell session has access to the Postgres credentials, continuing from the examples in the above-mentioned Sample Postgres deployment guide.

      cd $CNODE_HOME/priv\nPGPASSFILE=$CNODE_HOME/priv/.pgpass\npsql cexplorer\n

Ensure that you can connect to your Postgres DB fine using the above (quit from psql once validated using \\q). As part of guild-deploy.sh execution, you'd find the setup-grest.sh file made available in the ${CNODE_HOME}/scripts folder, which will help you automate the installation of PostgREST and HAProxy, as well as bring in the latest queries/functions provided via Koios to your instance.

      Warning

As of now, gRest services are in alpha stage - while they can be utilised, please remember there may be breaking changes, and every collaborator is expected to work with the team to keep their instances up-to-date using the alpha branch.

Familiarise yourself with the usage options for the setup script; the syntax can be viewed as below:

      cd \"${CNODE_HOME}\"/scripts\n./setup-grest.sh -h\n#\n# Usage: setup-grest.sh [-f] [-i [p][r][m][c][d]] [-u] [-b <branch>]\n# \n# Install and setup haproxy, PostgREST, polling services and create systemd services for haproxy, postgREST and dbsync\n# \n# -f    Force overwrite of all files including normally saved user config sections\n# -i    Set-up Components individually. If this option is not specified, components will only be installed if found missing (eg: -i prcd)\n#     p    Install/Update PostgREST binaries by downloading latest release from github.\n#     r    (Re-)Install Reverse Proxy Monitoring Layer (haproxy) binaries and config\n#     m    Install/Update Monitoring agent scripts\n#     c    Overwrite haproxy, postgREST configs\n#     d    Overwrite systemd definitions\n# -u    Skip update check for setup script itself\n# -q    Run all DB Queries to update on postgres (includes creating grest schema, and re-creating views/genesis table/functions/triggers and setting up cron jobs)\n# -b    Use alternate branch of scripts to download - only recommended for testing/development (Default: master)\n#\n

      To run the setup overwriting all standard deployment tasks from a branch (eg: koios-1.0.9 branch), you may want to use:

      ./setup-grest.sh -f -i prmcd -r -q -b koios-1.0.9\n

      Similarly - if you'd like to re-install all components and force overwrite all configs but not reset cache tables, you may run:

      ./setup-grest.sh -f -i prmcd -q\n

      Another example could be to preserve your config, but only update queries using an alternate branch (eg: let's say you want to try the branch alpha prior to a tagged release). To do so, you may run:

      ./setup-grest.sh -q -b alpha\n

      Please ensure to follow the on-screen instructions, if any (for example restarting deployed services, or updating configs to specify correct target postgres URLs/enable TLS/add peers etc in ${CNODE_HOME}/priv/grest.conf and ${CNODE_HOME}/files/haproxy.cfg).

The default ports used will make the haproxy instance available at port 8053 (or 8453 if TLS is enabled) - you might want to enable a firewall rule to open this port to the services you would like to access. If you want to prevent unauthenticated access to the grest schema, uncomment the jwt-secret and specify a custom secret-token.
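
As an illustration of the latter (a sketch only - the secret value is hypothetical and should be replaced with your own long random string), the relevant line in ${CNODE_HOME}/priv/grest.conf would look something like:

jwt-secret = \"replace-with-your-own-long-random-secret\"\n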

      Reminder

Once you've successfully deployed the grest instance, it will deploy certain cron jobs that will ensure the relevant cache tables are updated periodically. Until these have finished (especially on first run, it could take an hour or so on mainnet), your instance will likely not pass any tests from grest-poll.sh - but that's expected.

      "},{"location":"Build/grest/#tls","title":"Enable TLS on HAProxy","text":"

In order to enable SSL on your haproxy, all you need to do is edit the file ${CNODE_HOME}/files/haproxy.cfg and update the frontend app section to uncomment the ssl bind (and comment the normal bind).

      Info

If you're not familiar with how to configure TLS, OR would prefer not to buy one, you can find tips on how to create a TLS certificate for free via LetsEncrypt using tutorials here. Once you do have a TLS certificate generated, you need to chain the private key and full chain cert together in a file - /etc/ssl/server.pem - which can then be referenced as below:

      frontend app\n  #bind 0.0.0.0:8053\n  ## If using SSL, comment line above and uncomment line below\n  bind :8453 ssl crt /etc/ssl/server.pem no-sslv3\n  http-request set-log-level silent\n  acl srv_down nbsrv(grest_postgrest) eq 0\n  acl is_wss hdr(Upgrade) -i websocket\n  ...\n
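
If you generated the certificate via LetsEncrypt as suggested above, producing the chained file could look like the below (a sketch - paths assume certbot defaults and a hypothetical domain example.com):

sudo bash -c 'cat /etc/letsencrypt/live/example.com/fullchain.pem /etc/letsencrypt/live/example.com/privkey.pem > /etc/ssl/server.pem'\nsudo chmod 600 /etc/ssl/server.pem\n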
      Restart haproxy service for changes to take effect.

      "},{"location":"Build/grest/#validation","title":"Validation","text":"

With the setup, you also have a checkstatus.sh script, which will query the Postgres DB instance via haproxy (coming through postgREST), and will only show an instance as up if the latest block in your DB instance is within 180 seconds.

      Important

If you'd like to participate by joining the elastic cluster via Koios, please raise a PR request by editing the topology files in this folder to do so!!

      If you were using guild network, you could do a couple of very basic sanity checks as per below:

      1. To query active stake for pool pool1z2ry6kxywgvdxv26g06mdywynvs7jj3uemnxv273mr5esukljsr in epoch 122, we can execute the below:

        curl -d _pool_bech32=pool1z2ry6kxywgvdxv26g06mdywynvs7jj3uemnxv273mr5esukljsr -d _epoch_no=122 -s http://localhost:8053/rpc/pool_active_stake\n## {\"active_stake_sum\" : 19409732875}\n

      2. To check latest owner key(s) for a given pool pool1z2ry6kxywgvdxv26g06mdywynvs7jj3uemnxv273mr5esukljsr, you can execute the below:

        curl -d _pool_bech32=pool1z2ry6kxywgvdxv26g06mdywynvs7jj3uemnxv273mr5esukljsr -s http://localhost:8050/rpc/pool_owners\n## [{\"owner\" : \"stake_test1upx5p04dn3t6dvhfh27744su35vvasgaaq565jdxwlxfq5sdjwksw\"}, {\"owner\" : \"stake_test1uqak99cgtrtpean8wqwp7d9taaqkt9gkkxga05m5azcg27chnzfry\"}]\n

You may want to explore all the endpoints that come out of the box and test them out; to do so, refer to the API documentation for OpenAPI3 documentation. Each endpoint has a pre-filled example for mainnet and connects by default to the primary Koios endpoint, allowing you to test endpoints and, if needed, grab the curl commands to start testing against your local or remote instances.

      "},{"location":"Build/grest/#participating-in-koios-cluster-as-instance-provider","title":"Participating in Koios Cluster as instance Provider","text":"

      If you're interested to participate in decentralised infrastructure by providing an instance, there are a few additional steps you'd need:

      1. Enable ports for your HAProxy instance (default: 8053), gRest Exporter service (default: 8059) and (optionally) submit API instance (default: 8090) against the monitoring instance (do not need to open these ports to internet) of corresponding network.

2. Ensure that each of the services above is listening on your public IP address (for instance, submitapi.sh might need to be edited to change HOSTADDR to 0.0.0.0 and restarted - see the sketch after this list).

      3. Create a PR specifying connectivity information to your HAProxy port here.

      4. Make sure to join the telegram discussions group to participate in any discussions, actions, polls for new-features, etc. Feel free to give a shout in the group in case you have trouble following any of the above
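
As a sketch for point 2 above (illustrative only - the sed pattern assumes HOSTADDR appears uncommented in the user variables section of submitapi.sh, so review the file before and after editing):

cd $CNODE_HOME/scripts\nsed -i 's/^HOSTADDR=.*/HOSTADDR=0.0.0.0/' submitapi.sh\n# restart the submit api service afterwards for the change to take effect\n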

      "},{"location":"Build/node-cli/","title":"Node & CLI","text":"

      Reminder !!

      Ensure the Pre-Requisites are in place before you proceed.

      "},{"location":"Build/node-cli/#build-instructions","title":"Build Instructions","text":""},{"location":"Build/node-cli/#clone-the-repository","title":"Clone the repository","text":"

      Execute the below to clone the cardano-node repository to $HOME/git folder on your system:

      cd ~/git\ngit clone https://github.com/intersectmbo/cardano-node\ncd cardano-node\n
      "},{"location":"Build/node-cli/#build-cardano-node","title":"Build Cardano Node","text":"

      You can use the instructions below to build the latest release of cardano-node.

git fetch --tags --recurse-submodules --all\ngit pull\n# Replace the tag in the checkout below if you do not want to build the latest released version; we recommend using battle-tested node versions - which may not always be the latest\ngit checkout $(curl -sLf https://api.github.com/repos/intersectmbo/cardano-node/releases/latest | jq -r .tag_name)\n\n# Use the `-l` argument if you'd like to use system libsodium instead of the IOG fork of libsodium while compiling\n$CNODE_HOME/scripts/cabal-build-all.sh\n

      The above would copy the binaries built into ~/.local/bin folder.

      "},{"location":"Build/node-cli/#download-pre-compiled-binary-from-node-release","title":"Download pre-compiled Binary from Node release","text":"

While certain folks might want to build the node themselves (could be due to OS/arch compatibility, trust factor or customisations), for most it might not make sense to build the node locally. Instead, you can download the binaries using the cardano-node release notes, wherein you can find the download links for every version. This is already taken care of by guild-deploy.sh if you used the option to download binaries (you can always re-run it with specific arguments if unsure).
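
If you'd rather fetch the artefacts by hand, the GitHub API can list the downloadable assets of the latest release - a small illustration (pick the asset matching your OS/architecture from the output):

# list download URLs for all assets of the latest cardano-node release\ncurl -sL https://api.github.com/repos/intersectmbo/cardano-node/releases/latest | jq -r '.assets[].browser_download_url'\n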

      "},{"location":"Build/node-cli/#verify","title":"Verify","text":"

Execute cardano-cli and cardano-node to verify the output as below (the exact version and git rev will depend on the tag you checked out from the github repository):

      cardano-cli version\n# cardano-cli 8.x.x - linux-x86_64 - ghc-8.10\n# git rev <...>\ncardano-node version\n# cardano-node 8.x.x - linux-x86_64 - ghc-8.10\n# git rev <...>\n
      "},{"location":"Build/node-cli/#update-port-number-or-pool-name-for-relative-paths","title":"Update port number or pool name for relative paths","text":"

Before you go ahead with starting your node, you may want to update values for CNODE_PORT in $CNODE_HOME/scripts/env. Note that it is imperative for operational relays and pools to ensure that the port mentioned is opened via firewall to the destinations your node is supposed to connect from. Update your network/firewall configuration accordingly. Future executions of guild-deploy.sh will preserve and not overwrite these values (or at least back them up if forced to overwrite).

      CNODEBIN=\"${HOME}/.local/bin/cardano-node\"\nCCLI=\"${HOME}/.local/bin/cardano-cli\"\nCNODE_PORT=6000\nPOOL_NAME=\"GUILD\"\n
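
As an illustration of the firewall note above, a rule for the port configured in the snippet could look like below (assuming ufw is your firewall of choice; tighten the source to your peers' IPs where applicable):

# allow inbound TCP on the node port set via CNODE_PORT\nsudo ufw allow proto tcp from any to any port 6000\n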

      Important

POOL_NAME is the name of the folder that you will use when registering pools and starting the node in core mode. This folder would typically contain the required hot.skey, vrf.skey and op.cert files. If the mentioned files are absent (expected if this is a fresh install), the node will automatically start in relay mode.
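
For example, with POOL_NAME set to GUILD as above, a fully populated core node folder would contain the three files below (a hypothetical listing for illustration):

ls \"${CNODE_HOME}\"/priv/pool/GUILD\n# hot.skey  op.cert  vrf.skey\n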

      "},{"location":"Build/node-cli/#start-the-node","title":"Start the node","text":"

To test starting the node in interactive mode, we will make use of the pre-built script cnode.sh. This script automatically determines whether to start the node as a relay or block producer (if the required pool keys are present in the $CNODE_HOME/priv/pool/<POOL_NAME> folder as mentioned above). The script contains a user-defined variable CPU_CORES which determines the number of CPU cores the node will use upon start-up:

      ######################################\n# User Variables - Change as desired #\n# Common variables set in env file   #\n######################################\n\n#CPU_CORES=4            # Number of CPU cores cardano-node process has access to (please don't set higher than physical core count, 4 recommended)\n

      Now let's test starting the node in interactive mode.

      Note

At this stage, upon executing cnode.sh, you should see the live config and a line ending with Listening on http://127.0.0.1:12798 - this is expected, as your logs are being written to $CNODE_HOME/logs/node.json. If so, you should be alright to return to your console by pressing Ctrl-C. The node will be started later using systemd (Linux's service management) as per the instructions below. In case you receive any errors, please troubleshoot and fix those before proceeding.

      cd \"${CNODE_HOME}\"/scripts\n./cnode.sh\n

Press Ctrl-C to exit the node and return to the console.

      "},{"location":"Build/node-cli/#modify-the-nodes-config-files","title":"Modify the node's config files","text":"

Now that you've tested the basic node operation, you might want to customise your config files (assuming you are in the top-level folder, i.e. cd \"${CNODE_HOME}\"):

1. files/config.json : This file contains the logging configuration (tracers to tune logging, paths to other genesis config files, address/ports on which the prometheus/EKG monitoring will listen, etc). Unless running more than one node on the same machine (not recommended), you should be alright to use most of this file as-is. You might - however - want to double-check PeerSharing in this file: if using a relay node where you'd like connecting peers (marked as \"advertise\": \"true\" in topology.json) to be shared, you may turn this setting to true (a jq one-liner to inspect or toggle it is sketched after the list below).

2. files/topology.json : This file tells your node how to connect to other nodes (especially initially to start syncing). You would want to update this file as below:

  • Update the localRoots > accessPoints section to include the local nodes that you want a persistent connection to (eg: your BP and own relay nodes), under the definition where trustable is set to true.
  • If you want specific peers to be advertised on the network for discovery, you may set advertise to true for that peer group. You do NOT want to do that on your BP.
  • You'd want to update localRoots > valency (valency is the same as hotValency, not yet replaced since the example in the cardano-node-wiki repo still suggests valency) to the number of localRoots peers that you always want to keep an active connection to for that node.
  • [Optional] - you can add/remove nodes from the publicRoots section as well as localRoots > accessPoints as desired, though the populated defaults should work fine. On mainnet, we did add a few additional nodes to help add more redundancy for the initial sync.
  • useLedgerAfterSlot tells the node to establish networking with the defined peers to sync the node initially until reaching the given absolute slot number, after which it can start attempting to connect to peers registered as pool relays on the network. You may want this number to be relatively recent (eg: not 50 epochs old).
        • You can read further about topology file configuration here
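
As referenced in point 1 above, you can inspect or toggle the PeerSharing setting without opening an editor, e.g. using jq (a minimal sketch; the edit writes to a temp file first, since jq cannot modify files in place):

jq '.PeerSharing' files/config.json\n# enable peer sharing on a relay (do NOT do this on your BP)\njq '.PeerSharing = true' files/config.json > files/config.json.tmp && mv files/config.json.tmp files/config.json\n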

      Important

You'd want to set useLedgerAfterSlot to -1 for your Block Producing (Core) node - thereby telling your Core node to remain in non-P2P mode - and ensure PeerSharing is set to false.

      The resultant topology file could look something like below:

      {\n\"bootstrapPeers\": [\n{\n\"address\": \"backbone.cardano.iog.io\",\n\"port\": 3001\n},\n{\n\"address\": \"backbone.mainnet.emurgornd.com\",\n\"port\": 3001\n}\n],\n\"localRoots\": [\n{\n\"accessPoints\": [\n{\"address\": \"xx.xx.xx.xx\", \"port\": 6000 },\n{\"address\": \"xx.xx.xx.yy\", \"port\": 6000 }\n],\n\"advertise\": false,\n\"trustable\": true,\n\"valency\": 2\n},\n{\n\"accessPoints\": [\n{\"address\": \"node-dus.poolunder.com\",           \"port\": 6900, \"pool\": \"UNDR\",   \"location\": \"EU/DE/Dusseldorf\" },\n{\"address\": \"node-syd.poolunder.com\",           \"port\": 6900, \"pool\": \"UNDR\",   \"location\": \"OC/AU/Sydney\" },\n{\"address\": \"194.36.145.157\",                   \"port\": 6000, \"pool\": \"RDLRT\",  \"location\": \"EU/DE/Baden\" },\n{\"address\": \"152.53.18.60\",                     \"port\": 6000, \"pool\": \"RDLRT\",  \"location\": \"NA/US/StLouis\" },\n{\"address\": \"148.72.153.168\",                   \"port\": 16000, \"pool\": \"AAA\",   \"location\": \"US/StLouis\" },\n{\"address\": \"78.47.99.41\",                      \"port\": 6000, \"pool\": \"AAA\",    \"location\": \"EU/DE/Nuremberg\" },\n{\"address\": \"relay1-pub.ahlnet.nu\",             \"port\": 2111, \"pool\": \"AHL\",    \"location\": \"EU/SE/Malmo\" },\n{\"address\": \"relay2-pub.ahlnet.nu\",             \"port\": 2111, \"pool\": \"AHL\",    \"location\": \"EU/SE/Malmo\" },\n{\"address\": \"relay1.clio.one\",                  \"port\": 6010, \"pool\": \"CLIO\",   \"location\": \"EU/IT/Milan\" },\n{\"address\": \"relay2.clio.one\",                  \"port\": 6010, \"pool\": \"CLIO\",   \"location\": \"EU/IT/Bozlano\" },\n{\"address\": \"relay3.clio.one\",                  \"port\": 6010, \"pool\": \"CLIO\",   \"location\": \"EU/IT/Bozlano\" }\n],\n\"advertise\": false,\n\"trustable\": false,\n\"valency\": 5,\n\"warmValency\": 10\n}\n],\n\"publicRoots\": [\n{\n\"accessPoints\": [],\n\"advertise\": false\n}\n],\n\"useLedgerAfterSlot\": 119160667\n}\n

Once the above two files are updated, note that since you modified them manually, there is always a chance of human error (eg: a missing comma/quote). Thus, we would recommend you start the node interactively once again before proceeding.
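
Since both files are JSON, a quick syntax check with jq can also catch such slips up front (a minimal sanity check, assuming you are in the top-level folder):

jq . files/config.json > /dev/null && jq . files/topology.json > /dev/null && echo \"JSON OK\"\n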

      cd \"${CNODE_HOME}\"/scripts\n./cnode.sh\n

As before, ensure you do not have any errors in the console. To stop the node, hit Ctrl-C - we will start the node via systemd later in the document.

      "},{"location":"Build/node-cli/#start-the-submit-api","title":"Start the submit-api","text":"

      Note

An average pool operator may not require cardano-submit-api at all. Please verify if it is required for your use as mentioned here. If - however - you do run submit-api for accepting a sizeable transaction load, you would want to override the default MEMPOOL_BYTES by uncommenting it in cnode.sh.

cardano-submit-api is one of the binaries built as part of the cardano-node repository and allows you to submit transactions over a Web API. To run this service interactively, you can use the pre-built script below (submitapi.sh). Make sure to update the submitapi.sh script to change the listen IP or port that you'd like to make this service available on.

      cd $CNODE_HOME/scripts\n./submitapi.sh\n

To stop the process, hit Ctrl-C.
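
Once the service is up, a transaction is submitted by POSTing its raw CBOR serialisation to the API, e.g. as below (assuming the default listen address/port of 8090 and a file tx.signed.cbor that already contains the raw CBOR of a signed transaction):

curl -X POST --header \"Content-Type: application/cbor\" --data-binary @tx.signed.cbor http://localhost:8090/api/submit/tx\n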

      "},{"location":"Build/node-cli/#systemd","title":"Run as systemd service","text":"

The preferred way to run the node (and submit-api) is through a service manager like systemd. This section explains how to set up a systemd service file.

1. Deploy as a systemd service: Execute the below command to deploy your node as a systemd service (from the respective scripts folder):

      cd $CNODE_HOME/scripts\n./cnode.sh -d\n# Deploying cnode.service as systemd service..\n# cnode.service deployed successfully!!\n\n./submitapi.sh -d\n# Deploying cnode-submit-api.service as systemd service..\n# cnode-submit-api deployed successfully!!\n

2. Start the service: Run the below commands to enable automatic start of the services on startup and start them.

      sudo systemctl start cnode.service\nsudo systemctl start cnode-submit-api.service\n

3. Check status and stop/start commands: Replace status with stop/start/restart depending on the action to take.

      sudo systemctl status cnode.service\nsudo systemctl status cnode-submit-api.service\n

      Important

In case you see the node exit unsuccessfully upon checking status, please verify you've followed the transition process correctly as documented below, and that you do not have another instance of the node already running. It would help to check your system logs (/var/log/syslog for debian-based and /var/log/messages for Red Hat/CentOS/Fedora systems; you can also check journalctl -f -u <service> to examine the startup attempt for services) for any errors while starting the node.

      You can use gLiveView to monitor your node that was started as a systemd service.

      cd $CNODE_HOME/scripts\n./gLiveView.sh\n
      "},{"location":"Build/offchain-metadata-tools/","title":"Offchain Metadata Tools","text":"

      Important

      In the Cardano multi-asset era, this project helps you create and submit metadata describing your assets, storing them off-chain.

      "},{"location":"Build/offchain-metadata-tools/#download-pre-built-binaries","title":"Download pre-built binaries","text":"

Go to input-output-hk/offchain-metadata-tools to download the binaries and place them in a directory in your PATH, e.g. $HOME/.local/bin/.

      "},{"location":"Build/offchain-metadata-tools/#build-instructions","title":"Build Instructions","text":"

As an alternative to the pre-built binaries, the instructions below describe how to build the token-metadata-creator tool; the offchain-metadata-tools repository contains other tools as well. Build the ones needed for your installation.

      "},{"location":"Build/offchain-metadata-tools/#clone-the-repository","title":"Clone the repository","text":"

      Execute the below to clone the offchain-metadata-tools repository to $HOME/git folder on your system:

      cd ~/git\ngit clone https://github.com/input-output-hk/offchain-metadata-tools.git\ncd offchain-metadata-tools/token-metadata-creator\n
      "},{"location":"Build/offchain-metadata-tools/#build-token-metadata-creator","title":"Build token-metadata-creator","text":"

You can use the instructions below to build token-metadata-creator; the same steps can be executed in future to update the binaries (replacing the appropriate tag) as well.

      git fetch --tags --all\ngit pull\n# Replace master with appropriate tag if you'd like to avoid compiling against master\ngit checkout master\n$CNODE_HOME/scripts/cabal-build-all.sh\n
      The above would copy the binaries into ~/.local/bin folder.

      "},{"location":"Build/offchain-metadata-tools/#verify","title":"Verify","text":"

      Verify that the tool is executable from anywhere by running:

      token-metadata-creator -h\n
      "},{"location":"Build/wallet/","title":"Wallet","text":"

      !> - An average pool operator may not require cardano-wallet at all. Please verify if it is required for your use as mentioned here.

      Ensure the Pre-Requisites are in place before you proceed.

      "},{"location":"Build/wallet/#build-instructions","title":"Build Instructions","text":"

      Follow instructions below for building the cardano-wallet binary:

      "},{"location":"Build/wallet/#clone-the-repository","title":"Clone the repository","text":"

      Execute the below to clone the cardano-wallet repository to $HOME/git folder on your system:

      cd ~/git\ngit clone https://github.com/cardano-foundation/cardano-wallet\ncd cardano-wallet\n
      "},{"location":"Build/wallet/#build-cardano-wallet","title":"Build Cardano Wallet","text":"

      You can use the instructions below to build the latest release of cardano-wallet.

      !> - Note that the latest release of cardano-wallet may not work with the latest release of cardano-node. Please check the compatibility of each cardano-wallet release yourself in the official docs, e.g. https://github.com/cardano-foundation/cardano-wallet/releases/latest.

git fetch --tags --all\ngit pull\n# Replace the tag in the checkout below if you do not want to build the latest released version\ngit checkout $(curl -s https://api.github.com/repos/cardano-foundation/cardano-wallet/releases/latest | jq -r .tag_name)\n$CNODE_HOME/scripts/cabal-build-all.sh\n

      The above would copy the binaries into ~/.local/bin folder.

      "},{"location":"Build/wallet/#start-the-wallet","title":"Start the wallet","text":"

You can run the below to connect to a cardano-node instance that is expected to be already running; the wallet will then start syncing.

# if using the testnet flag you also need to specify the testnet shelley-genesis.json file\ncardano-wallet serve \\\n    --node-socket $CNODE_HOME/sockets/node.socket \\\n    --mainnet \\\n    --database $CNODE_HOME/priv/wallet\n

      "},{"location":"Build/wallet/#verify-the-wallet-is-handling-requests","title":"Verify the wallet is handling requests","text":"

      cardano-wallet network information\n
Expected output should be similar to the following:
      Ok.\n{\n\"network_tip\": {\n\"time\": \"2021-06-01T17:31:05Z\",\n\"epoch_number\": 269,\n\"absolute_slot_number\": 31002374,\n\"slot_number\": 157574\n},\n\"node_era\": \"mary\",\n\"node_tip\": {\n\"height\": {\n\"quantity\": 5795127,\n\"unit\": \"block\"\n},\n\"time\": \"2021-06-01T17:31:00Z\",\n\"epoch_number\": 269,\n\"absolute_slot_number\": 31002369,\n\"slot_number\": 157569\n},\n\"sync_progress\": {\n\"status\": \"ready\"\n},\n\"next_epoch\": {\n\"epoch_start_time\": \"2021-06-04T21:44:51Z\",\n\"epoch_number\": 270\n}\n}\n

      "},{"location":"Build/wallet/#creatingrestoring-wallet","title":"Creating/Restoring Wallet","text":"

      If you're creating a new wallet, you'd first want to generate a mnemonic for use (see below):

      cardano-wallet recovery-phrase generate\n# false brother typical saddle settle phrase foster sauce ask sunset firm gate service render burger\n
      You can use the above mnemonic to then restore a wallet as per below:
      cardano-wallet wallet create from-recovery-phrase MyWalletName\n

      "},{"location":"Build/wallet/#expected-output","title":"Expected output:","text":"
      Please enter a 15\u201324 word recovery phrase: false brother typical saddle settle phrase foster sauce ask sunset firm gate service render burger\n(Enter a blank line if you do not wish to use a second factor.)\nPlease enter a 9\u201312 word second factor:\nPlease enter a passphrase: **********\nEnter the passphrase a second time: **********\nOk.\n{\n    ...\n}\n
      "},{"location":"Mithril/mithril-overview/","title":"Mithril Overview","text":"

Mithril Networks provide the ability to download and bootstrap cardano nodes via snapshots of the Cardano blockchain. This is a great way to speed up the process of syncing a new node, especially for stake pool operators. The tools provided by Guild Operators are designed to facilitate the ease of setting up and managing these components.

The env file contains a new environment variable MITHRIL_DOWNLOAD that, when enabled, allows the cnode.sh script to automatically download the latest Mithril snapshot if the local db directory is empty. This is useful for new nodes that need to be bootstrapped with the latest snapshot to avoid synchronizing the entire blockchain from scratch, while also providing a high level of trust that the snapshot is valid, since it is signed by multiple pool operators.
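
To opt in, the variable just needs to be set in the env file - a minimal sketch, assuming the Y/N convention used for other toggles in the env file:

# in ${CNODE_HOME}/scripts/env\nMITHRIL_DOWNLOAD=\"Y\"\n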

      "},{"location":"Mithril/mithril-overview/#architecture","title":"Architecture","text":"

The architecture for Mithril Networks is described in detail at Mithril network architecture by CF/IOHK. However, the architecture suggested and supported by the Guild Operators tools is not identical to the upstream documentation: we provide a more simplified approach to the setup and management of the Mithril Network components, with tools that allow setting up Squid based Mithril relays and an Nginx loadbalancer (aka sidecar) local to the Mithril signer. The Nginx sidecar provides the ability to loadbalance requests to multiple Squid based Mithril Relays running on each of the SPO's Cardano Relay nodes.

      "},{"location":"Mithril/mithril-overview/#single-relay-architecture","title":"Single Relay Architecture","text":"

For SPO's who only have a single Cardano relay node, a Squid based Mithril relay can be run on the same node as the Cardano relay. This can be used by the Mithril signer to submit the snapshot signatures to the Mithril Aggregator.

      "},{"location":"Mithril/mithril-overview/#multi-relay-architecture","title":"Multi Relay Architecture","text":"

For SPO's who have multiple Cardano relay nodes, an Nginx relay sidecar can be run on the Block Producer to load balance requests over multiple Cardano relay nodes, each running its own Squid based Mithril relay to pass the signature along to the Mithril aggregator. This avoids a single point of failure in case a relay server is offline for any reason, and provides high availability for the Mithril signer through multiple relays as long as the local Nginx sidecar is running on the same server as the Cardano Block Producer node.

      "},{"location":"Mithril/mithril-overview/#installation","title":"Installation","text":"

The installation of the Mithril tools is automated via guild-deploy.sh. To participate in a Mithril network, include the -s m flag, which will install the Mithril Client and Mithril Signer release binaries to \"${HOME}\"/.local/bin.

      guild-deploy.sh -s m\n
      "},{"location":"Mithril/mithril-overview/#bootstrapping-a-node-using-mithril-client","title":"Bootstrapping a node using Mithril Client","text":"

      The Mithril client is used to download a snapshot of the Cardano blockchain from a Mithril Aggregator. The snapshot is then used to bootstrap a new Cardano node. The Mithril client can be used to download the latest snapshot, list all available snapshots, or show details of a specific snapshot.
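
For example, to inspect what is available before downloading, the list/show operations mentioned above could be used along the lines below (a sketch based on the commands described in this section; <digest> is a placeholder for a snapshot digest taken from the list output):

./mithril-client.sh snapshot list\n./mithril-client.sh snapshot show <digest>\n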

      To bootstrap a Cardano node using the Mithril client, follow these steps:

1. Set up the Cardano Node: Use the guild tools to set up the Cardano node, either by building the binaries or using pre-compiled binaries. Follow the instructions in the guild-operators documentation.

      2. Create the Mithril environment file: Run the script with the environment setup command. This will create a new mithril.env file with all the necessary environment variables for the Mithril client.

      ./mithril-client.sh environment setup\n
3. Download the latest Mithril snapshot: Once the environment file is set up, you can download the latest Mithril snapshot by running the script with the snapshot download command. This snapshot contains the latest state of the Cardano blockchain db from a Mithril Aggregator.
      ./mithril-client.sh snapshot download\n
      "},{"location":"Mithril/mithril-overview/#participating-in-mithril-network","title":"Participating in Mithril Network","text":"

The Mithril signer is used to participate in the creation of stake based signatures of snapshots. The signed snapshot is then submitted to a Mithril Aggregator, via a Squid based Mithril Relay.

      The first step to participate in the Mithril network is to deploy your Squid based Mithril Relays. The Mithril relay is used to provide a private and highly available network for submitting the snapshots to a Mithril Aggregator.

      "},{"location":"Mithril/mithril-overview/#deploying-the-squid-mithril-relay","title":"Deploying the Squid Mithril Relay","text":"

      To deploy your Squid based Mithril Relays with your Cardano relay node, follow these steps:

      1. Deploy the Squid Mithril Relay: Run the mithril-relay.sh script:

      2. Use the -d flag to deploy the Squid Mithril Relay.

      3. Provide the IP address of your Block Producer when prompted to secure the Mithril Relay to only accept traffic from your Block Producer.
      4. Optionally provide the relays listening port when prompted to use a port other than the default 3132, or just press enter to use the default.
      5. Create the appropriate firewall rule to allow traffic from your Block Producer to the Mithril Relay.
      ./mithril-relay.sh -d\n\nInstalling squid proxy\nEnter the IP address of your Block Producer: 1.2.3.4\nEnter the relays listening port (press Enter to use default 3132):\nUsing port 3132 for relays listening port.\nCreate the appropriate firewall rule: sudo ufw allow from 1.2.3.4 to any port 3132 proto tcp\n
6. Enable the Systemd Squid Mithril Relay service to start on boot.
         sudo systemctl enable --now squid\n
7. Repeat the process for each of your Cardano relay nodes.
      "},{"location":"Mithril/mithril-overview/#deploying-the-mithril-signer","title":"Deploying the Mithril Signer","text":""},{"location":"Mithril/mithril-overview/#mithril-signer-with-single-relay","title":"Mithril Signer with Single Relay","text":"
      1. Deploy the Mithril Signer: Run the mithril-signer.sh script:

      2. Use the -u flag to update the mithril.env file with the Mithril Signer environment variables.

      3. Provide the IP address of your Mithril Relay when prompted.
4. Optionally provide the relay's listening port when prompted, or press Enter to use the default port.

          ./mithril-signer.sh -u\n  Enter the IP address of the relay endpoint: 4.5.6.7\n  Enter the port of the relay endpoint (press Enter to use default 3132):\n  Using RELAY_ENDPOINT=4.5.6.7:3132 for the Mithril signer relay endpoint.\n
      5. Use the -d flag to deploy the Mithril Signer.

          ./mithril-signer.sh -d\n  Creating cnode-mithril-signer systemd service environment file..\n  Mithril signer service successfully deployed\n
      6. Enable the Systemd service to start the Mithril Signer on boot.

          sudo systemctl enable cnode-mithril-signer\n
      "},{"location":"Mithril/mithril-overview/#mithril-signer-with-multi-relay","title":"Mithril Signer with Multi Relay","text":"
      1. Deploy the Nginx sidecar loadbalancer: Run the mithril-relay.sh script:

      2. Use the -l flag to deploy the Nginx Mithril Relay.

      3. Provide the IP address of your Block Producer when prompted to secure the Mithril Relay to only accept traffic from your Block Producer.
      4. Optionally provide the relays listening port when prompted to use a port other than the default 3132, or just press enter to use the default.
      5. Create the appropriate firewall rule to allow traffic from your Block Producer to the Mithril Relay.

./mithril-relay.sh -l\n\nInstalling nginx load balancer\nEnter the IP address of a relay: 4.5.6.7\nAre there more relays? (y/n) y\nEnter the IP address of a relay: 8.9.10.11\nAre there more relays? (y/n) n\nEnter the IP address of the load balancer (press Enter to use default 127.0.0.1):\nUsing IP address 127.0.0.1 for the load balancer configuration.\nEnter the relays listening port (press Enter to use default 3132):\nUsing port 3132 for relays listening port.\nStarting Mithril relay sidecar (nginx load balancer)\n
      6. Enable the Systemd Nginx Mithril Relay service to start on boot.

        sudo systemctl enable --now nginx\n
      7. Deploy the Mithril Signer: Run the mithril-signer.sh script:

      8. Use the -u flag to update the mithril.env file with the Mithril Signer environment variables.

      9. Provide the IP address of your Mithril Relay when prompted.
10. Optionally provide the relay's listening port when prompted, or press Enter to use the default port.

            ./mithril-signer.sh -u\n    Enter the IP address of the relay endpoint: 127.0.0.1\n    Enter the port of the relay endpoint (press Enter to use default 3132):\n    Using RELAY_ENDPOINT=127.0.0.1:3132 for the Mithril signer relay endpoint.\n
      11. Use the -d flag to deploy the Mithril Signer.

            ./mithril-signer.sh -d\n    Creating cnode-mithril-signer systemd service environment file..\n    Mithril signer service successfully deployed\n
      12. Enable the Systemd service to start the Mithril Signer on boot.

            sudo systemctl enable cnode-mithril-signer\n
      "},{"location":"Scripts/blockperf/","title":"BlockPerf","text":"

      Reminder !!

      Ensure the Pre-Requisites are in place before you proceed.

      blockPerf.sh is a script to monitor the network propagation of new blocks as seen by the local cardano-node.

      "},{"location":"Scripts/blockperf/#block-propagation-traces","title":"Block propagation traces","text":"

      Although blockPerf can also run on the block producer, it makes the most sense to run it on the upstream relays. There it waits for each new block announced to the relay over the network by its remote peers.

It looks at the delay times that result at each step of the block's arrival (header, request, block, adopted), as shown in the console view below.

You can view this data locally as a console stream, or run it as a systemd service in the background.

BlockPerf also sends this data to the TopologyUpdater server, making it possible to compare this data (similar to sendtip to pooltool). As a contributing operator you get the ability to see how your own relays compare to other nodes regarding receive quality, delay times and thus performance.

      There is no connection or constraint between the TopologyUpdater Relay subscription and the BlockPerf analysis. BlockPerf is even designed to work outside the cnTools suite.

These data are a good basis for making optimizations and for evaluating which changes were useful or might be required to improve performance compared to other relay nodes.

      "},{"location":"Scripts/blockperf/#installation","title":"Installation","text":"

The script is best run as a background process. This can be accomplished in many ways, but the preferred method is to run it as a systemd service. A terminal multiplexer like tmux or screen could also be used, but is not covered here.

      "},{"location":"Scripts/blockperf/#run-as-service","title":"Run as service","text":"

Use the deploy-as-systemd.sh script to create a systemd unit file. In this setup the script is started in \"service\" mode. Error/Warn level log output is handled by syslog and ends up in the system's standard syslog file, normally /var/log/syslog. journalctl -f -u cnode-tu-blockperf.service can be used to check service output (follow mode).

      Outside the cnTools environment call blockPerf.sh -d to install it as a systemd service.

      "},{"location":"Scripts/blockperf/#console-view","title":"Console view","text":"

If you run blockPerf locally in the console (scripts/blockPerf.sh), immediately after the appearance of a new block it shows where it came from, how many slots away from the previous block it was, and how many milliseconds the individual steps took.

      Block:.... 6860534\n Slot..... 52833850 (+59s)\n ......... 2022-02-09 09:49:01\n Header... 2022-02-09 09:49:02,780 (+1780 ms)\n Request.. 2022-02-09 09:49:02,780 (+0 ms)\n Block.... 2022-02-09 09:49:02,830 (+50 ms)\n Adopted.. 2022-02-09 09:49:02,900 (+70 ms)\n Size..... 79976 bytes\n delay.... 1.819971868 sec\n From..... 104.xxx.xxx.61:3001\n\nBlock:.... 6860535\n Slot..... 52833857 (+7s)\n ......... 2022-02-09 09:49:08\n Header... 2022-02-09 09:49:08,960 (+960 ms)\n Request.. 2022-02-09 09:49:08,970 (+10 ms)\n Block.... 2022-02-09 09:49:09,020 (+50 ms)\n Adopted.. 2022-02-09 09:49:09,090 (+70 ms)\n Size..... 64950 bytes\n delay.... 1.028341023 sec\n From..... 34.xxx.xxx.15:4001\n
      "},{"location":"Scripts/blockperf/#collaborative-web-view","title":"Collaborative web view","text":"

      A further aim of the blockPerf project is to use the data that individual nodes send to the central TopologyUpdater database to produce graphical visualisations and evaluations that provide the participating node operators with useful insights into their performance compared to all others.

      "},{"location":"Scripts/cncli/","title":"CNCLI","text":"

      Reminder !!

      Ensure the Pre-Requisites are in place before you proceed.

cncli.sh is a script to download and deploy CNCLI, created and maintained by Andrew Westberg. It's a community-based CLI tool written in Rust for low-level cardano-node communication. Usage is optional and no script is dependent on it. Its main features are covered in the sections below.

      "},{"location":"Scripts/cncli/#installation","title":"Installation","text":"

The cncli.sh script's main functions, sync, leaderlog, validate and PoolTool sendslots/sendtip, are not meant to be run manually, but instead deployed as systemd services that run in the background to do the block scraping and validation automatically. Additional commands exist for manual execution to initialise the sqlite db, fill the blocklog DB with all blocks created by the pool known to the blockchain, migrate the old cntoolsBlockCollector JSON blocklog, and re-validate blocks and leaderlogs. See the usage output below for a complete list of available commands.

The script works in tandem with Log Monitor to provide faster adopted status, but mainly to catch slots the node is leader for but is unable to create a block for. These are marked as invalid. Blocklog will, however, work fine without the logMonitor service, and CNCLI is able to handle everything except catching invalid blocks.

1. Run the latest version of guild-deploy.sh with guild-deploy.sh -s c to download and install RUST and CNCLI. The IOG fork of libsodium required by CNCLI is automatically compiled by the CNCLI build process. If a previous installation is found, RUST and CNCLI will be updated to the latest version.
2. Run deploy-as-systemd.sh to deploy the systemd services that handle all the work in the background. Six systemd services in total are deployed, of which four are related to CNCLI. See above for the different purposes they serve.
3. If you want to disable some of the deployed services, run sudo systemctl disable <service>:

   - cnode.service (main cardano-node launcher)
   - cnode-cncli-sync.service
   - cnode-cncli-leaderlog.service
   - cnode-cncli-validate.service
   - cnode-cncli-ptsendtip.service
   - cnode-cncli-ptsendslots.service
   - cnode-logmonitor.service (see Log Monitor)
      "},{"location":"Scripts/cncli/#configuration","title":"Configuration","text":"

      You can override the values in the script at the User Variables section shown below. POOL_ID, POOL_VRF_SKEY and POOL_VRF_VKEY should automatically be detected if POOL_NAME is set in the common env file and can be left commented. PT_API_KEY and POOL_TICKER need to be set in the script if PoolTool sendtip/sendslots are to be used before starting the services. For the rest of the commented values, if the defaults do not provide the right values, uncomment and make adjustments.

      #POOL_ID=\"\"                               # Automatically detected if POOL_NAME is set in env. Required for leaderlog calculation & pooltool sendtip, lower-case hex pool id\n#POOL_VRF_SKEY=\"\"                         # Automatically detected if POOL_NAME is set in env. Required for leaderlog calculation, path to pool's vrf.skey file\n#POOL_VRF_VKEY=\"\"                         # Automatically detected if POOL_NAME is set in env. Required for block validation, path to pool's vrf.vkey file\n#PT_API_KEY=\"\"                            # POOLTOOL sendtip: set API key, e.g \"a47811d3-0008-4ecd-9f3e-9c22bdb7c82d\"\n#POOL_TICKER=\"\"                           # POOLTOOL sendtip: set the pools ticker, e.g. \"TCKR\"\n#PT_HOST=\"127.0.0.1\"                      # POOLTOOL sendtip: connect to a remote node, preferably block producer (default localhost)\n#PT_PORT=\"${CNODE_PORT}\"                  # POOLTOOL sendtip: port of node to connect to (default is CNODE_PORT from the env file)\n#CNCLI_DIR=\"${CNODE_HOME}/guild-db/cncli\" # path to the directory for cncli sqlite db\n#SLEEP_RATE=60                            # CNCLI leaderlog/validate: time to wait until next check (in seconds)\n#CONFIRM_SLOT_CNT=600                     # CNCLI validate: require at least these many slots to have passed before validating\n#CONFIRM_BLOCK_CNT=15                     # CNCLI validate: require at least these many blocks on top of minted before validating\n#TIMEOUT_LEDGER_STATE=300                 # CNCLI leaderlog: timeout in seconds for ledger-state query\n#BATCH_AUTO_UPDATE=N                      # Set to Y to automatically update the script if a new version is available without user interaction\n
      "},{"location":"Scripts/cncli/#run","title":"Run","text":"

Services are controlled by sudo systemctl <status|start|stop|restart> <service name>. All services are configured as child services of cnode.service and as such, when an action is taken against this service, it is replicated to all child services. E.g. running sudo systemctl start cnode.service will also start all child services.

Log output is handled by syslog and ends up in the system's standard syslog file, normally /var/log/syslog. journalctl -f -u <service> can be used to check service output (follow mode). Other logging configurations are not covered here.

Recommended workflow to get started with CNCLI blocklog:

1. Install and deploy services according to the Installation section.
2. Set required user variables according to the Configuration section.
3. (optional) If a previous blocklog db created by cntoolsBlockCollector exists, run this command to migrate the json storage to the new SQLite DB:
   - $CNODE_HOME/scripts/cncli.sh migrate <path> where <path> is the location of the directory containing all blocks_.json files.
4. Start the deployed services with:
   - sudo systemctl start cnode-cncli-sync.service (starts leaderlog, validate & ptsendslots automatically)
   - sudo systemctl start cnode-logmonitor.service
   - sudo systemctl start cnode-cncli-ptsendtip.service (optional but recommended)
   - alternatively, restart the main service, which will trigger a start of all services, with: sudo systemctl restart cnode.service
5. Run the init command to fill the db with all blocks made by your pool known to the blockchain:
   - $CNODE_HOME/scripts/cncli.sh init
6. Enjoy full blocklog automation and visit the View Blocklog section for instructions on how to show blocks from the blocklog DB.

The full usage output of cncli.sh:

Usage: cncli.sh [operation <sub arg>]\nScript to run CNCLI, best launched through systemd deployed by 'deploy-as-systemd.sh'\n\nsync        Start CNCLI chainsync process that connects to cardano-node to sync blocks stored in SQLite DB (deployed as service)\nleaderlog   One-time leader schedule calculation for current epoch, then continuously monitors and calculates schedule for coming epochs, 1.5 days before epoch boundary on the mainnet (deployed as service)\n  force     Manually force leaderlog calculation and overwrite even if already done, exits after leaderlog is calculated\nvalidate    Continuously monitor and confirm that the blocks made actually was accepted and adopted by chain (deployed as service)\n  all       One-time re-validation of all blocks in blocklog db\n  epoch     One-time re-validation of blocks in blocklog db for the specified epoch \nptsendtip   Send node tip to PoolTool for network analysis and to show that your node is alive and well with a green badge (deployed as service)\nptsendslots Securely sends PoolTool the number of slots you have assigned for an epoch and validates the correctness of your past epochs (deployed as service)\ninit        One-time initialization adding all minted and confirmed blocks to blocklog\nmigrate     One-time migration from old blocklog (cntoolsBlockCollector) to new format (post cncli)\n  path      Path to the old cntoolsBlockCollector blocklog folder holding json files with blocks created\n
        "},{"location":"Scripts/cncli/#view-blocklog","title":"View Blocklog","text":"

Blocklog is best and most easily viewed in CNTools and gLiveView, but the blocklog database is a SQLite DB, so if you are comfortable with SQL, the sqlite3 command can be used to query the DB.
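
For example, a quick per-status summary could be pulled straight from the DB - a sketch only, as the DB path and table/column names here are assumptions based on a default deployment:

sqlite3 \"${CNODE_HOME}/guild-db/blocklog/blocklog.db\" 'SELECT epoch, status, COUNT(*) FROM blocklog GROUP BY epoch, status ORDER BY epoch DESC;'\n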

        Block status

        - Leader    : Scheduled to make block at this slot\n- Ideal     : Expected/Ideal number of blocks assigned based on active stake (sigma)\n- Luck      : Leader slots assigned vs ideal slots for this epoch\n- Adopted   : Block created successfully\n- Confirmed : Block created validated to be on-chain with the certainty set in `cncli.sh` for `CONFIRM_BLOCK_CNT`\n- Missed    : Scheduled at slot but no record of it in CNCLI DB and no other pool has made a block for this slot\n- Ghosted   : Block created but marked as orphaned and no other pool has made a valid block for this slot -> height battle or block propagation issue\n- Stolen    : Another pool has a valid block registered on-chain for the same slot\n- Invalid   : Pool failed to create block, base64 encoded error message can be decoded with `echo <base64 hash> | base64 -d | jq -r`\n
        CNTools

Open CNTools and select [b] Blocks to open the block viewer. Either select Epoch and enter the epoch you want to see a detailed view for, or choose Summary to display blocks for the last x epochs.

        If the node was elected to create blocks in the selected epoch it could look something like this:

        Summary
         >> BLOCKS\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nCurrent epoch: 96\n\n+--------+---------------------------+----------------------+--------------------------------------+\n| Epoch  | Leader | Ideal | Luck     | Adopted | Confirmed  | Missed | Ghosted | Stolen | Invalid  |\n+--------+---------------------------+----------------------+--------------------------------------+\n| 96     | 34     | 31.66 | 107.39%  | 18      | 18         | 0      | 0       | 0      | 0        |\n| 95     | 32     | 30.57 | 104.68%  | 32      | 32         | 0      | 0       | 0      | 0        |\n+--------+---------------------------+----------------------+--------------------------------------+\n\n[h] Home | [b] Block View | [i] Info | [*] Refresh\n
        Epoch
         >> BLOCKS\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nCurrent epoch: 96\n\n+---------------------------+----------------------+--------------------------------------+\n| Leader | Ideal | Luck     | Adopted | Confirmed  | Missed | Ghosted | Stolen | Invalid  |\n+---------------------------+----------------------+--------------------------------------+\n| 34     | 31.66 | 107.39%  | 18      | 18         | 0      | 0       | 0      | 0        |\n+---------------------------+----------------------+--------------------------------------+\n\n+-----+------------+----------+---------------------+--------------------------+-------+-------------------------------------------------------------------+\n| #   | Status     | Block    | Slot | SlotInEpoch  | Scheduled At             | Size  | Hash                                                              |\n+-----+------------+----------+---------------------+--------------------------+-------+-------------------------------------------------------------------+\n| 1   | confirmed  | 2043444  | 11142827 | 40427    | 2020-11-16 08:34:03 CET  | 3     | ec216d3fb01e4a3cc3e85305145a31875d9561fa3bbcc6d0ee8297236dbb4115  |\n| 2   | confirmed  | 2044321  | 11165082 | 62682    | 2020-11-16 14:44:58 CET  | 3     | b75c33a5bbe49a74e4b4cc5df4474398bfb10ed39531fc65ec2acc51f89ddce5  |\n| 3   | confirmed  | 2044397  | 11166970 | 64570    | 2020-11-16 15:16:26 CET  | 3     | c1ea37fd72543779b6dab46e3e29e0e422784b5fd6188f828ace9eabcc87088f  |\n| 4   | confirmed  | 2044879  | 11178909 | 76509    | 2020-11-16 18:35:25 CET  | 3     | 35a116cec80c5dc295415e4fc8e6435c562b14a5d6833027006c988706c60307  |\n| 5   | confirmed  | 2046965  | 11232557 | 130157   | 2020-11-17 09:29:33 CET  | 3     | d566e5a1f6a3d78811acab4ae3bdcee6aa42717364f9afecd6cac5093559f466  |\n| 6   | confirmed  | 2047101  | 11235675 | 133275   | 2020-11-17 10:21:31 CET  | 3     | 3a638e01f70ea1c4a660fe4e6333272e6c61b11cf84dc8a5a107b414d1e057eb  |\n| 7   | confirmed  | 2047221  | 11238453 | 136053   | 2020-11-17 11:07:49 CET  | 3     | 843336f132961b94276603707751cdb9a1c2528b97100819ce47bc317af0a2d6  |\n| 8   | confirmed  | 2048692  | 11273507 | 171107   | 2020-11-17 20:52:03 CET  | 3     | 9b3eb79fe07e8ebae163870c21ba30460e689b23768d2e5f8e7118c572c4df36  |\n| 9   | confirmed  | 2049058  | 11282619 | 180219   | 2020-11-17 23:23:55 CET  | 3     | 643396ea9a1a2b6c66bb83bdc589fa19c8ae728d1f1181aab82e8dfe508d430a  |\n| 10  | confirmed  | 2049321  | 11289237 | 186837   | 2020-11-18 01:14:13 CET  | 3     | d93d305a955f40b2298247d44e4bc27fe9e3d1486ef3ef3e73b235b25247ccd7  |\n| 11  | confirmed  | 2049747  | 11299205 | 196805   | 2020-11-18 04:00:21 CET  | 3     | 19a43deb5014b14760c3e564b41027c5ee50e0a252abddbfcac90c8f56dc0245  |\n| 12  | confirmed  | 2050415  | 11316075 | 213675   | 2020-11-18 08:41:31 CET  | 3     | dd2cb47653f3bfb3ccc8ffe76906e07d96f1384bafd57a872ddbab3b352403e3  |\n| 13  | confirmed  | 2050505  | 11318274 | 215874   | 2020-11-18 09:18:10 CET  | 3     | deb834bc42360f8d39eefc5856bb6d7cabb6b04170c842dcbe7e9efdf9dbd2e1  |\n| 14  | confirmed  | 2050613  | 11320754 | 218354   | 2020-11-18 09:59:30 CET  | 3     | bf094f6fde8e8c29f568a253201e4b92b078e9a1cad60706285e236a91ec95ff  |\n| 15  | confirmed  | 2050807  | 11325239 | 222839   | 2020-11-18 11:14:15 CET  | 3     | 21f904346ba0fd2bb41afaae7d35977cb929d1d9727887f541782576fc6a62c9  |\n| 16  | confirmed  | 2050997  | 11330062 | 227662   | 2020-11-18 12:34:38 CET  | 3     | 
109799d686fe3cad13b156a2d446a544fde2bf5d0e8f157f688f1dc30f35e912  |\n| 17  | confirmed  | 2051286  | 11336791 | 234391   | 2020-11-18 14:26:47 CET  | 3     | bb1beca7a1d849059110e3d7dc49ecf07b47970af2294fe73555ddfefb9561a8  |\n| 18  | confirmed  | 2051734  | 11348498 | 246098   | 2020-11-18 17:41:54 CET  | 3     | 87940b53c2342999c1ba4e185038cda3d8382891a16878a865f5114f540683de  |\n| 19  | leader     | -        | 11382001 | 279601   | 2020-11-19 03:00:17 CET  | -     | -                                                                 |\n| 20  | leader     | -        | 11419959 | 317559   | 2020-11-19 13:32:55 CET  | -     | -                                                                 |\n| 21  | leader     | -        | 11433174 | 330774   | 2020-11-19 17:13:10 CET  | -     | -                                                                 |\n| 22  | leader     | -        | 11434241 | 331841   | 2020-11-19 17:30:57 CET  | -     | -                                                                 |\n| 23  | leader     | -        | 11435289 | 332889   | 2020-11-19 17:48:25 CET  | -     | -                                                                 |\n| 24  | leader     | -        | 11440314 | 337914   | 2020-11-19 19:12:10 CET  | -     | -                                                                 |\n| 25  | leader     | -        | 11442361 | 339961   | 2020-11-19 19:46:17 CET  | -     | -                                                                 |\n| 26  | leader     | -        | 11443861 | 341461   | 2020-11-19 20:11:17 CET  | -     | -                                                                 |\n| 27  | leader     | -        | 11446997 | 344597   | 2020-11-19 21:03:33 CET  | -     | -                                                                 |\n| 28  | leader     | -        | 11453110 | 350710   | 2020-11-19 22:45:26 CET  | -     | -                                                                 |\n| 29  | leader     | -        | 11455323 | 352923   | 2020-11-19 23:22:19 CET  | -     | -                                                                 |\n| 30  | leader     | -        | 11505987 | 403587   | 2020-11-20 13:26:43 CET  | -     | -                                                                 |\n| 31  | leader     | -        | 11514983 | 412583   | 2020-11-20 15:56:39 CET  | -     | -                                                                 |\n| 32  | leader     | -        | 11516010 | 413610   | 2020-11-20 16:13:46 CET  | -     | -                                                                 |\n| 33  | leader     | -        | 11518958 | 416558   | 2020-11-20 17:02:54 CET  | -     | -                                                                 |\n| 34  | leader     | -        | 11533254 | 430854   | 2020-11-20 21:01:10 CET  | -     | -                                                                 |\n+-----+------------+----------+---------------------+--------------------------+-------+-------------------------------------------------------------------+\n
        gLiveView

gLiveView currently shows a block summary for the current epoch. For full block details, use CNTools for now. Invalid, missing, ghosted and stolen blocks are only shown in case of a non-zero value.

        \u2502--------------------------------------------------------------\u2502\n\u2502 BLOCKS   Leader  | Ideal  | Luck    | Adopted | Confirmed    \u2502\n\u2502          24        27.42    87.53%    1         1            \u2502\n\u2502          08:07:57 until leader XXXXXXXXX.....................\u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n
        "},{"location":"Scripts/cntools-changelog/","title":"Changelog","text":"

        All notable changes to this tool will be documented in this file.

Whenever you're updating between versions where the format/hash of keys has changed, or you're changing networks, it is recommended to back up your Wallet and Pool folders before you proceed with launching cntools on a fresh network.

The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.

        "},{"location":"Scripts/cntools-changelog/#1210-2024-01-19","title":"[12.1.0] - 2024-01-19","text":""},{"location":"Scripts/cntools-changelog/#changed","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#added","title":"Added","text":""},{"location":"Scripts/cntools-changelog/#1202-2024-02-11","title":"[12.0.2] - 2024-02-11","text":""},{"location":"Scripts/cntools-changelog/#fixed","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#1201-2024-01-26","title":"[12.0.1] - 2024-01-26","text":""},{"location":"Scripts/cntools-changelog/#fixed_1","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#1200-2024-01-19","title":"[12.0.0] - 2024-01-19","text":""},{"location":"Scripts/cntools-changelog/#changed_1","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#fixed_2","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#1102-2023-10-30","title":"[11.0.2] - 2023-10-30","text":""},{"location":"Scripts/cntools-changelog/#fixed_3","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#1101-2023-10-25","title":"[11.0.1] - 2023-10-25","text":""},{"location":"Scripts/cntools-changelog/#fixed_4","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#1100-2023-07-05","title":"[11.0.0] - 2023-07-05","text":""},{"location":"Scripts/cntools-changelog/#changed_2","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#1040-2023-06-19","title":"[10.4.0] - 2023-06-19","text":""},{"location":"Scripts/cntools-changelog/#added_1","title":"Added","text":""},{"location":"Scripts/cntools-changelog/#1031-2023-06-03","title":"[10.3.1] - 2023-06-03","text":""},{"location":"Scripts/cntools-changelog/#fixed_5","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#1030-2023-05-18","title":"[10.3.0] - 2023-05-18","text":""},{"location":"Scripts/cntools-changelog/#added_2","title":"Added","text":""},{"location":"Scripts/cntools-changelog/#1023-2023-04-28","title":"[10.2.3] - 2023-04-28","text":""},{"location":"Scripts/cntools-changelog/#fixed_6","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#1022-2023-04-24","title":"[10.2.2] - 2023-04-24","text":""},{"location":"Scripts/cntools-changelog/#fixed_7","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#1021-2023-04-04","title":"[10.2.1] - 2023-04-04","text":""},{"location":"Scripts/cntools-changelog/#fixed_8","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#1020-2023-03-13","title":"[10.2.0] - 2023-03-13","text":""},{"location":"Scripts/cntools-changelog/#fixed_9","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#changed_3","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#1011-2023-02-07","title":"[10.1.1] - 2023-02-07","text":""},{"location":"Scripts/cntools-changelog/#fixed_10","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#1010-2023-01-17","title":"[10.1.0] - 2023-01-17","text":""},{"location":"Scripts/cntools-changelog/#added_3","title":"Added","text":""},{"location":"Scripts/cntools-changelog/#changed_4","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#fixed_11","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#1005-2022-11-07","title":"[10.0.5] - 2022-11-07","text":""},{"location":"Scripts/cntools-changelog/#changed_5","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#1004-2022-08-26","title":"[10.0.4] - 
2022-08-26","text":""},{"location":"Scripts/cntools-changelog/#changed_6","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#fixed_12","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#1003-2022-08-16","title":"[10.0.3] - 2022-08-16","text":""},{"location":"Scripts/cntools-changelog/#fixed_13","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#1002-2022-08-13","title":"[10.0.2] - 2022-08-13","text":""},{"location":"Scripts/cntools-changelog/#fixed_14","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#1001-2022-07-14","title":"[10.0.1] - 2022-07-14","text":""},{"location":"Scripts/cntools-changelog/#changed_7","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#fixed_15","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#1000-2022-06-28","title":"[10.0.0] - 2022-06-28","text":""},{"location":"Scripts/cntools-changelog/#added_4","title":"Added","text":""},{"location":"Scripts/cntools-changelog/#changed_8","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#910-2022-05-11","title":"[9.1.0] - 2022-05-11","text":""},{"location":"Scripts/cntools-changelog/#changed_9","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#9010-2022-05-03","title":"[9.0.10] - 2022-05-03","text":""},{"location":"Scripts/cntools-changelog/#fixed_16","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#909-2022-03-14","title":"[9.0.9] - 2022-03-14","text":""},{"location":"Scripts/cntools-changelog/#changed_10","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#908-2022-03-07","title":"[9.0.8] - 2022-03-07","text":""},{"location":"Scripts/cntools-changelog/#changed_11","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#907-2022-03-02","title":"[9.0.7] - 2022-03-02","text":""},{"location":"Scripts/cntools-changelog/#fixed_17","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#906-2022-02-20","title":"[9.0.6] - 2022-02-20","text":""},{"location":"Scripts/cntools-changelog/#fixed_18","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#905-2022-02-16","title":"[9.0.5] - 2022-02-16","text":""},{"location":"Scripts/cntools-changelog/#fixed_19","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#904-2022-02-14","title":"[9.0.4] - 2022-02-14","text":""},{"location":"Scripts/cntools-changelog/#fixed_20","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#903-2022-02-01","title":"[9.0.3] - 2022-02-01","text":""},{"location":"Scripts/cntools-changelog/#added_5","title":"Added","text":""},{"location":"Scripts/cntools-changelog/#902-2022-01-22","title":"[9.0.2] - 2022-01-22","text":""},{"location":"Scripts/cntools-changelog/#changed_12","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#901-2022-01-17","title":"[9.0.1] - 2022-01-17","text":""},{"location":"Scripts/cntools-changelog/#changed_13","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#900-2022-01-10","title":"[9.0.0] - 2022-01-10","text":""},{"location":"Scripts/cntools-changelog/#changed_14","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#882-2021-12-28","title":"[8.8.2] - 2021-12-28","text":""},{"location":"Scripts/cntools-changelog/#fixed_21","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#881-2021-12-18","title":"[8.8.1] - 
2021-12-18","text":""},{"location":"Scripts/cntools-changelog/#fixed_22","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#880-2021-12-15","title":"[8.8.0] - 2021-12-15","text":""},{"location":"Scripts/cntools-changelog/#fixed_23","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#873-2021-11-30","title":"[8.7.3] - 2021-11-30","text":""},{"location":"Scripts/cntools-changelog/#fixed_24","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#872-2021-11-08","title":"[8.7.2] - 2021-11-08","text":""},{"location":"Scripts/cntools-changelog/#changed_15","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#871-2021-11-04","title":"[8.7.1] - 2021-11-04","text":""},{"location":"Scripts/cntools-changelog/#fixed_25","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#870-2021-10-05","title":"[8.7.0] - 2021-10-05","text":""},{"location":"Scripts/cntools-changelog/#changed_16","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#866-2021-09-26","title":"[8.6.6] - 2021-09-26","text":""},{"location":"Scripts/cntools-changelog/#fixed_26","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#865-2021-09-15","title":"[8.6.5] - 2021-09-15","text":""},{"location":"Scripts/cntools-changelog/#fixed_27","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#864-2021-09-14","title":"[8.6.4] - 2021-09-14","text":""},{"location":"Scripts/cntools-changelog/#fixed_28","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#863-2021-08-31","title":"[8.6.3] - 2021-08-31","text":""},{"location":"Scripts/cntools-changelog/#fixed_29","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#862-2021-08-30","title":"[8.6.2] - 2021-08-30","text":""},{"location":"Scripts/cntools-changelog/#fixed_30","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#861-2021-08-27","title":"[8.6.1] - 2021-08-27","text":""},{"location":"Scripts/cntools-changelog/#changed_17","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#860-2021-08-27","title":"[8.6.0] - 2021-08-27","text":""},{"location":"Scripts/cntools-changelog/#changed_18","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#8415-2021-07-15","title":"[8.4.15] - 2021-07-15","text":""},{"location":"Scripts/cntools-changelog/#changed_19","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#8414-2021-07-14","title":"[8.4.14] - 2021-07-14","text":""},{"location":"Scripts/cntools-changelog/#fixed_31","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#8413-2021-07-08","title":"[8.4.13] - 2021-07-08","text":""},{"location":"Scripts/cntools-changelog/#changed_20","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#8412-2021-06-28","title":"[8.4.12] - 2021-06-28","text":""},{"location":"Scripts/cntools-changelog/#fixed_32","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#8411-2021-06-25","title":"[8.4.11] - 2021-06-25","text":""},{"location":"Scripts/cntools-changelog/#changed_21","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#8410-2021-06-15","title":"[8.4.10] - 2021-06-15","text":""},{"location":"Scripts/cntools-changelog/#fixed_33","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#849-2021-06-15","title":"[8.4.9] - 2021-06-15","text":""},{"location":"Scripts/cntools-changelog/#changed_22","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#846-2021-06-04","title":"[8.4.6] - 
2021-06-04","text":""},{"location":"Scripts/cntools-changelog/#fixed_34","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#845-2021-05-31","title":"[8.4.5] - 2021-05-31","text":""},{"location":"Scripts/cntools-changelog/#fixed_35","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#844-2021-05-19","title":"[8.4.4] - 2021-05-19","text":""},{"location":"Scripts/cntools-changelog/#fixed_36","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#843-2021-05-17","title":"[8.4.3] - 2021-05-17","text":""},{"location":"Scripts/cntools-changelog/#fixed_37","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#842-2021-05-16","title":"[8.4.2] - 2021-05-16","text":""},{"location":"Scripts/cntools-changelog/#fixed_38","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#841-2021-05-16","title":"[8.4.1] - 2021-05-16","text":""},{"location":"Scripts/cntools-changelog/#changed_23","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#840-2021-05-16","title":"[8.4.0] - 2021-05-16","text":""},{"location":"Scripts/cntools-changelog/#added_6","title":"Added","text":""},{"location":"Scripts/cntools-changelog/#830-2021-05-15","title":"[8.3.0] - 2021-05-15","text":""},{"location":"Scripts/cntools-changelog/#added_7","title":"Added","text":""},{"location":"Scripts/cntools-changelog/#changed_24","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#fixed_39","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#822-2021-05-02","title":"[8.2.2] - 2021-05-02","text":""},{"location":"Scripts/cntools-changelog/#fixed_40","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#821-2021-04-26","title":"[8.2.1] - 2021-04-26","text":""},{"location":"Scripts/cntools-changelog/#changed_25","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#820-2021-04-18","title":"[8.2.0] - 2021-04-18","text":""},{"location":"Scripts/cntools-changelog/#added_8","title":"Added","text":""},{"location":"Scripts/cntools-changelog/#changed_26","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#816-2021-04-14","title":"[8.1.6] - 2021-04-14","text":""},{"location":"Scripts/cntools-changelog/#changed_27","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#fixed_41","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#815-2021-04-09","title":"[8.1.5] - 2021-04-09","text":""},{"location":"Scripts/cntools-changelog/#fixed_42","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#814-2021-04-05","title":"[8.1.4] - 2021-04-05","text":""},{"location":"Scripts/cntools-changelog/#changed_28","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#813-2021-04-01","title":"[8.1.3] - 2021-04-01","text":""},{"location":"Scripts/cntools-changelog/#fixed_43","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#812-2021-03-31","title":"[8.1.2] - 2021-03-31","text":""},{"location":"Scripts/cntools-changelog/#changed_29","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#811-2021-03-30","title":"[8.1.1] - 2021-03-30","text":""},{"location":"Scripts/cntools-changelog/#fixed_44","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#810-2021-03-26","title":"[8.1.0] - 
2021-03-26","text":""},{"location":"Scripts/cntools-changelog/#added_9","title":"Added","text":""},{"location":"Scripts/cntools-changelog/#changed_30","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#fixed_45","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#802-2021-03-15","title":"[8.0.2] - 2021-03-15","text":""},{"location":"Scripts/cntools-changelog/#fixed_46","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#801-2021-03-05","title":"[8.0.1] - 2021-03-05","text":""},{"location":"Scripts/cntools-changelog/#fixed_47","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#800-2021-02-28","title":"[8.0.0] - 2021-02-28","text":""},{"location":"Scripts/cntools-changelog/#added_10","title":"Added","text":""},{"location":"Scripts/cntools-changelog/#changed_31","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#fixed_48","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#716-2021-02-10","title":"[7.1.6] - 2021-02-10","text":""},{"location":"Scripts/cntools-changelog/#715-2021-02-03","title":"[7.1.5] - 2021-02-03","text":""},{"location":"Scripts/cntools-changelog/#changed_32","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#fixed_49","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#714-2021-02-01","title":"[7.1.4] - 2021-02-01","text":""},{"location":"Scripts/cntools-changelog/#fixed_50","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#713-2021-01-30","title":"[7.1.3] - 2021-01-30","text":""},{"location":"Scripts/cntools-changelog/#fixed_51","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#711-2021-01-29","title":"[7.1.1] - 2021-01-29","text":""},{"location":"Scripts/cntools-changelog/#changed_33","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#710-2021-01-29","title":"[7.1.0] - 2021-01-29","text":""},{"location":"Scripts/cntools-changelog/#changed_34","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#702-2021-01-17","title":"[7.0.2] - 2021-01-17","text":""},{"location":"Scripts/cntools-changelog/#changed_35","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#fixed_52","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#701-2021-01-13","title":"[7.0.1] - 2021-01-13","text":""},{"location":"Scripts/cntools-changelog/#changed_36","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#700-2021-01-11","title":"[7.0.0] - 2021-01-11","text":"

        Though mostly unchanged in the user interface, this is a major update with most of the code re-written/touched in the back-end. Only the most noticeable changes added to changelog.

        "},{"location":"Scripts/cntools-changelog/#added_11","title":"Added","text":""},{"location":"Scripts/cntools-changelog/#changed_37","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#fixed_53","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#631-2020-12-14","title":"[6.3.1] - 2020-12-14","text":""},{"location":"Scripts/cntools-changelog/#fixed_54","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#630-2020-12-03","title":"[6.3.0] - 2020-12-03","text":""},{"location":"Scripts/cntools-changelog/#changed_38","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#fixed_55","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#621-2020-11-28","title":"[6.2.1] - 2020-11-28","text":""},{"location":"Scripts/cntools-changelog/#changed_39","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#620-alpha-branch","title":"[6.2.0] - (alpha branch)","text":""},{"location":"Scripts/cntools-changelog/#added_12","title":"Added","text":""},{"location":"Scripts/cntools-changelog/#changed_40","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#fixed_56","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#610-2020-10-22","title":"[6.1.0] - 2020-10-22","text":""},{"location":"Scripts/cntools-changelog/#added_13","title":"Added","text":""},{"location":"Scripts/cntools-changelog/#changed_41","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#fixed_57","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#603-2020-10-16","title":"[6.0.3] - 2020-10-16","text":""},{"location":"Scripts/cntools-changelog/#fixed_58","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#602-2020-10-16","title":"[6.0.2] - 2020-10-16","text":""},{"location":"Scripts/cntools-changelog/#fixed_59","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#601-2020-10-16","title":"[6.0.1] - 2020-10-16","text":""},{"location":"Scripts/cntools-changelog/#fixed_60","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#600-2020-10-15","title":"[6.0.0] - 2020-10-15","text":"

        This is a major release with a lot of changes. It is highly recommended that you familiarise yourself with the usage for Hybrid or Online v/s Offline mode on a testnet environment before doing it on production. Please visit https://cardano-community.github.io/guild-operators/upgrade for details.

        "},{"location":"Scripts/cntools-changelog/#added_14","title":"Added","text":""},{"location":"Scripts/cntools-changelog/#changed_42","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#removed","title":"Removed","text":""},{"location":"Scripts/cntools-changelog/#fixed_61","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#500-2020-07-20","title":"[5.0.0] - 2020-07-20","text":""},{"location":"Scripts/cntools-changelog/#added_15","title":"Added","text":""},{"location":"Scripts/cntools-changelog/#changed_43","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#removed_1","title":"Removed","text":""},{"location":"Scripts/cntools-changelog/#fixed_62","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#400-2020-07-13","title":"[4.0.0] - 2020-07-13","text":""},{"location":"Scripts/cntools-changelog/#added_16","title":"Added","text":""},{"location":"Scripts/cntools-changelog/#changed_44","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#300-2020-07-12","title":"[3.0.0] - 2020-07-12","text":""},{"location":"Scripts/cntools-changelog/#added_17","title":"Added","text":""},{"location":"Scripts/cntools-changelog/#changed_45","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#fixed_63","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#200-2020-07-12","title":"[2.0.0] - 2020-07-12","text":""},{"location":"Scripts/cntools-changelog/#added_18","title":"Added","text":""},{"location":"Scripts/cntools-changelog/#changed_46","title":"Changed","text":""},{"location":"Scripts/cntools-changelog/#removed_2","title":"Removed","text":""},{"location":"Scripts/cntools-changelog/#fixed_64","title":"Fixed","text":""},{"location":"Scripts/cntools-changelog/#100-2020-07-07","title":"[1.0.0] - 2020-07-07","text":""},{"location":"Scripts/cntools-common/","title":"Common Tasks","text":"

        Important

        Familiarize yourself with the Online workflow of creating wallets and pools on the Preview/Preprod/Guild network first. You can then move on to test the Offline Workflow. The Offline workflow means that the private keys never touch the Online node. When comfortable with both the online and offline CNTools workflow, it's time to deploy what you learnt on the mainnet.

        This chapter describes some common use-cases for wallet and pool creation when running CNTools in Online mode. CNTools contains much more functionality not described here.

        Create Wallet

        A wallet is needed for the pledge and to pay the pool registration fee.

        1. Choose [w] Wallet and you will be presented with the following menu:
          ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n >> WALLET\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n Wallet Management\n\n ) New         - create a new wallet\n ) Import      - import a Daedalus/Yoroi 24/25 mnemonic or Ledger/Trezor HW wallet\n ) Register    - register a wallet on chain\n ) De-Register - De-Register (retire) a registered wallet\n ) List        - list all available wallets in a compact view\n ) Show        - show detailed view of a specific wallet\n ) Remove      - remove a wallet\n ) Decrypt     - remove write protection and decrypt wallet\n ) Encrypt     - encrypt wallet keys and make all files immutable\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n Select Wallet Operation\n\n  [n] New\n  [i] Import\n  [r] Register\n  [z] De-Register\n  [l] List\n  [s] Show\n  [x] Remove\n  [d] Decrypt\n  [e] Encrypt\n  [h] Home\n
        2. Choose [n] New to create a new wallet. [i] Import can also be used to import a Daedalus/Yoroi-based 15 or 24 word wallet seed.
        3. Give the wallet a name
        4. CNTools will give you the wallet address. For example:
          ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n >> WALLET >> NEW\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nName of new wallet: Test\n\nNew Wallet         : Test\nAddress            : addr_test1qpq5qjr774cyc6kxcwp060k4t4hwp42q43v35lmcg3gcycu5uwdwld5yr8m8fgn7su955zf5qahtrgljqfjfa4nr8jfsj4alxk\nEnterprise Address : addr_test1vpq5qjr774cyc6kxcwp060k4t4hwp42q43v35lmcg3gcyccuxhdka\n\nYou can now send and receive Ada using the above addresses.\nNote that Enterprise Address will not take part in staking.\nWallet will be automatically registered on chain if you\nchoose to delegate or pledge wallet when registering a stake pool.\n
        5. Send some money to this wallet, either through the faucet or by having a friend send you some.
        Import Daedalus/Yoroi/HW Wallet

        The Import feature of CNTools is originally based on this guide from Ilap.

        If you would like to use the Import function to import a Daedalus/Yoroi based 15 or 24 word wallet seed, please ensure that the cardano-address and bech32 binaries are available in your $PATH environment variable:

        bech32 --version\n1.1.0\n\ncardano-address --version\n3.5.0\n

        If the versions are not as per above, please run the latest guild-deploy.sh from here and rebuild cardano-node as instructed here.

        To import a Daedalus/Yoroi wallet to CNTools, open CNTools, select the [w] Wallet option and then [i] Import; the following menu will appear:

        ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n >> WALLET >> IMPORT\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n Wallet Import\n\n ) Mnemonic  - Daedalus/Yoroi 24 or 25 word mnemonic\n ) HW Wallet - Ledger/Trezor hardware wallet\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n Select Wallet operation\n\n  [m] Mnemonic\n  [w] HW Wallet\n  [h] Home\n

        Note

        You can import a hardware wallet using [w] HW Wallet above, but please note that before you are able to use a hardware wallet in CNTools, you need to ensure you can detect your hardware device at the OS level using cardano-hw-cli.
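        For example, one quick way to verify OS-level connectivity (a sketch, assuming cardano-hw-cli is already in your $PATH and the Cardano app is open on the device) is to query the device:

        cardano-hw-cli device version\n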

        Select the wallet type you want to import; for Daedalus/Yoroi wallets select [m] Mnemonic:

        ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n >> WALLET >> IMPORT >> MNEMONIC\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nName of imported wallet: TEST\n\n24 or 15 word mnemonic(space separated):\n
        Give your wallet a name (in this case 'TEST'), and enter your mnemonic phrase. Please ensure that you READ through the complete notes presented by CNTools before proceeding.

        Create Pool

        Create the necessary pool keys.

        1. From the main menu select [p] Pool
          ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n >> POOL\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n Pool Management\n\n ) New      - create a new pool\n ) Register - register created pool on chain using a stake wallet (pledge wallet)\n ) Modify   - change pool parameters and register updated pool values on chain\n ) Retire   - de-register stake pool from chain in specified epoch\n ) List     - a compact list view of available local pools\n ) Show     - detailed view of specified pool\n ) Rotate   - rotate pool KES keys\n ) Decrypt  - remove write protection and decrypt pool\n ) Encrypt  - encrypt pool cold keys and make all files immutable\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n Select Pool Operation\n\n  [n] New\n  [r] Register\n  [m] Modify\n  [x] Retire\n  [l] List\n  [s] Show\n  [o] Rotate\n  [d] Decrypt\n  [e] Encrypt\n  [h] Home\n
        2. Select [n] New to create a new pool
        3. Give the pool a name. In our case, we call it TEST. The result should look something like this:
          ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n >> POOL >> NEW\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nPool Name: TEST\n\nPool: TEST\nID (hex)    : 8d5a3510f18ce241115da38a1b2419ed82d308599c16e98caea1b4c0\nID (bech32) : pool134dr2y833n3yzy2a5w9pkfqeakpdxzzenstwnr9w5x6vqtnclue\n
        Register Pool

        Register the pool on-chain.

        1. From the main menu select [p] Pool
        2. Select [r] Register
        3. Select the pool you just created
        4. CNTools will give you prompts to set pledge, margin, cost, metadata, and relays. Enter values that are useful to you.

        Make sure you set your pledge low enough to ensure that the funds in your wallet will cover the pledge plus pool registration fees.

        5. Select the wallet to use as pledge wallet, Test in our case. As this is a newly created wallet, you will be prompted to continue with wallet registration. When complete, and if successful, both wallet and pool will be registered on-chain.

        It will look something like this:

        ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n >> POOL >> REGISTER\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nOnline mode  -  The default mode to use if all keys are available\n\nHybrid mode  -  1) Go through the steps to build a transaction file\n                2) Copy the built tx file to an offline node\n                3) Sign it using 'Sign Tx' with keys on offline node\n                   (CNTools started in offline mode '-o' without node connection)\n                4) Copy the signed tx file back to the online node and submit using 'Submit Tx'\n\nSelected value: [o] Online\n\n# Select pool\nSelected pool: TEST\n\n# Pool Parameters\npress enter to use default value\n\nPledge (in Ada, default: 50,000):\nMargin (in %, default: 7.5):\nCost (in Ada, minimum: 340, default: 340):\n\n# Pool Metadata\n\nEnter Pool's JSON URL to host metadata file - URL length should be less than 64 chars (default: https://foo.bat/poolmeta.json):\n\nEnter Pool's Name (default: TEST):\nEnter Pool's Ticker , should be between 3-5 characters (default: TEST):\nEnter Pool's Description (default: No Description):\nEnter Pool's Homepage (default: https://foo.com):\n\nOptionally set an extended metadata URL?\nSelected value: [n] No\n{\n  \"name\": \"TEST\",\n  \"ticker\": \"TEST\",\n  \"description\": \"No Description\",\n  \"homepage\": \"https://foo.com\",\n  \"nonce\": \"1613146429\"\n}\n\nPlease host file /opt/cardano/guild/priv/pool/TEST/poolmeta.json as-is at https://foo.bat/poolmeta.json\n\n# Pool Relay Registration\nSelected value: [d] A or AAAA DNS record (single)\nEnter relays's DNS record, only A or AAAA DNS records: relay.foo.com\nEnter relays's port: 6000\nAdd more relay entries?\nSelected value: [n] No\n\n# Select main owner/pledge wallet (normal CLI wallet)\nSelected wallet: Test (100,000.000000 Ada)\nWallet Test3 not registered on chain\n\nWaiting for new block to be created (timeout = 600 slots, 600s)\nINFO: press any key to cancel and return (won't stop transaction)\n\nOwner #1 : Test added!\n\nRegister a multi-owner pool (you need to have stake.vkey of any additional owner in a seperate wallet folder under $CNODE_HOME/priv/wallet)?\nSelected value: [n] No\n\nUse a separate rewards wallet from main owner?\nSelected value: [n] No\n\nWaiting for new block to be created (timeout = 600 slots, 600s)\nINFO: press any key to cancel and return (won't stop transaction)\n\nPool TEST successfully registered!\nOwner #1      : Test\nReward Wallet : Test\nPledge        : 50,000 Ada\nMargin        : 7.5 %\nCost          : 340 Ada\n\nUncomment and set value for POOL_NAME in ./env with 'TEST'\n\nINFO: Total balance in 1 owner/pledge wallet(s) are: 99,497.996518 Ada\n

        6. As mentioned in the above output: Uncomment and set value for POOL_NAME in ./env with 'TEST' (in our case, the POOL_NAME is TEST). The cnode.sh script will automatically detect whether the files required to run as a block producing node are present in the $CNODE_HOME/priv/pool/<POOL_NAME> directory.
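        For reference, the relevant line in $CNODE_HOME/scripts/env after the edit (value as per this example) would be:

        POOL_NAME=\"TEST\"                                        # uncommented and set to the pool folder name (NOT the ticker)\n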
        Rotate KES Keys

        The node runs with an operational certificate, generated using the KES hot key. For security reasons, the protocol requires you to re-generate (or rotate) your KES key upon reaching expiry. On mainnet, this expiry is 62 periods of 36 hours each (roughly 93 days, hence the quarterly rotation), after which your node will not be able to forge valid blocks unless the key is rotated. To be able to rotate KES keys, your cold key files (cold.skey, cold.vkey and cold.counter) need to be present on the machine where you run CNTools to rotate your KES key.
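        As a worked example, using the mainnet Shelley genesis values (slotsPerKESPeriod=129600 one-second slots, i.e. 36 hours, and maxKESEvolutions=62):

        echo $(( 129600 * 62 / 86400 ))   # => 93 days of KES validity before rotation is required\n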

        To rotate KES keys and generate the operational certificate - op.cert:

        1. From the main menu select [p] Pool

        2. Select [o] Rotate
        3. Select the pool you just created

        The output should look like:

        ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n >> POOL >> ROTATE KES\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nSelect pool to rotate KES keys on\nSelected pool: TEST\n\nPool KES keys successfully updated\nNew KES start period  : 240\nKES keys will expire  : 302 - 2021-09-04 11:24:31 UTC\n\nRestart your pool node for changes to take effect\n\npress any key to return to home menu\n
        4. Start or restart your cardano-node. If deployed as a systemd service as shown here, you can run sudo systemctl restart cnode.
        5. Ensure the node is running as a block producing (core) node.

        You can use gLiveView - the output at the top should say > Cardano Node - (Core - Guild).

        Alternatively, you can check the node logs in $CNODE_HOME/logs/ to see whether the node is performing leadership checks (TraceStartLeadershipCheck, TraceNodeIsNotLeader, etc.)
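        For example, a quick check from the command line (a sketch; the node0.json file name assumes the default guild logging config):

        grep -e TraceStartLeadershipCheck -e TraceNodeIsNotLeader $CNODE_HOME/logs/node0.json | tail -5\n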

        "},{"location":"Scripts/cntools/","title":"Overview","text":"

        Important

        Koios CNTools is like a swiss army knife for pool operators to simplify typical operations regarding their wallet keys and pool management. Please note that this tool only aims to simplify usual tasks for its users, but it should NOT act as an excuse to skip understanding how to manually work through things or the basics of Linux operations. The skills highlighted on the home page are paramount for a stake pool operator, and so is the understanding of configuration files and network. Please ensure you've read and understood the disclaimers before proceeding.

        Visit the Changelog section to see progress and current release.

        "},{"location":"Scripts/cntools/#overview","title":"Overview","text":"

        The tool consists of three files.

        In addition to the above files, there is also a dependency on the common env file. CNTools connects to your node through the configuration in the env file located in the same directory as the script. Customize the env and cntools.sh files to your needs.

        Additionally, CNTools can integrate and enable optional functionalities based on external components:

        See CNCLI and Log Monitor sections for more details.

        Koios CNTools can operate in the following modes:

        "},{"location":"Scripts/cntools/#download-and-update","title":"Download and Update","text":"

        The update functionality is provided from within CNTools. In case of breaking changes, please follow the prompts post-upgrade. If stuck, it's always best to re-run the latest guild-deploy.sh before proceeding.

        If you have not updated in a while, it is possible that you might come from a release with breaking changes. If so, please be sure to check out the upgrade instructions.

        "},{"location":"Scripts/cntools/#navigation","title":"Navigation","text":"

        The script's menu supports both arrow-key navigation and shortcut-key selection. The character within the square brackets is the shortcut to press for quick navigation. For other selections, like the wallet and pool menus that don't contain shortcuts, there is a third way to navigate: the key pressed is compared to the first character of each menu option, and if there is a match the selection jumps to that location - a handy way to quickly navigate a large menu.

        "},{"location":"Scripts/cntools/#hardware-wallet","title":"Hardware Wallet","text":"

        CNTools includes hardware wallet support since version 7.0.0 through the Vacuumlabs cardano-hw-cli application. Initialize the device and update its firmware/app to the latest version before usage, following the manufacturer's instructions.

        To enable hardware support run guild-deploy.sh -s w. This downloads and installs Vacuumlabs cardano-hw-cli including udev configuration. When a new version of Vacuumlabs cardano-hw-cli is released, run guild-deploy.sh -s w again to update. For additional runtime options, run guild-deploy.sh -h.

        Ledger Trezor "},{"location":"Scripts/cntools/#offline-workflow","title":"Offline Workflow","text":"

        CNTools can be run in online and offline mode. At a very high level, when working with offline devices, remember that you need to use CNTools on an online node to generate a staging transaction for the desired type of transaction, move the staging transaction to an offline node to sign (authorize) it using the signing keys on your offline node, and then bring the signed transaction back to the online node for submission to the chain.

        For the offline workflow, all the wallet and pool keys should be kept on the offline node. The backup function in CNTools has an option to create a backup without private keys (sensitive signing keys) to be transferred to the online node. All other files are included in the backup to be transferred to the online node.

        Keys excluded from a backup created without private keys: Wallet - payment.skey, stake.skey; Pool - cold.skey

        Note that setting up an offline server requires a good SysOps background (you need to know how to set up your server with an offline mirror repository, how to transfer files across, and be fairly familiar with the disk layout presented in the documentation). The guild-deploy.sh script in its current state is not expected to run on an offline machine. Essentially, you simply need the cardano-cli, bech32 and cardano-address binaries in your $PATH, the OS-level dependency packages [jq, coreutils, pkgconfig, gcc-c++ and bc], and perhaps a copy of your online cnode directory (to ensure you have the right genesis/config files on your offline server). We strongly recommend that you familiarise yourself with the workflow on the preview / preprod / guild networks first, before attempting it on mainnet.
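        A minimal sketch to sanity-check an offline machine against the binaries listed above:

        # verify required binaries are present in $PATH on the offline server\nfor bin in cardano-cli bech32 cardano-address jq bc; do\n  command -v \"$bin\" >/dev/null || echo \"missing: $bin\"\ndone\n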

        Example workflow for creating a wallet and pool:

        sequenceDiagram Note over Offline: Create/Import a wallet Note over Offline: Create a new pool Note over Offline: Rotate KES keys to generate op.cert Note over Offline: Create a backup w/o private keys Offline->>Online: Transfer backup to online node Note over Online: Fund the wallet base address with enough Ada Note over Online: Register wallet using ' Wallet \u00bb Register ' in hybrid mode Online->>Offline: Transfer built tx file back to offline node Note over Offline: Use ' Transaction >> Sign ' with payment.skey from wallet to sign transaction Offline->>Online: Transfer signed tx back to online node Note over Online: Use ' Transaction >> Submit ' to send signed transaction to blockchain Note over Online: Register pool in hybrid mode loop Offline-->Online: Repeat steps to sign and submit built pool registration transaction end Note over Online: Verify that pool was successfully registered with ' Pool \u00bb Show ' Online mode

        To start CNTools in Online (advanced) Mode, execute the script from the $CNODE_HOME/scripts/ directory:

        cd $CNODE_HOME/scripts\n./cntools.sh -a\n

        You should get a screen that looks something like this:

        ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n >> Koios CNTools vX.X.X - Guild - CONNECTED <<\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n Main Menu    Telegram Announcement / Support channel: t.me/CardanoKoios/9759\n\n ) Wallet      - create, show, remove and protect wallets\n ) Funds       - send, withdraw and delegate\n ) Pool        - pool creation and management\n ) Transaction - Sign and Submit a cold transaction (hybrid/offline mode)\n ) Blocks      - show core node leader schedule & block production statistics\n ) Backup      - backup & restore of wallet/pool/config\n ) Advanced    - Developer and advanced features: metadata, multi-assets, ...\n ) Refresh     - reload home screen content\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n                                                  Epoch 276 - 3d 19:08:27 until next\n What would you like to do?                                         Node Sync: 12 :)\n\n  [w] Wallet\n  [f] Funds\n  [p] Pool\n  [t] Transaction\n  [b] Blocks\n  [u] Update\n  [z] Backup & Restore\n  [a] Advanced\n  [r] Refresh\n  [q] Quit\n
        Offline mode

        To start CNTools in Offline Mode, execute the script from the $CNODE_HOME/scripts/ directory using the -o flag:

        cd $CNODE_HOME/scripts\n./cntools.sh -o\n

        The main menu header should let you know that the node is started in offline mode:

        ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n >> Koios CNTools vX.X.X - Guild - OFFLINE <<\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n Main Menu    Telegram Announcement / Support channel: t.me/CardanoKoios/9759\n\n ) Wallet      - create, show, remove and protect wallets\n ) Funds       - send, withdraw and delegate\n ) Pool        - pool creation and management\n ) Transaction - Sign and Submit a cold transaction (hybrid/offline mode)\n\n ) Backup      - backup & restore of wallet/pool/config\n\n ) Refresh     - reload home screen content\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n                                                  Epoch 276 - 3d 19:03:46 until next\n What would you like to do?\n\n  [w] Wallet\n  [f] Funds\n  [p] Pool\n  [t] Transaction\n  [z] Backup & Restore\n  [r] Refresh\n  [q] Quit\n

        "},{"location":"Scripts/env/","title":"Common env","text":"

        A common environment file called env is sourced by most scripts in the Guild Operators repository. This file holds common variables and functions needed by other scripts. There are several benefits to this: duplicate settings don't need to be specified, and functions can be reused, decreasing the risk of misconfiguration and inconsistency.

        "},{"location":"Scripts/env/#installation","title":"Installation","text":"

        The env file is downloaded together with the rest of the scripts when the Pre-Requisites are followed, and is located in the $CNODE_HOME/scripts/ directory. The file is also automatically downloaded/updated by some of the individual scripts if missing, like cntools.sh, gLiveView.sh and topologyUpdater.sh. All custom changes in the User Variables section are untouched on updates, unless a forced overwrite is selected when running guild-deploy.sh.

        "},{"location":"Scripts/env/#configuration","title":"Configuration","text":"

        Most variables can be left commented out to use the automatically detected or default value, but there are some that need to be set, as explained below.

        Take your time and look through the different variables and their explanations and decide if you need/want to change the default setting. For a default deployment using guild-deploy.sh, CNODE_PORT (all installs) and POOL_NAME (only block producer) should be the only variables that need to be set.

        ######################################\n# User Variables - Change as desired #\n# Leave as is if unsure              #\n######################################\n\n#CCLI=\"${HOME}/.local/bin/cardano-cli\"                  # Override automatic detection of path to cardano-cli executable\n#CNCLI=\"${HOME}/.local/bin/cncli\"                       # Override automatic detection of path to cncli executable (https://github.com/AndrewWestberg/cncli)\n#CNODE_HOME=\"/opt/cardano/cnode\"                        # Override default CNODE_HOME path (defaults to /opt/cardano/cnode)\nCNODE_PORT=6000                                         # Set node port\n#CONFIG=\"${CNODE_HOME}/files/config.json\"               # Override automatic detection of node config path\n#SOCKET=\"${CNODE_HOME}/sockets/node.socket\"             # Override automatic detection of path to socket\n#TOPOLOGY=\"${CNODE_HOME}/files/topology.json\"           # Override default topology.json path\n#LOG_DIR=\"${CNODE_HOME}/logs\"                           # Folder where your logs will be sent to (must pre-exist)\n#DB_DIR=\"${CNODE_HOME}/db\"                              # Folder to store the cardano-node blockchain db\n#UPDATE_CHECK=\"Y\"                                       # Check for updates to scripts, it will still be prompted before proceeding (Y|N).\n#TMP_DIR=\"/tmp/cnode\"                                   # Folder to hold temporary files in the various scripts, each script might create additional subfolders\n#EKG_HOST=127.0.0.1                                     # Set node EKG host IP\n#EKG_PORT=12788                                         # Override automatic detection of node EKG port\n#PROM_HOST=127.0.0.1                                    # Set node Prometheus host IP\n#PROM_PORT=12798                                        # Override automatic detection of node Prometheus port\n#EKG_TIMEOUT=3                                          # Maximum time in seconds that you allow EKG request to take before aborting (node metrics)\n#CURL_TIMEOUT=10                                        # Maximum time in seconds that you allow curl file download to take before aborting (GitHub update process)\n#BLOCKLOG_DIR=\"${CNODE_HOME}/guild-db/blocklog\"         # Override default directory used to store block data for core node\n#BLOCKLOG_TZ=\"UTC\"                                      # TimeZone to use when displaying blocklog - https://en.wikipedia.org/wiki/List_of_tz_database_time_zones\n#SHELLEY_TRANS_EPOCH=208                                # Override automatic detection of shelley epoch start, e.g. 208 for mainnet\n#TG_BOT_TOKEN=\"\"                                        # Uncomment and set to enable telegramSend function. To create your own BOT-token and Chat-Id follow guide at:\n#TG_CHAT_ID=\"\"                                          # https://cardano-community.github.io/guild-operators/Scripts/sendalerts\n#USE_EKG=\"N\"                                            # Use EKG metrics from the node instead of Prometheus. 
Prometheus metrics (default) should yield slightly better performance\n#TIMEOUT_LEDGER_STATE=300                               # Timeout in seconds for querying and dumping ledger-state\n#IP_VERSION=4                                           # The IP version to use for push and fetch, valid options: 4 | 6 | mix (Default: 4)\n\n#WALLET_FOLDER=\"${CNODE_HOME}/priv/wallet\"              # Root folder for Wallets\n#POOL_FOLDER=\"${CNODE_HOME}/priv/pool\"                  # Root folder for Pools\n# Each wallet and pool has a friendly name and subfolder containing all related keys, certificates, ...\n#POOL_NAME=\"\"                                           # Set the pool's name to run node as a core node (the name, NOT the ticker, ie folder name)\n\n#WALLET_PAY_VK_FILENAME=\"payment.vkey\"                  # Standardized names for all wallet related files\n#WALLET_PAY_SK_FILENAME=\"payment.skey\"\n#WALLET_HW_PAY_SK_FILENAME=\"payment.hwsfile\"\n#WALLET_PAY_ADDR_FILENAME=\"payment.addr\"\n#WALLET_BASE_ADDR_FILENAME=\"base.addr\"\n#WALLET_STAKE_VK_FILENAME=\"stake.vkey\"\n#WALLET_STAKE_SK_FILENAME=\"stake.skey\"\n#WALLET_HW_STAKE_SK_FILENAME=\"stake.hwsfile\"\n#WALLET_STAKE_ADDR_FILENAME=\"reward.addr\"\n#WALLET_STAKE_CERT_FILENAME=\"stake.cert\"\n#WALLET_STAKE_DEREG_FILENAME=\"stake.dereg\"\n#WALLET_DELEGCERT_FILENAME=\"delegation.cert\"\n\n#POOL_ID_FILENAME=\"pool.id\"                             # Standardized names for all pool related files\n#POOL_HOTKEY_VK_FILENAME=\"hot.vkey\"\n#POOL_HOTKEY_SK_FILENAME=\"hot.skey\"\n#POOL_COLDKEY_VK_FILENAME=\"cold.vkey\"\n#POOL_COLDKEY_SK_FILENAME=\"cold.skey\"\n#POOL_OPCERT_COUNTER_FILENAME=\"cold.counter\"\n#POOL_OPCERT_FILENAME=\"op.cert\"\n#POOL_VRF_VK_FILENAME=\"vrf.vkey\"\n#POOL_VRF_SK_FILENAME=\"vrf.skey\"\n#POOL_CONFIG_FILENAME=\"pool.config\"\n#POOL_REGCERT_FILENAME=\"pool.cert\"\n#POOL_CURRENT_KES_START=\"kes.start\"\n#POOL_DEREGCERT_FILENAME=\"pool.dereg\"\n\n#ASSET_FOLDER=\"${CNODE_HOME}/priv/asset\"                # Root folder for Multi-Assets containing minted assets and subfolders for Policy IDs\n#ASSET_POLICY_VK_FILENAME=\"policy.vkey\"                 # Standardized names for all multi-asset related files\n#ASSET_POLICY_SK_FILENAME=\"policy.skey\"\n#ASSET_POLICY_SCRIPT_FILENAME=\"policy.script\"           # File extension '.script' mandatory\n#ASSET_POLICY_ID_FILENAME=\"policy.id\"\n
        "},{"location":"Scripts/gliveview/","title":"gLiveView","text":"

        Reminder !!

        Ensure the Pre-Requisites are in place before you proceed.

        Koios gLiveView is a local monitoring tool to use in addition to remote monitoring tools like Prometheus/Grafana, Zabbix or IOG's RTView. This is especially useful when moving to a systemd deployment - if you haven't done so already - as it offers an intuitive UI to monitor the node status.

        "},{"location":"Scripts/gliveview/#configuration-startup","title":"Configuration & Startup","text":"

        For most setups, it's enough to set CNODE_PORT in the env file. The rest of the variables should be automatically detected. If required, modify the User Variables in env and gLiveView.sh to suit your environment (if the environment is customised). This should lead you to a stage where you can start running ./gLiveView.sh in the folder you downloaded the script to (the default location being $CNODE_HOME/scripts). Note that the script is smart enough to automatically detect when you're running as a Core or Relay and will show fields accordingly.

        The tool can be run in legacy mode, with only standard ASCII characters, for terminals that have trouble displaying the box-drawing characters. Run ./gLiveView.sh -h to show available command-line parameters, or permanently set it directly in the script.
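        For example, from the scripts folder:

        cd $CNODE_HOME/scripts\n./gLiveView.sh      # start the dashboard\n./gLiveView.sh -h   # show available command-line parameters\n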

        Note !!

        In keeping with gLiveView's intent of being a dashboard rather than a full-fledged monitoring tool, we aim to keep the most relevant information for a node operator in a minimalistic dashboard; accordingly, gLiveView runs in compact mode by default. One can enable verbose mode by pressing 'v' to unhide additional fields.

        A sample output from both core and relay together with peer analysis:

        Core

        Relay

        Peer Analysis

        "},{"location":"Scripts/gliveview/#upper-main-section","title":"Upper main section","text":"

        Displays live metrics from cardano-node gathered through the node's EKG/Prometheus (env setting) endpoint.

        "},{"location":"Scripts/gliveview/#core-section","title":"Core section","text":"

        If the node is run as a core, identified by the 'forge-about-to-lead' parameter, a second core section is displayed.

        "},{"location":"Scripts/gliveview/#peer-analysis","title":"Peer analysis","text":"

        A manual peer analysis can be triggered by pressing p. A latency test will be run on incoming and outgoing connections to the node.

        Note

        Note that with P2P enabled, an incoming/outgoing connection can be reused for bi-directional traffic. There isn't a way to distinctly identify a P2P peer's direction yet for a given IP.

        For outgoing connections (peers in the topology file), the ping type used is chosen in this order: 1. cncli - if available, this gives the most accurate measure as it checks the entire handshake process against the remote peer. 2. ss - sends a TCP SYN packet to ping the remote peer on the cardano-node port; should give a ~100% success rate. 3. tcptraceroute - same as ss. 4. ping - fallback method using ICMP ping against the IP; will only work if the firewall of the remote peer accepts ICMP traffic.

        For incoming connections, only ICMP ping is used, as the remote peer's port is unknown. It's not uncommon to see many undetermined peers for incoming connections, as it's good security practice to disable ICMP in the firewall.

        Once the analysis is finished, it will display the RTTs (round-trip times) for the peers and group them into the ranges 0-50, 50-100, 100-200 and >200 ms. The analysis is NOT live. Press [h] Home to go back to the default view or [i] Info to show the in-script help text. The Up and Down arrow keys are used to select the incoming or outgoing detailed list of IPs and their RTT values. The Left (<) and Right (>) arrow keys can be used to navigate the pages in the selected list.

        "},{"location":"Scripts/gliveview/#troubleshootingcustomisations","title":"Troubleshooting/Customisations","text":"

        In case you run into trouble while running the script, you might want to edit env & gLiveView.sh and look at the User Variables section. You can override the values if the automatic detection does not provide the right information, but we would appreciate it if you could also notify us by raising an issue against the GitHub repository:

        gLiveView.sh

        ######################################\n# User Variables - Change as desired #\n######################################\n\nNODE_NAME=\"Cardano Node\"                  # Change your node's name prefix here, keep at or below 19 characters!\nREFRESH_RATE=2                            # How often (in seconds) to refresh the view (additional time for processing and output may slow it down)\nLEGACY_MODE=false                         # (true|false) If enabled unicode box-drawing characters will be replaced by standard ASCII characters\nRETRIES=3                                 # How many attempts to connect to running Cardano node before erroring out and quitting\nPEER_LIST_CNT=6                           # Number of peers to show on each in/out page in peer analysis view\nTHEME=\"dark\"                              # dark  = suited for terminals with a dark background\n# light = suited for terminals with a bright background\nENABLE_IP_GEOLOCATION=\"Y\"                 # Enable IP geolocation on outgoing and incoming connections using ip-api.com\n

        "},{"location":"Scripts/itnrewards/","title":"Itnrewards","text":""},{"location":"Scripts/itnrewards/#concept","title":"Concept","text":"

        To claim rewards earned during the Incentivized TestNet (ITN), the private and public keys from ITN must be converted to Shelley stake keys. A script called itnRewards.sh has been created to guide you through the process of converting the keys and creating a CNTools-compatible wallet from where the rewards can be withdrawn.

        graph TB A([\"itnRewards.sh\"]) A --x B([\"ITN Owner skey (ed25519[e]_sk)..\"]) --x D([\"cardano-cli shelley key convert-itn-key ..\"]) A --x C([\"ITN Owner vkey (ed25519_pk)..\"]) --x D D --x E([\"Stake skey/vkey\"]) --x L A --x F([\"cardano-cli shelley ..\"]) F --x G([\"Payment skey/vkey/addr\"]) --x L F --x H([\"Reward addr\"]) --x L F --x I([\"Base addr\"]) --x L L[CNTools Wallet] ;"},{"location":"Scripts/itnrewards/#steps","title":"Steps","text":""},{"location":"Scripts/itnwitness/","title":"Itnwitness","text":"

        Disclaimer

        Currently this is to protect existing pools from the ITN that already have a delegator base against spoofing - to avoid scammers building on the ITN results of known pools. A solution for mainnet nodes may follow in the future - but in its current form this doesn't apply to them.

        "},{"location":"Scripts/itnwitness/#concept","title":"Concept","text":"

        Due to the expected ticker spoofing attack for pools that were famous during ITN, some of the community members have proposed an interim solution to verify the legitimacy of a pool for delegators. You can check the high-level workflow below:

        graph TB A(\"ITN Owner skey (ed25519/ed25519e) ..\") --x C([\"jcli key sign ..\"]) B(\"Haskell Pool ID (pool.id) ..\") --x C C --x D(\"Signature key, (pool.sig) ..\") E(\"ITN Owner vkey (ed25519_pk) ..\") --x F(\"Extended Metadata JSON (poolmeta_extended.json) ..\") D --x F F --x G(\"Pool Meta JSON (poolmeta.json) ..\") ;"},{"location":"Scripts/itnwitness/#steps","title":"Steps","text":"

        The actual implementation is pretty straightforward; we will keep it brisk, as we assume those participating are fairly familiar with jcli usage.

        If the process is approved to appear for wallets, we may consider providing easier alternatives. If you have any queries about the process, or any additions, please create a git issue/PR against the guild repository - to capture common queries and update instructions/help text where appropriate.

        "},{"location":"Scripts/itnwitness/#sample-output-of-json-files-generated","title":"Sample output of JSON files generated","text":"
        {\n\"itn\": {\n\"owner\": \"ed25519_pk1...\",\n\"witness\": \"ed25519_sig1...\"\n}\n}\n
        "},{"location":"Scripts/logmonitor/","title":"Log Monitor","text":"

        Reminder !!

        Ensure the Pre-Requisites are in place before you proceed.

        logMonitor.sh is a general-purpose JSON log monitoring script for traces created by cardano-node. Currently it looks for traces related to leader slots and block creation, but other uses could be added in the future.

        "},{"location":"Scripts/logmonitor/#block-traces","title":"Block traces","text":"

        For the core node (block producer) the logMonitor.sh script can be run to monitor the JSON log file created by cardano-node for traces related to leader slots and block creation.

        For optimal coverage, it's best run together with CNCLI scripts as they provide different functionalities. Together, they create a complete picture of blocks assigned, created, validated or invalidated due to node issues.

        "},{"location":"Scripts/logmonitor/#installation","title":"Installation","text":"

        The script is best run as a background process. This can be accomplished in many ways, but the preferred method is to run it as a systemd service. A terminal multiplexer like tmux or screen could also be used, but that is not covered here.

        Use the deploy-as-systemd.sh script to create a systemd unit file (deployed together with CNCLI). Log output is handled by syslog and ends up in the system's standard syslog file, normally /var/log/syslog. journalctl -f -u cnode-logmonitor.service can be used to check service output (follow mode). Other logging configurations are not covered here.
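        For example (a sketch, assuming the guild defaults; the service name is taken from the journalctl example above):

        cd $CNODE_HOME/scripts\n./deploy-as-systemd.sh\nsudo systemctl status cnode-logmonitor.service\njournalctl -f -u cnode-logmonitor.service\n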

        "},{"location":"Scripts/logmonitor/#view-blocklog","title":"View Blocklog","text":"

        Best viewed in CNTools or gLiveView. See CNCLI for example output.

        "},{"location":"Scripts/mithril-client/","title":"Client","text":"

        mithril-client.sh is a script to manage the Mithril client, a tool used to set up the Mithril client environment and manage downloading Mithril snapshots and stake distributions. The main features include:

        "},{"location":"Scripts/mithril-client/#preparing-a-relay-or-block-producer-node","title":"Preparing a Relay or Block Producer Node","text":"

        To prepare a relay or block producer node, you should follow these steps:

        1. Create the Mithril environment file: Run the script with the environment setup command. This will create a new mithril.env file with all the necessary environment variables for the Mithril client.
        ./mithril-client.sh environment setup\n
        2. Download the latest Mithril snapshot: Once the environment file is set up, you can download the latest Mithril snapshot by running the script with the snapshot download command. This snapshot contains the latest state of the Cardano blockchain db from a Mithril Aggregator.
        ./mithril-client.sh snapshot download\n
        "},{"location":"Scripts/mithril-client/#investigating-available-snapshots","title":"Investigating Available Snapshots","text":"

        You can investigate the available snapshots by using the snapshot list and snapshot show commands:

        ./mithril-client.sh snapshot list\n./mithril-client.sh snapshot list json\n
        ./mithril-client.sh snapshot show <DIGEST>\n./mithril-client.sh snapshot show <DIGEST> json\n./mithril-client.sh snapshot show json <DIGEST>\n
        "},{"location":"Scripts/mithril-client/#managing-stake-distributions","title":"Managing Stake Distributions","text":"

        You can manage stake distributions by using the stake-distribution download and stake-distribution list commands:

        ./mithril-client.sh stake-distribution download\n
        ./mithril-client.sh stake-distribution list\n./mithril-client.sh stake-distribution list json\n
        "},{"location":"Scripts/mithril-relay/","title":"Relay","text":"

        mithril-relay.sh is a bash script for the deployment of Squid Mithril relays and an Nginx load balancer. It provides functionalities such as:

        "},{"location":"Scripts/mithril-relay/#usage","title":"Usage","text":"
        Usage: mithril-relay.sh [-d] [-l]\n\nOptions:\n    -d  Install squid and configure as a relay\n    -l  Install nginx and configure as a load balancer\n    -h  Show this help text\n
        "},{"location":"Scripts/mithril-relay/#description","title":"Description","text":"

        The mithril-relay.sh script is a bash script for managing the Mithril Relay Server. It provides functionalities such as installing and configuring Squid as a relay, and installing and configuring Nginx as a load balancer.

        "},{"location":"Scripts/mithril-relay/#environment-variables","title":"Environment Variables","text":"

        The script uses the following environment variable:

        "},{"location":"Scripts/mithril-relay/#execution","title":"Execution","text":"

        The script parses command line options and performs the corresponding actions based on the options provided. If the -d option is provided, it installs Squid and configures it as a relay. If the -l option is provided, it installs Nginx and configures it as a load balancer. If no options are provided, it displays the usage message.
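        For example, based on the usage above:

        ./mithril-relay.sh -d   # install squid and configure as a relay\n./mithril-relay.sh -l   # install nginx and configure as a load balancer\n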

        "},{"location":"Scripts/mithril-signer/","title":"Signer","text":"

        mithril-signer.sh is a bash script for managing the Mithril Signer Server. It provides functionalities such as deploying the server as a systemd service and updating the environment file to contain variables specific to the Mithril Signer.

        "},{"location":"Scripts/mithril-signer/#usage","title":"Usage","text":"
        Usage: mithril-signer.sh [-d] [-u]\n\nOptions:\n    -d    Deploy mithril-signer as a systemd service\n    -u    Update mithril environment file\n    -h    Show this help text\n
        "},{"location":"Scripts/mithril-signer/#description","title":"Description","text":"

        This script is a bash script for managing the Mithril Signer Server. It provides functionalities such as deploying the server as a systemd service, updating the environment file, and running the server.

        "},{"location":"Scripts/mithril-signer/#environment-variables","title":"Environment Variables","text":"

        The script uses several environment variables, some of which are:

        "},{"location":"Scripts/mithril-signer/#execution","title":"Execution","text":"

        The script parses command line options, sources the environment file, sets default values, and performs basic sanity checks. It then checks if the -d or -u options were specified and performs the corresponding actions. If no options were specified, it runs the Mithril Signer Server.
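        Based on the usage above, typical invocations would be:

        ./mithril-signer.sh -u   # update the mithril environment file\n./mithril-signer.sh -d   # deploy mithril-signer as a systemd service\n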

        "},{"location":"Scripts/sendalerts/","title":"Sendalerts","text":"

        Reminder !!

        Ensure the Pre-Requisites are in place before you proceed.

        This section describes the ways in which CNTools can send important messages to the operator.

        "},{"location":"Scripts/sendalerts/#telegram-alerts","title":"Telegram alerts","text":"

        If known but unwanted errors occur on your node, or if characteristic values indicate an unusual status, CNTools can send you Telegram alert messages.

        To do this, you first have to activate your own bot and link it to your own Telegram user. Here is an explanation of how this works:

        1. Open Telegram and search for \"botfather\".

        2. Send it your wish: /newbot.

        3. Define a name for your bot, such as cntools_[POOLNAME]_alerts.

        4. Botfather will confirm the creation of your bot by giving you the unique bot access token. Keep it safe and private.

        5. Now send at least one direct message to your new bot.

        6. Open this URL in your browser, using your own just-created bot access token:

        https://api.telegram.org/bot<your-access-token>/getUpdates\n
        7. The result is a JSON document. Look for the value of result.message.chat.id. This chat ID should be a large integer number.

        This is all you need to enable Telegram alerts in the scripts/env file - uncomment and add the chat ID to the TG_CHAT_ID user variable in the env file:

        ...\nTG_CHAT_ID=\"<YOUR_TG_CHAT_ID>\"\n...  \n
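        To verify the token and chat ID pair, you can send yourself a test message through the standard Telegram Bot API (a sketch, assuming both values are exported in your shell):

        curl -s \"https://api.telegram.org/bot${TG_BOT_TOKEN}/sendMessage\" -d chat_id=\"${TG_CHAT_ID}\" -d text=\"CNTools test alert\"\n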

        "},{"location":"Scripts/topologyupdater/","title":"Topology Updater","text":"

        Reminder !!

        The topologyUpdater shell script must be executed on the relay node as a cronjob exactly every 60 minutes. After 4 consecutive requests (3 hours), the node is considered a new relay node and is listed in the topology file. If the node is turned off, it's automatically delisted after 3 hours.

        "},{"location":"Scripts/topologyupdater/#download","title":"Download and Configure","text":"

        If you have run guild-deploy.sh, this should already be available in your scripts folder, making this step unnecessary.

        Before the updater can make a valid request to the central topology service, it must query the current tip/blockNo from the well-synced local node. It connects to your node through the configuration in the script as well as the common env configuration file. Customize these files for your needs.

        To download topologyUpdater.sh manually, you can execute the commands below and test executing Topology Updater once (it's OK if the first execution returns an error):

        cd $CNODE_HOME/scripts\ncurl -s -o topologyUpdater.sh https://raw.githubusercontent.com/cardano-community/guild-operators/master/scripts/cnode-helper-scripts/topologyUpdater.sh\ncurl -s -o env https://raw.githubusercontent.com/cardano-community/guild-operators/master/scripts/cnode-helper-scripts/env\nchmod 750 topologyUpdater.sh\n./topologyUpdater.sh\n

        "},{"location":"Scripts/topologyupdater/#modify","title":"Examine and modify the variables within topologyUpdater.sh script","text":"

        Out of the box, the scripts might come with some assumptions that may or may not be valid for your environment. One of the common changes as an SPO would be to complete the CUSTOM_PEERS section as below to include your local relays/BP nodes (described in the How do I add my own nodes section), and any additional peers you'd like to always be available at minimum. Please do take time to update the variables in the User Variables section in env & topologyUpdater.sh:

        ### topologyUpdater.sh\n\n######################################\n# User Variables - Change as desired #\n######################################\n\nCNODE_HOSTNAME=\"CHANGE ME\"                                # (Optional) Must resolve to the IP you are requesting from\nCNODE_VALENCY=1                                           # (Optional) for multi-IP hostnames\nMAX_PEERS=15                                              # Maximum number of peers to return on successful fetch\n#CUSTOM_PEERS=\"None\"                                      # Additional custom peers to (IP,port[,valency]) to add to your target topology.json\n# eg: \"10.0.0.1,3001|10.0.0.2,3002|relays.mydomain.com,3003,3\"\n#BATCH_AUTO_UPDATE=N                                      # Set to Y to automatically update the script if a new version is available without user interaction\n

        Any customisations you add above will be saved across future guild-deploy.sh executions, unless you specify the -f flag to overwrite completely.

        "},{"location":"Scripts/topologyupdater/#deploy","title":"Deploy the script","text":"

        systemd service The script can be deployed as a background service in different ways, but the recommended and easiest way, if guild-deploy.sh was used, is to utilize the deploy-as-systemd.sh script to set up and schedule the execution. Running the deploy script will deploy both push & fetch service files, as well as timers for a scheduled 60 min node alive message and a cnode restart at the user-set interval (default: 24 hours).

        systemctl list-timers can be used to check the push and restart service schedules.
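
        A minimal sketch of that flow (the exact timer/service names on your system may differ; systemctl list-timers shows what was actually deployed):

        cd \"${CNODE_HOME}\"/scripts\n./deploy-as-systemd.sh    # deploys the push/fetch service files and timers\nsystemctl list-timers     # verify the topologyUpdater and restart schedules\n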

        crontab job Another way to deploy the topologyUpdater.sh script is as a crontab job. Schedule the script to be executed once per hour at a minute of your choice (e.g. xx:25 in the example below); a single call handles both the fetch and push. In addition to the crontab job for topologyUpdater, it is expected that you also add a scheduled restart of the relay node to pick up the fresh topology file fetched by the topologyUpdater script, containing relays that are alive and well.

        25 * * * * /opt/cardano/cnode/scripts/topologyUpdater.sh\n
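
        For the scheduled restart mentioned above, a hedged example (the cnode service name assumes a default guild-deploy.sh install; adjust to your setup):

        # in root's crontab (sudo crontab -e): restart the node daily at 03:00\n0 3 * * * systemctl restart cnode\n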
        "},{"location":"Scripts/topologyupdater/#logs","title":"Logs","text":"

        You can check the last result of the push message in logs/topologyUpdater_lastresult.json. If deployed as a systemd service, use sudo journalctl -u <service> to check the output from the service.
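
        For example, assuming jq is installed and the default /opt/cardano/cnode layout, you can pretty-print the last result:

        jq . /opt/cardano/cnode/logs/topologyUpdater_lastresult.json\n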

        If one of the parameters is outside the allowed ranges, invalid, or missing, the returned JSON will tell you what needs to be fixed.

        Don't try to execute the script more often than once per hour. It's completely useless and may lead to a temporary blacklisting.

        "},{"location":"Scripts/topologyupdater/#why-does-my-topology-file-only-contain-iog-peers","title":"Why does my topology file only contain IOG peers?","text":"

        Each subscribed node (4 consecutive requests) is allowed to fetch a subset of other nodes, to prove the loyalty/stability of the relay. Until reaching this point, your fetch calls will only return IOG peers combined with any custom peers added in the USER VARIABLES section of the topologyUpdater.sh script.

        The engineers of the cardano-node network stack suggested using around 20 peers; more peers create unnecessary and unwanted system load and delays.

        In its default setting, topologyUpdater returns a list of 15 remote peers.

        Note that the change in topology is only effective upon restart of your node. Make sure you account for some scheduled restarts on your relays, to help onboard newer relays onto the network (as described in the systemd section).

        "},{"location":"Scripts/topologyupdater/#how-do-i-add-my-own-relaysstatic-nodes-in-addition-to-dynamic-list-generated-by-topologyupdater","title":"How do I add my own relays/static nodes in addition to dynamic list generated by topologyUpdater?","text":"

        Most Stake Pool Operators have a few preferences (own relays, close friends, etc.) that they would like to add to their topology by default. This is where the CUSTOM_PEERS variable in topologyUpdater.sh comes in. You can add a list of peers in the format hostname/IP,port[,valency], and the output topology.json formed will already include the custom peers that you supplied. Every custom peer is defined in the form [address],[port] with an optional ,[valency] (if not specified, the valency defaults to 1). Multiple custom peers are separated by |. An example of a valid CUSTOM_PEERS variable would be:

        CUSTOM_PEERS=\"foo.bar.io,3001,2|198.175.21.197,6001|36.233.3.89,6000\n
        The list above would add three custom peers with the specified addresses and ports, with the first one additionally specifying the optional valency parameter (in this case 2).

        "},{"location":"Scripts/topologyupdater/#how-are-the-peers-for-my-topology-file-selected","title":"How are the peers for my topology file selected?","text":"

        We calculate the distance on the Earth's surface from your node's IP to all subscribed peers. We then order the peers by distance (closest first) and start by selecting one peer. We then skip some, pick the next, skip, pick, skip, pick ... until we reach the end of the list (furthest away). The number of skipped records is calculated in a way to have the desired number of peers at the end.

        Every requesting node has its personal distance to all other nodes.

        We assume this should result in a well-distributed and interconnected peering network.
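
        A minimal shell sketch of that skip/pick stride, purely illustrative (the real selection happens server-side; peers_by_distance.txt is a hypothetical file with one peer per line, closest first):

        MAX_PEERS=15\ntotal=$(wc -l < peers_by_distance.txt)\nstep=$(( total / MAX_PEERS )); [ \"$step\" -lt 1 ] && step=1\n# pick rows 1, 1+step, 1+2*step, ... and cap the result at MAX_PEERS\nawk -v s=\"$step\" '(NR-1) % s == 0' peers_by_distance.txt | head -n \"$MAX_PEERS\"\n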

        "},{"location":"docker/build/","title":"Build","text":""},{"location":"docker/build/#intro","title":"Intro","text":"

        \ud83d\udca1 Docker containers are the fastest way to run a Cardano node in both \"Relay\" and \"Block-Producing\" (Pool) mode.

        "},{"location":"docker/build/#how-to-build","title":"How to build","text":"
        docker build -t cardanocommunity/cardano-node:latest - < dockerfile_bin\n
        "},{"location":"docker/build/#for-windows-users","title":"For Windows Users","text":"

        With PowerShell on Windows, you can run docker by typing the following command:

        Get-Content dockerfile_bin  | docker build -t guild-operators/cardano-node:latest -\n
        "},{"location":"docker/build/#see-also","title":"See also","text":"

        Docker Tips

        Docker Official Docs

        "},{"location":"docker/docker/","title":"Overview","text":"

        Running your own Cardano node has never been so fast and easy.

        But first, a kind reminder about the security aspects of running docker containers.

        "},{"location":"docker/docker/#external-resources","title":"External resources","text":""},{"location":"docker/docker/#built-in-cardano-software","title":"\ud83d\udd14 Built-in Cardano software","text":""},{"location":"docker/docker/#mithril","title":"Mithril","text":""},{"location":"docker/docker/#built-in-tools","title":"\ud83d\udd14 Built-in tools","text":""},{"location":"docker/docker/#docker-splash-screen","title":"Docker Splash screen","text":""},{"location":"docker/docker/#cntools","title":"Cntools","text":""},{"location":"docker/docker/#gliveview","title":"gLiveView","text":""},{"location":"docker/docker/#gliveview-peers-analyzer","title":"gLiveView Peers analyzer","text":""},{"location":"docker/docker/#cncli","title":"CNCLI","text":""},{"location":"docker/docker/#strategy","title":"Guild Operators Docker strategy ( mainnet/ preview / preprod / guild)","text":"

        Modular docker images based on Debian.

        Based on the Guild's work, the Cardano Node image is built in a single stage: -> dockerfile_bin

        "},{"location":"docker/docker/#additional-docs","title":"Additional docs","text":"

        If you prefer to build the images on your own, you can check:

        "},{"location":"docker/docker/#port-mapping","title":"Port mapping","text":"

        The dockerfiles are located in ./files/docker/

        Flavor: Debian. Node ports: Node (6000), Prometheus (12798), EKG (12781). Wallet ports: Wallet (8090), Prometheus (12798)."},{"location":"docker/run/","title":"Run","text":""},{"location":"docker/run/#os-requirements","title":"OS Requirements","text":" Private mode Public mode

        Note

        1) --entrypoint=bash # This option won't start the node within the container, but only the OS (the node software won't actually start; you'll need to manually execute entrypoint.sh), leaving it ready to get in (through the command docker exec -it <container name or hash> /bin/bash) and play/explore around with it in command line mode. 2) All Guild tools env variables can be used to start a new container with custom values by using the \"-e\" option. 3) CPU, RAM and shared memory allocation options for the container can be set when you start it (i.e. --shm-size, --memory or --cpus; see the official docker resource docs). 4) --env MITHRIL_DOWNLOAD=Y # This option allows the Mithril client to download the latest Mithril snapshot of the blockchain when the container starts and does not yet have a copy of the blockchain. This is useful when you want to start a new node from scratch and don't want to wait for the node to sync from the network. This option is only available for the mainnet, preprod, and preview networks.
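
        Putting those options together, a hedged one-off exploration example (the container name, resource values and network choice are illustrative):

        docker run --init -dit \\\n--name explore \\\n--entrypoint=bash \\\n-e NETWORK=preview \\\n-e MITHRIL_DOWNLOAD=Y \\\n--cpus 2 --memory 12g --shm-size 1g \\\ncardanocommunity/cardano-node\ndocker exec -it explore /bin/bash   # then run entrypoint.sh manually if desired\n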

        "},{"location":"docker/run/#use-cases","title":"Use Cases","text":"
        docker run --init -dit\n--name <YourCName>\n--security-opt=no-new-privileges\n-e NETWORK=mainnet\n-v <your_custom_path>:/opt/cardano/cnode/priv\n-v <your_custom_db_path>:/opt/cardano/cnode/db\ncardanocommunity/cardano-node\n
        "},{"location":"docker/run/#use-cases_1","title":"Use Cases:","text":"
        docker run --init -dit\n--name <YourCName>\n--security-opt=no-new-privileges\n-e NETWORK=mainnet\n-p 6000:6000\n-v <your_custom_path>:/opt/cardano/cnode/priv\n-v <your_custom_db_path>:/opt/cardano/cnode/db\ncardanocommunity/cardano-node\n
        docker run --init -dit\n--name <YourCName>\n--security-opt=no-new-privileges\n-e NETWORK=mainnet\n-e CONFIG=/opt/cardano/cnode/priv/<your own configuration files>.yml\n-p 6000:6000\n-v <your_custom_path>:/opt/cardano/cnode/priv\n-v <your_custom_db_path>:/opt/cardano/cnode/db\ncardanocommunity/cardano-node\n
        "},{"location":"docker/security/","title":"Security","text":""},{"location":"docker/security/#docker-security-best-practices","title":"Docker Security best practices","text":""},{"location":"docker/security/#intro","title":"Intro","text":"

        On the security front, Docker developers are faced with different types of security attacks such as:

        Docker containers are now being exploited to covertly mine for cryptocurrency, marking a shift from ransomware to cryptocurrency malware. As with all things in security, Docker security is a moving target, so it's helpful to have access to up-to-date information, including experience-based best practices, for securing your containerized environments.

        "},{"location":"docker/security/#here-below-some-key-concepts","title":"Here below some key concepts:","text":"
        1. Use a Third-Party Security Tool Docker allows you to use containers from untrusted public repositories, which increases the need to scrutinize whether the container was created securely and whether it is free of any corrupt or malicious files. For this, use a multi-purpose security tool that gives extensive dev-to-production security controls (keep reading below).

        2. Manage Vulnerability It is best to have a sound vulnerability management program that has multiple checks throughout the container lifecycle. Vulnerability management should incorporate quality gates to detect access issues and weaknesses for a potential exploit from dev-to-production environments.

        3. Monitor and Audit Container Activity It is vital to monitor the container ecosystem and detect suspicious activity. Container monitoring activities provide real-time reports that can help you react promptly to a security breach.

        4. Enable Docker Content Trust Docker Content Trust is a feature incorporated into Docker 1.8. It is disabled by default, but once enabled, it allows you to verify the integrity, authenticity, and publication date of all Docker images from the Docker Hub Registry.

        5. Use Docker Bench for Security You should consider Docker Bench for Security as your must-use script. Once the script is run, you will notice a lot of information regarding configuration best practices for deploying Docker containers that can be used to further secure your Docker server and containers.

        6. Resource Utilization To reduce performance impacts and denial-of-service attacks, it is a good practice to implement limits on the system resources that the containers can consume (see the sketch after this list). If, for example, a web server is compromised, it helps to limit the impact to the other processes that are running on a host.

        7. RBAC RBAC is role-based access control. If you have multiple users accessing your environment, this is a must-have. It can be quite expensive to implement, but Portainer makes it super easy.
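
        A minimal sketch for point 6 above (the limit values are assumptions; tune them to your host):

        docker run --init -dit \\\n--name relay \\\n-e NETWORK=mainnet \\\n--cpus 2 --memory 12g --pids-limit 256 \\\ncardanocommunity/cardano-node\n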

        "},{"location":"docker/security/#security-docker-best-practices","title":"Security Docker best practices:","text":""},{"location":"docker/security/#the-guild-docker-images-are-not-using-all-the-following-tips-due-to-functional-purpose","title":"The Guild Docker images are not using all the following tips due to functional purpose","text":"

        Guild tips:

        Some more general tips:

        "},{"location":"docker/security/#notes","title":"Notes:","text":""},{"location":"docker/tips/","title":"Tips","text":""},{"location":"docker/tips/#how-to-run-a-cardano-node-with-docker","title":"How to run a Cardano Node with Docker","text":"

        With this quick guide you will be able to run a Cardano node in seconds, with the powerful Koios SPO scripts built in.

        "},{"location":"docker/tips/#how-to-operate-interactively-within-the-container","title":"How to operate interactively within the container","text":"

        Once you have executed the container as a daemon with an attached tty (the -dit flags), you are then able to enter the container.

        If you need a shell inside the container's console, use the following command (replace CN with your container name):

        docker exec -it CN bash 

        This command will drop you into the container's bash environment, ready to use the Koios tools.

        "},{"location":"docker/tips/#docker-flags-explained","title":"Docker flags explained","text":"
        \"docker build\" options explained:\n -t : option is to \"tag\" the image you can name the image as you prefer as long as you maintain the references between dockerfiles.\n\n\"docker run\" options explained:\n -d : for detach the container\n -i : interactive enabled -t : terminal session enabled\n -e : set an Env Variable\n -p : set exposed ports (by default if not specified the ports will be reachable only internally)\n--hostname : Container's hostname\n --name : Container's name\n
        "},{"location":"docker/tips/#custom-container-with-your-own-cfg","title":"Custom container with your own cfg","text":"
        docker run --init -itd  \n--name Relay                                  # Optional (recommended for quick access): set a name for your newly created container.\n-p 9000:6000                                  # Optional: expose the internal container port (6000) to the host <IP> port 9000\n-e NETWORK=mainnet                            # Mandatory: mainnet / preprod / guild-mainnet / guild\n--security-opt=no-new-privileges              # Option to prevent privilege escalations\n-v <YourNetPath>:/opt/cardano/cnode/sockets   # Optional: useful to share the node socket with other containers\n-v <YourCfgPath>:/opt/cardano/cnode/priv      # Optional: if used, it has to contain all the sensitive keys needed to run a node as core\n-v <YourDBbk>:/opt/cardano/cnode/db           # Optional: if not set, a fresh DB will be downloaded from scratch\ncardanocommunity/cardano-node:latest          # Mandatory: image to run\n

        Note

        To be able to use the CNTools encryption key feature, you need to manually change ENABLE_CHATTR to \"true\" in \"cntools.config\", and not use the --security-opt=no-new-privileges docker run option.

        "},{"location":"docker/tips/#docker-cli-managment","title":"Docker CLI managment","text":""},{"location":"docker/tips/#official","title":"Official","text":""},{"location":"docker/tips/#un-official-docker-managment-cli-tool","title":"Un-Official Docker managment cli tool","text":""},{"location":"docker/tips/#docker-backups-and-restores","title":"Docker backups and restores","text":"

        The docker container has an optional backup and restore functionality that can be used to back up the /opt/cardano/cnode/db directory. To have the backup persist longer than the container, the backup directory should be mounted as a volume.

        [!NOTE] The backup and restore functionality is disabled by default.

        [!WARNING] Make sure adequate space exists on the host as the backup will double the space consumed by the database.

        "},{"location":"docker/tips/#creating-a-backup","title":"Creating a Backup","text":"

        When the container is started with the ENABLE_BACKUP environment variable set to Y, the container will automatically create a backup in the /opt/cardano/cnode/backup/$NETWORK-db directory. The backup will be created when the container is started, and only if the backup directory is smaller than the db directory.

        "},{"location":"docker/tips/#restoring-from-a-backup","title":"Restoring from a Backup","text":"

        When the container is started with the ENABLE_RESTORE environment variable set to Y, the container will automatically restore the latest backup from the /opt/cardano/cnode/backup/$NETWORK-db directory. The database will be restored when the container is started, and only if the backup directory is larger than the db directory.
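
        A hedged example enabling backups, with the backup directory mounted as a volume so it outlives the container (host paths are illustrative):

        docker run --init -dit \\\n--name relay \\\n-e NETWORK=mainnet \\\n-e ENABLE_BACKUP=Y \\\n-v /srv/cnode/db:/opt/cardano/cnode/db \\\n-v /srv/cnode/backup:/opt/cardano/cnode/backup \\\ncardanocommunity/cardano-node\n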

        "},{"location":"docker/tips/#configuration-update-check-functionality","title":"Configuration Update Check Functionality","text":"

        The container now includes a static copy of each network's configuration files (Mainnet, Preprod, Preview, Sanchonet, and Guild networks). The NETWORK environment variable passed into the container determines which configuration files are copied into $CNODE_HOME/files.

        The UPDATE_CHECK environment variable controls whether the container updates these configuration files from GitHub before starting. By default, the container has the environment variable set to UPDATE_CHECK=N, meaning the container uses the configuration files it was built with. This can be overridden either persistently or dynamically.

        "},{"location":"docker/tips/#persistently-updating-configuration-files","title":"Persistently updating configuration files","text":"

        To always update the configuration files from GitHub, set the UPDATE_CHECK environment variable when creating the container by using the --env option, for example --env UPDATE_CHECK=Y.

        To always update the configuration files from a specific GitHub account, set the G_ACCOUNT environment variable when creating the container by using the --env option, for example --env G_ACCOUNT=gh-fork-user.
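
        For example (gh-fork-user as above; the container name is illustrative):

        docker run --init -dit \\\n--name relay \\\n-e NETWORK=preprod \\\n--env UPDATE_CHECK=Y \\\n--env G_ACCOUNT=gh-fork-user \\\ncardanocommunity/cardano-node\n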

        [!NOTE] There is no way to change the environment variables of an already running container. To roll back the configuration files and scripts, stop and remove the container, then start a new one without setting the environment variable.

        "},{"location":"docker/tips/#dynamically-updating-configuration-files","title":"Dynamically updating configuration files","text":"

        Set an environment file during create/run using --env-file=file, for example --env-file=/opt/cardano/cnode/.env.

        To roll back the configuration files to the built-in versions, remove UPDATE_CHECK=Y from the environment file or set it to UPDATE_CHECK=N. The static configuration files in the container will then be used; however, the scripts will remain updated. If you want both the configuration files and scripts rolled back, you will need to stop and remove the container and create a new one.
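
        A minimal sketch of the env-file flow (the file contents are illustrative; --env-file expects plain KEY=VALUE lines):

        cat > /opt/cardano/cnode/.env <<'EOF'\nNETWORK=mainnet\nUPDATE_CHECK=Y\nEOF\ndocker run --init -dit --name relay --env-file=/opt/cardano/cnode/.env cardanocommunity/cardano-node\n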

        "},{"location":"docker/tips/#building-images-from-forked-repositories","title":"Building Images from Forked Repositories","text":"

        Run the Docker Image GitHub Action to build and push images to the ghcr.io registry.

        "}]} \ No newline at end of file diff --git a/sitemap.xml b/sitemap.xml index 7a34573ca..ec6b57611 100644 --- a/sitemap.xml +++ b/sitemap.xml @@ -2,192 +2,192 @@ https://cardano-community.github.io/guild-operators/ - 2024-04-15 + 2024-04-21 daily https://cardano-community.github.io/guild-operators/basics/ - 2024-04-15 + 2024-04-21 daily https://cardano-community.github.io/guild-operators/build/ - 2024-04-15 + 2024-04-21 daily https://cardano-community.github.io/guild-operators/catalystf11/ - 2024-04-15 + 2024-04-21 daily https://cardano-community.github.io/guild-operators/contributors/ - 2024-04-15 + 2024-04-21 daily https://cardano-community.github.io/guild-operators/grest-meets/ - 2024-04-15 + 2024-04-21 daily https://cardano-community.github.io/guild-operators/sidebar/ - 2024-04-15 + 2024-04-21 daily https://cardano-community.github.io/guild-operators/upgrade/ - 2024-04-15 + 2024-04-21 daily https://cardano-community.github.io/guild-operators/Appendix/RecoverByronWallet/ - 2024-04-15 + 2024-04-21 daily https://cardano-community.github.io/guild-operators/Appendix/monitoring/ - 2024-04-15 + 2024-04-21 daily https://cardano-community.github.io/guild-operators/Appendix/postgres/ - 2024-04-15 + 2024-04-21 daily https://cardano-community.github.io/guild-operators/Build/dbsync/ - 2024-04-15 + 2024-04-21 daily https://cardano-community.github.io/guild-operators/Build/grest-changelog/ - 2024-04-15 + 2024-04-21 daily https://cardano-community.github.io/guild-operators/Build/grest/ - 2024-04-15 + 2024-04-21 daily https://cardano-community.github.io/guild-operators/Build/node-cli/ - 2024-04-15 + 2024-04-21 daily https://cardano-community.github.io/guild-operators/Build/offchain-metadata-tools/ - 2024-04-15 + 2024-04-21 daily https://cardano-community.github.io/guild-operators/Build/wallet/ - 2024-04-15 + 2024-04-21 daily https://cardano-community.github.io/guild-operators/Mithril/mithril-overview/ - 2024-04-15 + 2024-04-21 daily https://cardano-community.github.io/guild-operators/Scripts/blockperf/ - 2024-04-15 + 2024-04-21 daily https://cardano-community.github.io/guild-operators/Scripts/cncli/ - 2024-04-15 + 2024-04-21 daily https://cardano-community.github.io/guild-operators/Scripts/cntools-changelog/ - 2024-04-15 + 2024-04-21 daily https://cardano-community.github.io/guild-operators/Scripts/cntools-common/ - 2024-04-15 + 2024-04-21 daily https://cardano-community.github.io/guild-operators/Scripts/cntools/ - 2024-04-15 + 2024-04-21 daily https://cardano-community.github.io/guild-operators/Scripts/env/ - 2024-04-15 + 2024-04-21 daily https://cardano-community.github.io/guild-operators/Scripts/gliveview/ - 2024-04-15 + 2024-04-21 daily https://cardano-community.github.io/guild-operators/Scripts/itnrewards/ - 2024-04-15 + 2024-04-21 daily https://cardano-community.github.io/guild-operators/Scripts/itnwitness/ - 2024-04-15 + 2024-04-21 daily https://cardano-community.github.io/guild-operators/Scripts/logmonitor/ - 2024-04-15 + 2024-04-21 daily https://cardano-community.github.io/guild-operators/Scripts/mithril-client/ - 2024-04-15 + 2024-04-21 daily https://cardano-community.github.io/guild-operators/Scripts/mithril-relay/ - 2024-04-15 + 2024-04-21 daily https://cardano-community.github.io/guild-operators/Scripts/mithril-signer/ - 2024-04-15 + 2024-04-21 daily https://cardano-community.github.io/guild-operators/Scripts/sendalerts/ - 2024-04-15 + 2024-04-21 daily https://cardano-community.github.io/guild-operators/Scripts/topologyupdater/ - 2024-04-15 + 2024-04-21 daily 
https://cardano-community.github.io/guild-operators/docker/build/ - 2024-04-15 + 2024-04-21 daily https://cardano-community.github.io/guild-operators/docker/docker/ - 2024-04-15 + 2024-04-21 daily https://cardano-community.github.io/guild-operators/docker/run/ - 2024-04-15 + 2024-04-21 daily https://cardano-community.github.io/guild-operators/docker/security/ - 2024-04-15 + 2024-04-21 daily https://cardano-community.github.io/guild-operators/docker/tips/ - 2024-04-15 + 2024-04-21 daily \ No newline at end of file diff --git a/sitemap.xml.gz b/sitemap.xml.gz index 9dd074676563043f95ad790c32aa60c03eb590b5..2e92414772e756058a1835dc60140841b007935a 100644 GIT binary patch literal 527 zcmV+q0`UDGiwFoM<|JkU|8r?{Wo=<_E_iKh0M(eij@vK{fcJZfz`Ly^xS=f)A9}-W zUHXI0vP3&VWl<;6@%imbCutj?Yf*p(3|TZqeS}3)^5N-ijGHqQ3Yd=7uGv-_NDh5S z!?F7I^`(ASJ+%*qsYC+ah;pK1bz;sRjG5=Tp};WOObwR7kUL3-%uQ&v&HJi-*vPdS zxISI3LqylER!Gb^Tf9wJI~>O;1zwsV@M-FrfM%FN^fhKEma&jIn04g6Axk`>Z};2% zW4(Q>_d8L!$?nwL$)*AN0{WT54xZ~<_yV)H}e@BN`ZM^3a0NiMqs6mj3}B| zq%taXAS3Y_xw`P5FBQpmv?QmI{F*DT(?uNAx}Xs}p@-VVpphN>Ugio($}C$idz_jP zqsri&6{h`9@}`7n#%o)zHS8Q<*5$<)S3;IeS|LVZ_oAn>ao4sPbCuYMwAmBsv~p$Xv{p%4F!hLW@@kuhTKs)WUfN9Z9Y`(-A1m} zz~$+D?IXH&wL)UX+2U=&+Tl1(De%$^fsa$y1T@1GqOUPSv5bYx-mD{U4O!w5eS5#X zf2g+)_3oXhTxB0E)P8;*u*M;mX+qe+-yPYy{ zOk6@Y>08-D0pfZDU}fMGJao3`f}ZkFtOs4tpK^xe!>joM4yC|6Ed|pL8zZn%M@AG) zEK(VjI*^fgja*&$Pv?r{J6e*{NPfwcm+2x7YF*F>p3p<>V$jHreJ^u`BxRPZmpx9+ zh*4$m&I;51XL(aXG~=bM*BW*TFzfQ-t1BVPCan_#KSkQ>#CJ(u5dTF#hppBE@Q)iZ z29s(ZbizYA=>#8QIDy{G88{V~Wgw~LpXCNp$}bu7dhpW8|9k%+1q>4F_Mt$ui*6Sv z3<;DjC6Z&jjZj!=C^Kf1a9*Y?#~E!O!OI3(m=*pXzmi`Yjp$`cD!B>VR76uHJcs-S SdFI>@aQh2?h;mYj8~^}!4gT5y