diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 20360a6942..6a1d7eae9d 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -72,8 +72,6 @@ jobs: - name: Build osx binary run: | export VERSION=${{ needs.build_init.outputs.version }} - export WITH_CLEVELDB=false - export WITH_ROCKSDB=false make build-release-zip - name: Provenanced version # TODO[1760]: github: Re-enable the build_osx provenanced version step. @@ -102,12 +100,9 @@ jobs: run: | sudo apt-get update sudo apt-get install -y libgflags-dev libsnappy-dev zlib1g-dev libbz2-dev liblz4-dev libzstd-dev - - name: Build and install cleveldb - run: make cleveldb - name: Build linux binary run: | export VERSION=${{ needs.build_init.outputs.version }} - export WITH_CLEVELDB=true make build-release-zip - name: Provenanced version # TODO[1760]: github: Re-enable the build_linux provenanced version step. @@ -121,40 +116,6 @@ jobs: name: linux-zip path: build/provenance*.zip - build_dbmigrate: - runs-on: ubuntu-20.04 - needs: - - build_init - name: Build dbmigrate - env: - LD_LIBRARY_PATH: /usr/local/lib:/usr/local/lib/x86_64-linux-gnu - steps: - - name: Checkout - uses: actions/checkout@v4 - - name: Setup go - uses: actions/setup-go@v4 - with: - go-version: ${{ needs.build_init.outputs.go_version }} - - name: Install deps - run: | - sudo apt-get update - sudo apt-get install -y libgflags-dev libsnappy-dev zlib1g-dev libbz2-dev liblz4-dev libzstd-dev - - name: Build and install cleveldb - run: make cleveldb - - name: Build dbmigrate binary - run: | - export VERSION=${{ needs.build_init.outputs.version }} - export WITH_CLEVELDB=true - export WITH_ROCKSDB=false - export WITH_BADGERDB=false - make build-dbmigrate-zip - - name: dbmigrate --help - run: build/dbmigrate --help - - uses: actions/upload-artifact@v3 - with: - name: dbmigrate-zip - path: build/dbmigrate*.zip - buf_push: needs: - build_init @@ -176,7 +137,6 @@ jobs: needs: - build_init - build_linux - - build_dbmigrate if: needs.build_init.outputs.is_release == 'true' runs-on: ubuntu-latest name: Create Release @@ -216,11 +176,6 @@ jobs: with: name: linux-zip path: build/ - - name: Download dbmigrate zip artifact - uses: actions/download-artifact@v3 - with: - name: dbmigrate-zip - path: build/ - name: Create release items id: create-items run: | @@ -235,16 +190,6 @@ jobs: asset_path: ./build/provenance-linux-amd64-${{ needs.build_init.outputs.version }}.zip asset_name: provenance-linux-amd64-${{ needs.build_init.outputs.version }}.zip asset_content_type: application/octet-stream - - name: Upload dbmigrate zip artifact - if: always() && steps.create-items.outcome == 'success' - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ needs.create_release.outputs.release_url }} - asset_path: ./build/dbmigrate-linux-amd64-${{ needs.build_init.outputs.version }}.zip - asset_name: dbmigrate-linux-amd64-${{ needs.build_init.outputs.version }}.zip - asset_content_type: application/octet-stream - name: Upload release checksum if: always() && steps.create-items.outcome == 'success' uses: actions/upload-release-asset@v1 diff --git a/.github/workflows/sims.yml b/.github/workflows/sims.yml index 6a00dc10a0..5ad2335dd8 100644 --- a/.github/workflows/sims.yml +++ b/.github/workflows/sims.yml @@ -41,9 +41,6 @@ jobs: file_prefix="sim-test-${GITHUB_SHA:0:7}-${GITHUB_RUN_ATTEMPT}" echo "Setting output: file-prefix=$file_prefix" echo "file-prefix=$file_prefix" >> "$GITHUB_OUTPUT" - 
db_cache_key_hash="${{ hashFiles('scripts/cleveldb_build_and_install.sh') }}" - echo "Setting output: db-cache-key-hash=$db_cache_key_hash" - echo "db-cache-key-hash=$db_cache_key_hash" >> "$GITHUB_OUTPUT" go_cache_key_hash="${{ hashFiles('go.sum') }}" echo "Setting output: go-cache-key-hash=$go_cache_key_hash" echo "go-cache-key-hash=$go_cache_key_hash" >> "$GITHUB_OUTPUT" @@ -51,13 +48,6 @@ jobs: go-version: '1.21' should-run: ${{ env.GIT_DIFF }} file-prefix: ${{ steps.def-vars.outputs.file-prefix }} - db-cache-key-suffix: sims-db3-${{ steps.def-vars.outputs.db-cache-key-hash }} - # In Order: - # * The leveldb repo tarball - # * The directory extracted from the leveldb tarball - db-cache-path: | - leveldb*.tar.gz - leveldb-* go-cache-key-suffix: sims-go3-${{ steps.def-vars.outputs.go-cache-key-hash }} # In Order: # * Go binary directory @@ -79,16 +69,8 @@ jobs: echo " go-version: [${{ needs.setup.outputs.go-version }}]" echo " should-run: [${{ needs.setup.outputs.should-run }}]" echo " file-prefix: [${{ needs.setup.outputs.file-prefix }}]" - echo "db-cache-key-suffix: [${{ needs.setup.outputs.db-cache-key-suffix }}]" - echo " db-cache-path: [${{ needs.setup.outputs.db-cache-path }}]" echo "go-cache-key-suffix: [${{ needs.setup.outputs.go-cache-key-suffix }}]" echo " go-cache-path: [${{ needs.setup.outputs.go-cache-path }}]" - - uses: actions/cache@v3 - name: Load db cache - id: db-cache-setup - with: - key: ${{ runner.os }}-${{ needs.setup.outputs.db-cache-key-suffix }} - path: ${{ needs.setup.outputs.db-cache-path }} - uses: actions/cache@v3 name: Load go cache id: go-cache-setup @@ -101,21 +83,6 @@ jobs: run: | sudo apt-get update sudo apt-get install -y libgflags-dev libsnappy-dev zlib1g-dev libbz2-dev liblz4-dev libzstd-dev wget curl build-essential cmake gcc sqlite3 - - name: Build cleveldb - if: steps.db-cache-setup.outputs.cache-hit != 'true' - run: | - export CLEVELDB_DO_BUILD='true' - export CLEVELDB_DO_INSTALL='false' - export CLEVELDB_DO_CLEANUP='false' - make cleveldb - - name: Install cleveldb - run: | - export CLEVELDB_DO_BUILD='false' - export CLEVELDB_DO_INSTALL='true' - export CLEVELDB_SUDO='true' - export CLEVELDB_DO_CLEANUP='false' - make cleveldb - echo 'WITH_CLEVELDB=true' >> "$GITHUB_ENV" - uses: actions/setup-go@v4 with: go-version: ${{ needs.setup.outputs.go-version }} @@ -198,16 +165,11 @@ jobs: # The test-sim-simple test is pretty quick and should be able to identify glaring problems. # The test-sim-benchmark is handy to have for each db type. 
test: ["simple", "benchmark"] - db-backend: ["goleveldb", "cleveldb"] + db-backend: ["goleveldb"] os: ["ubuntu-latest"] runs-on: ${{ matrix.os }} steps: - uses: actions/checkout@v4 - - uses: actions/cache@v3 - name: Load db cache - with: - key: ${{ runner.os }}-${{ needs.setup.outputs.db-cache-key-suffix }} - path: ${{ needs.setup.outputs.db-cache-path }} - uses: actions/cache@v3 name: Load go cache with: @@ -219,15 +181,6 @@ jobs: test_logs="${{ needs.setup.outputs.file-prefix }}-${{ matrix.test }}-${{ matrix.db-backend }}-${{ matrix.os }}" echo "Setting output: test-logs=$test_logs" echo "test-logs=$test_logs" >> "$GITHUB_OUTPUT" - - name: Install cleveldb - if: matrix.db-backend == 'cleveldb' - run: | - export CLEVELDB_DO_BUILD='false' - export CLEVELDB_DO_INSTALL='true' - export CLEVELDB_SUDO='true' - export CLEVELDB_DO_CLEANUP='false' - make cleveldb - echo 'WITH_CLEVELDB=true' >> "$GITHUB_ENV" - uses: actions/setup-go@v4 with: go-version: ${{ needs.setup.outputs.go-version }} diff --git a/.gitignore b/.gitignore index e0a56f83ad..b7b5babd83 100644 --- a/.gitignore +++ b/.gitignore @@ -8,7 +8,6 @@ data/ .idea/ *.swp utils/ -!cmd/dbmigrate/utils .blockade blockade.yaml *.sock diff --git a/.golangci.yml b/.golangci.yml index d679efafdf..7ee16aa53a 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -105,8 +105,6 @@ linters-settings: - github.com/grpc-ecosystem/grpc-gateway - - github.com/otiai10/copy # Used by the dbmigrate only - - github.com/provenance-io/provenance - github.com/rakyll/statik/fs diff --git a/CHANGELOG.md b/CHANGELOG.md index 4900d3d5a3..15eaf9f494 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -52,6 +52,7 @@ Ref: https://keepachangelog.com/en/1.0.0/ * Add upgrade handler for 1.18 [#1756](https://github.com/provenance-io/provenance/pull/1756). * Updated documentation for each module to work with docusaurus [PR 1763](https://github.com/provenance-io/provenance/pull/1763). * Create a default market in `make run`, `localnet`, `devnet` and the `provenanced testnet` command [#1757](https://github.com/provenance-io/provenance/issues/1757). +* Remove unsupported database types [#1760](https://github.com/provenance-io/provenance/issues/1760). 
### Dependencies diff --git a/Makefile b/Makefile index abccb09a5c..8416d659ef 100644 --- a/Makefile +++ b/Makefile @@ -6,23 +6,11 @@ BINDIR ?= $(GOPATH)/bin BUILDDIR ?= $(CURDIR)/build WITH_LEDGER ?= true -WITH_CLEVELDB ?= false -WITH_ROCKSDB ?= false -WITH_BADGERDB ?= false # We used to use 'yes' on these flags, so at least for now, change 'yes' into 'true' ifeq ($(WITH_LEDGER),yes) WITH_LEDGER=true endif -ifeq ($(WITH_CLEVELDB),yes) - WITH_CLEVELDB=true -endif -ifeq ($(WITH_ROCKSDB),yes) - WITH_ROCKSDB=true -endif -ifeq ($(WITH_BADGERDB),yes) - WITH_BADGERDB=true -endif BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2> /dev/null) BRANCH_PRETTY := $(subst /,-,$(BRANCH)) @@ -79,21 +67,6 @@ endif # Build Flags/Tags ############################## -ifeq ($(WITH_CLEVELDB),true) - ifneq ($(have_gcc),true) - $(error gcc not installed for cleveldb support, please install or set WITH_CLEVELDB=false) - else - build_tags += gcc - build_tags += cleveldb - endif -endif -ifeq ($(WITH_ROCKSDB),true) - build_tags += rocksdb -endif -ifeq ($(WITH_BADGERDB),true) - build_tags += badgerdb -endif - ifeq ($(WITH_LEDGER),true) ifeq ($(UNAME_S),openbsd) $(warning OpenBSD detected, disabling ledger support (https://github.com/cosmos/cosmos-sdk/issues/1988)) @@ -114,20 +87,6 @@ else ifeq ($(UNAME_S),linux) cgo_ldflags += -Wl,-rpath,\$$ORIGIN endif -# cleveldb linker settings -ifeq ($(WITH_CLEVELDB),true) - ifeq ($(UNAME_S),darwin) - LEVELDB_PATH ?= $(shell brew --prefix leveldb 2> /dev/null) - # Only do stuff if that LEVELDB_PATH exists. Otherwise, leave it up to already installed libraries. - ifneq ($(wildcard $(LEVELDB_PATH)/.),) - cgo_cflags += -I$(LEVELDB_PATH)/include - cgo_ldflags += -L$(LEVELDB_PATH)/lib - endif - else ifeq ($(UNAME_S),linux) - # Intentionally left blank to leave it up to already installed libraries. - endif -endif - cgo_ldflags += $(CGO_LDFLAGS) cgo_ldflags := $(strip $(cgo_ldflags)) CGO_LDFLAGS := $(cgo_ldflags) @@ -206,17 +165,6 @@ endif .PHONY: install build build-linux run -############################## -# Build DB Migration Tools # -############################## - -install-dbmigrate: go.sum - CGO_LDFLAGS="$(CGO_LDFLAGS)" CGO_CFLAGS="$(CGO_CFLAGS)" $(GO) install $(BUILD_FLAGS) ./cmd/dbmigrate - -build-dbmigrate: validate-go-version go.sum - mkdir -p $(BUILDDIR) - CGO_LDFLAGS="$(CGO_LDFLAGS)" CGO_CFLAGS="$(CGO_CFLAGS)" $(GO) build -o $(BUILDDIR)/ $(BUILD_FLAGS) ./cmd/dbmigrate - ############################## # Release artifacts and plan # ############################## @@ -251,14 +199,10 @@ RELEASE_PIO=$(RELEASE_BIN)/provenanced RELEASE_ZIP_BASE=provenance-$(UNAME_S)-$(ARCH) RELEASE_ZIP_NAME=$(RELEASE_ZIP_BASE)-$(VERSION).zip RELEASE_ZIP=$(BUILDDIR)/$(RELEASE_ZIP_NAME) -DBMIGRATE=$(BUILDDIR)/dbmigrate -DBMIGRATE_ZIP_BASE=dbmigrate-$(UNAME_S)-$(ARCH) -DBMIGRATE_ZIP_NAME=$(DBMIGRATE_ZIP_BASE)-$(VERSION).zip -DBMIGRATE_ZIP=$(BUILDDIR)/$(DBMIGRATE_ZIP_NAME) .PHONY: build-release-clean build-release-clean: - rm -rf $(RELEASE_BIN) $(RELEASE_PLAN) $(RELEASE_CHECKSUM) $(RELEASE_ZIP) $(DBMIGRATE_ZIP) + rm -rf $(RELEASE_BIN) $(RELEASE_PLAN) $(RELEASE_CHECKSUM) $(RELEASE_ZIP) .PHONY: build-release-checksum build-release-checksum: $(RELEASE_CHECKSUM) @@ -299,17 +243,6 @@ $(RELEASE_ZIP): $(RELEASE_PIO) $(RELEASE_WASM) zip -u $(RELEASE_ZIP_NAME) bin/$(LIBWASMVM) bin/provenanced && \ cd .. 
-$(DBMIGRATE): - $(MAKE) build-dbmigrate - -.PHONY: build-dbmigrate-zip -build-dbmigrate-zip: $(DBMIGRATE_ZIP) - -$(DBMIGRATE_ZIP): $(DBMIGRATE) - cd $(BUILDDIR) && \ - zip -u $(DBMIGRATE_ZIP_NAME) dbmigrate && \ - cd .. - # gon packages the zip wrong. need bin/provenanced and bin/libwasmvm .PHONY: build-release-rezip build-release-rezip: ZIP_FROM = $(BUILDDIR)/$(RELEASE_ZIP_BASE).zip @@ -370,15 +303,7 @@ linkify: update-tocs: scripts/update-toc.sh x docs CONTRIBUTING.md -# Download, compile, and install rocksdb so that it can be used when doing a build. -rocksdb: - scripts/rocksdb_build_and_install.sh - -# Download, compile, and install cleveldb so that it can be used when doing a build. -cleveldb: - scripts/cleveldb_build_and_install.sh - -.PHONY: go-mod-cache go.sum lint clean format check-built linkify update-tocs rocksdb cleveldb +.PHONY: go-mod-cache go.sum lint clean format check-built linkify update-tocs validate-go-version: ## Validates the installed version of go against Provenance's minimum requirement. @@ -406,10 +331,6 @@ PACKAGES_SIMULATION := $(filter %/simulation%,$(PACKAGES)) TEST_PACKAGES=./... TEST_TARGETS := test-unit test-unit-amino test-unit-proto test-ledger-mock test-race test-ledger test-race -ifeq ($(WITH_CLEVELDB),true) - TAGS+= cleveldb -endif - # Test runs-specific rules. To add a new test target, just add # a new rule, customise TAGS, ARGS and/or TEST_PACKAGES ad libitum, and # append the new rule to the TEST_TARGETS list. diff --git a/cmd/dbmigrate/cmd/dbmigrate.go b/cmd/dbmigrate/cmd/dbmigrate.go deleted file mode 100644 index f41ecdedd4..0000000000 --- a/cmd/dbmigrate/cmd/dbmigrate.go +++ /dev/null @@ -1,214 +0,0 @@ -package cmd - -import ( - "context" - "fmt" - "os" - "strings" - - "github.com/spf13/cobra" - "github.com/spf13/viper" - - cmtcli "github.com/cometbft/cometbft/libs/cli" - - "github.com/cosmos/cosmos-sdk/client" - "github.com/cosmos/cosmos-sdk/client/flags" - "github.com/cosmos/cosmos-sdk/server" - - "github.com/provenance-io/provenance/app" - "github.com/provenance-io/provenance/cmd/dbmigrate/utils" - "github.com/provenance-io/provenance/cmd/provenanced/config" -) - -const ( - FlagBackupDir = "backup-dir" - FlagBatchSize = "batch-size" - FlagStagingDir = "staging-dir" - FlagStageOnly = "stage-only" - FlagSourceDbBackend = "source-db-backend" -) - -// NewDBMigrateCmd creates a command for migrating the provenanced database from one underlying type to another. -func NewDBMigrateCmd() *cobra.Command { - // Creating the client context early because the WithViper function - // creates a new Viper instance which wipes out the existing global one. - // Technically, it's not needed for the dbmigrate stuff, but having it - // makes loading all the rest of the config stuff easier. - clientCtx := client.Context{}. - WithInput(os.Stdin). - WithHomeDir(app.DefaultNodeHome). - WithViper("PIO") - - // Allow a user to define the log_level and log_format of this utility through the environment variables - // DBM_LOG_LEVEL and DBM_LOG_FORMAT. Otherwise, we want to default them to info and plain. - // Without this, the config's log_level and log_format are used. - // So, for example, if the config has log_level = error, this utility wouldn't output anything unless it hits an error. - // But that setting is desired mostly for the constant running of a node, as opposed to the single-time run of this utility. 
- logLevel := "info" - logFormat := "plain" - if v := os.Getenv("DBM_LOG_LEVEL"); v != "" { - logLevel = v - } - if v := os.Getenv("DBM_LOG_FORMAT"); v != "" { - logFormat = v - } - // Ignoring any errors here. If we can't set an environment variable, oh well. - // Using the values from the config file isn't the end of the world, and is preferable to not allowing execution. - _ = os.Setenv("PIO_LOG_LEVEL", logLevel) - _ = os.Setenv("PIO_LOG_FORMAT", logFormat) - - rv := &cobra.Command{ - Use: "dbmigrate ", - Short: "Provenance Blockchain Database Migration Tool", - Long: fmt.Sprintf(`Provenance Blockchain Database Migration Tool -Converts an existing Provenance Blockchain Database to a new backend type. - -Valid values: %s - -Migration process: -1. Copy the current data directory into a staging data directory, migrating any databases appropriately. - The staging directory is named data-dbmigrate-tmp-{timestamp}-{target dbtype} - and by default will be in the {home} directory. -2. Move the current data directory to the backup location. - The backup directory is named data-dbmigrate-backup-{timestamp}-{dbtypes} - and by default will be in the {home} directory. -3. Move the staging data directory into place as the current data directory. -4. Update the config file to reflect the new database backend type. -`, strings.Join(utils.GetPossibleDBTypes(), ", ")), - Args: cobra.ExactArgs(1), - PersistentPreRunE: func(command *cobra.Command, args []string) error { - command.SetOut(command.OutOrStdout()) - command.SetErr(command.ErrOrStderr()) - - if command.Flags().Changed(flags.FlagHome) { - homeDir, _ := command.Flags().GetString(flags.FlagHome) - clientCtx = clientCtx.WithHomeDir(homeDir) - } - - if err := client.SetCmdClientContext(command, clientCtx); err != nil { - return err - } - - return config.InterceptConfigsPreRunHandler(command) - }, - RunE: func(command *cobra.Command, args []string) error { - batchSizeMB, err := command.Flags().GetUint(FlagBatchSize) - if err != nil { - return fmt.Errorf("could not parse --%s option: %w", FlagBatchSize, err) - } - - sourceDB, err := command.Flags().GetString(FlagSourceDbBackend) - if err != nil { - return fmt.Errorf("could not parse --%s option: %w", FlagSourceDbBackend, err) - } - - migrator := &utils.Migrator{ - TargetDBType: strings.ToLower(args[0]), - HomePath: client.GetClientContextFromCmd(command).HomeDir, - BatchSize: batchSizeMB * utils.BytesPerMB, - SourceDBType: sourceDB, - } - - migrator.StageOnly, err = command.Flags().GetBool(FlagStageOnly) - if err != nil { - return fmt.Errorf("could not parse --%s flag: %w", FlagStageOnly, err) - } - - migrator.BackupDir, err = command.Flags().GetString(FlagBackupDir) - if err != nil { - return fmt.Errorf("could not parse --%s option: %w", FlagBackupDir, err) - } - - migrator.StagingDir, err = command.Flags().GetString(FlagStagingDir) - if err != nil { - return fmt.Errorf("could not parse --%s option: %w", FlagStagingDir, err) - } - - err = DoMigrateCmd(command, migrator) - if err != nil { - server.GetServerContextFromCmd(command).Logger.Error(err.Error()) - // If this returns an error, the help is printed. But that isn't wanted here. - // But since we got an error, it shouldn't exit with code 0 either. - // So we exit 1 here instead of returning an error and letting the caller handle the exit.
- os.Exit(1) - } - return nil - }, - } - rv.Flags().String(FlagBackupDir, "", "directory to hold the backup directory (default {home})") - rv.Flags().String(FlagStagingDir, "", "directory to hold the staging directory (default {home})") - rv.Flags().Uint(FlagBatchSize, 2_048, "(in megabytes) after a batch reaches this size it is written and a new one is started (0 = unlimited)") - rv.Flags().Bool(FlagStageOnly, false, "only migrate/copy the data (do not backup and replace the data directory and do not update the config)") - rv.Flags().String(FlagSourceDbBackend, "", "forces a source database type instead of trying to detect it.") - return rv -} - -// Execute sets up and executes the provided command. -func Execute(command *cobra.Command) error { - ctx := context.Background() - ctx = context.WithValue(ctx, client.ClientContextKey, &client.Context{}) - ctx = context.WithValue(ctx, server.ServerContextKey, server.NewDefaultContext()) - - command.PersistentFlags().String(cmtcli.HomeFlag, app.DefaultNodeHome, "directory for config and data") - - return command.ExecuteContext(ctx) -} - -// DoMigrateCmd does all the work associated with the dbmigrate command (assuming that inputs have been validated). -func DoMigrateCmd(command *cobra.Command, migrator *utils.Migrator) error { - logger := server.GetServerContextFromCmd(command).Logger - logger.Info("Setting up database migrations.") - - err := migrator.Initialize() - if err != nil { - return err - } - logger.Info("Starting migrations.") - err = migrator.Migrate(logger) - if err != nil { - return err - } - if !migrator.StageOnly { - logger.Info("Updating config.") - var oldValue string - oldValue, err = UpdateDBBackendConfigValue(command, migrator.TargetDBType) - if err != nil { - return err - } - logger.Info("Config Updated.", "key", "db_backend", "was", oldValue, "is now", migrator.TargetDBType) - } - logger.Info("Done migrating databases.") - return nil -} - -// UpdateDBBackendConfigValue updates the db backend value in the config file and returns the value it used to be. -func UpdateDBBackendConfigValue(command *cobra.Command, newValue string) (string, error) { - // Warning: This wipes out all the viper setup stuff up to this point. - // It needs to be done so that just the file values or defaults are loaded - // without considering environment variables. - // This is needed, at least, so that the log_level and log_format entries aren't changed. - // It can't be undone because viper.New() overwrites the global Viper instance, and there is no way to set it back to what it was. - // The contexts could get the original viper instance, but there's no guarantee that nothing uses the global functions. - // So I figure it's best to at least keep them all in sync. - // Ideally, it doesn't matter, though, since everything *should* be reloaded the same way (but who really knows). - clientCtx := client.GetClientContextFromCmd(command) - clientCtx.Viper = viper.New() - server.GetServerContextFromCmd(command).Viper = clientCtx.Viper - if err := client.SetCmdClientContext(command, clientCtx); err != nil { - return "", fmt.Errorf("could not set client context: %w", err) - } - - // Now that we have a clean viper, load the config from files again. 
- if err := config.LoadConfigFromFiles(command); err != nil { - return "", fmt.Errorf("could not load config from files: %w", err) - } - - tmConfig, err := config.ExtractTmConfig(command) - if err != nil { - return "", fmt.Errorf("could not extract Tendermint config: %w", err) - } - oldValue := tmConfig.DBBackend - tmConfig.DBBackend = newValue - config.SaveConfigs(command, nil, tmConfig, nil, false) - return oldValue, nil -} diff --git a/cmd/dbmigrate/main.go b/cmd/dbmigrate/main.go deleted file mode 100644 index 36508f41e9..0000000000 --- a/cmd/dbmigrate/main.go +++ /dev/null @@ -1,25 +0,0 @@ -package main - -import ( - "errors" - "os" - - "github.com/provenance-io/provenance/cmd/dbmigrate/cmd" - cmderrors "github.com/provenance-io/provenance/cmd/errors" -) - -func main() { - rootCmd := cmd.NewDBMigrateCmd() - if err := cmd.Execute(rootCmd); err != nil { - var srvErrP *cmderrors.ExitCodeError - var srvErr cmderrors.ExitCodeError - switch { - case errors.As(err, &srvErrP): - os.Exit(int(*srvErrP)) - case errors.As(err, &srvErr): - os.Exit(int(srvErr)) - default: - os.Exit(1) - } - } -} diff --git a/cmd/dbmigrate/utils/badgerdb.go b/cmd/dbmigrate/utils/badgerdb.go deleted file mode 100644 index 18f301f6d1..0000000000 --- a/cmd/dbmigrate/utils/badgerdb.go +++ /dev/null @@ -1,17 +0,0 @@ -//go:build badgerdb -// +build badgerdb - -package utils - -import ( - dbm "github.com/cometbft/cometbft-db" -) - -// This file is included when built with the badgerdb tag (which matches the tag Tendermint looks for). -// Tendermint does all the heavy lifting, but doesn't expose a way to identify which DB types are available. -// That list would also have MemDB, which we don't want in here anyway. -// That's all this is doing, just identifying that it was built with that tag and that this DB type is available. - -func init() { - AddPossibleDBType(dbm.BadgerDBBackend) -} diff --git a/cmd/dbmigrate/utils/boltdb.go b/cmd/dbmigrate/utils/boltdb.go deleted file mode 100644 index ff9379612a..0000000000 --- a/cmd/dbmigrate/utils/boltdb.go +++ /dev/null @@ -1,17 +0,0 @@ -//go:build boltdb -// +build boltdb - -package utils - -import ( - dbm "github.com/cometbft/cometbft-db" -) - -// This file is included when built with the boltdb tag (which matches the tag Tendermint looks for). -// Tendermint does all the heavy lifting, but doesn't expose a way to identify which DB types are available. -// That list would also have MemDB, which we don't want in here anyway. -// That's all this is doing, just identifying that it was built with that tag and that this DB type is available. - -func init() { - AddPossibleDBType(dbm.BoltDBBackend) -} diff --git a/cmd/dbmigrate/utils/cleveldb.go b/cmd/dbmigrate/utils/cleveldb.go deleted file mode 100644 index 576fe22339..0000000000 --- a/cmd/dbmigrate/utils/cleveldb.go +++ /dev/null @@ -1,17 +0,0 @@ -//go:build cleveldb -// +build cleveldb - -package utils - -import ( - dbm "github.com/cometbft/cometbft-db" -) - -// This file is included when built with the cleveldb tag (which matches the tag Tendermint looks for). -// Tendermint does all the heavy lifting, but doesn't expose a way to identify which DB types are available. -// That list would also have MemDB, which we don't want in here anyway. -// That's all this is doing, just identifying that it was built with that tag and that this DB type is available. 
- -func init() { - AddPossibleDBType(dbm.CLevelDBBackend) -} diff --git a/cmd/dbmigrate/utils/migrator.go b/cmd/dbmigrate/utils/migrator.go deleted file mode 100644 index 19ea3f806e..0000000000 --- a/cmd/dbmigrate/utils/migrator.go +++ /dev/null @@ -1,956 +0,0 @@ -package utils - -import ( - "errors" - "fmt" - "os" - "os/signal" - "path/filepath" - "sort" - "strings" - "syscall" - "time" - - copier "github.com/otiai10/copy" - - dbm "github.com/cometbft/cometbft-db" - - "cosmossdk.io/log" -) - -const ( - // BytesPerMB is the number of bytes in a megabyte. - BytesPerMB = 1_048_576 - // unknownDBBackend is mostly a dbm.BackendType used in output as a string. - // It indicates that the backend is unknown. - unknownDBBackend = dbm.BackendType("UNKNOWN") -) - -// Note: The PossibleDBTypes variable is a map instead of a slice because trying to append to it was causing one type to -// stomp out the append from another type (concurrency issue?). - -// PossibleDBTypes is a map of strings to BackendTypes representing the Backend types that can be used by this utility. -var PossibleDBTypes = map[string]dbm.BackendType{} - -func init() { - PossibleDBTypes["goleveldb"] = dbm.GoLevelDBBackend -} - -// AddPossibleDBType adds a possible db backend type. -func AddPossibleDBType(dbType dbm.BackendType) { - PossibleDBTypes[string(dbType)] = dbType -} - -// GetPossibleDBTypes gets a slice of strings listing all db types that this can use. -func GetPossibleDBTypes() []string { - rv := make([]string, len(PossibleDBTypes)) - i := 0 - for k := range PossibleDBTypes { - rv[i] = k - i++ - } - sort.Strings(rv) - return rv -} - -// IsPossibleDBType checks if the given dbType string is one that this migrator can handle. -func IsPossibleDBType(dbType string) bool { - _, ok := PossibleDBTypes[dbType] - return ok -} - -// Migrator is an object to help guide a migration. -// TargetDBType must be defined. All others can be left unset, or can use defaults from ApplyDefaults(). -// If using defaults for any directories, you probably need to set HomePath too though. -type Migrator struct { - // HomePath is the path to the home directory (should contain the config and data directories). - HomePath string - // StagingDir is the directory that will hold the staging data directory. - // Default is HomePath - StagingDir string - // BackupDir is the directory that will hold the backup data directory. - // Default is HomePath - BackupDir string - - // TargetDBType is the type of the target (new) DB. - TargetDBType string - - // SourceDBType is the type of the source (old) DB. - SourceDBType string - - // SourceDataDir is the path to the source (current) data directory. - // Default is { HomePath }/data - SourceDataDir string - // StagingDataDir is the path to the staging (new) data directory. - // Default is { StagingDir }/data-dbmigrate-tmp-{timestamp}-{ TargetDBType } - StagingDataDir string - // BackupDataDir is the path to where the current data directory will be moved when done. - // Default is { BackupDir }/data-dbmigrate-backup-{timestamp}-{dbtypes} - BackupDataDir string - - // BatchSize is the threshold (in bytes) after which a batch is written and a new batch is created. - // Batch sizes are measured using only key and value lengths (as opposed to disk space). - // Default is 0 (unlimited) - BatchSize uint - - // StageOnly indicates that only the data migration and data copying should happen. - // If true, the migrator should stop after finishing the staging data directory. 
- // That is, it won't move the data dir to the backup location, move the staging directory into place, or update the config. - StageOnly bool - - // ToConvert is all of the DB directories to migrate/convert. - // Each entry is relative to the data directory. - ToConvert []string - // ToCopy is all the non-DB files and directories that should be copied from the source to the new data directory. - // Each entry is relative to the data directory. - ToCopy []string - - // Permissions are the permissions to use on any directories created. - // Default is to match the source directory, or else 0700. - Permissions os.FileMode - - // StatusPeriod is the max time period between status messages. - // Must be at least 1 second. Default is 5 seconds. - StatusPeriod time.Duration - // DirDateFormat is the format string used in dated directory names. - // Default is "2006-01-02-15-04-05". - DirDateFormat string -} - -// migrationManager is a struct with information about the status of a migrator. -type migrationManager struct { - Migrator - - // Status is a short message about what's currently going on. - Status string - // TimeStarted is the time that the migration was started. - // This is set during the call to Migrate. - TimeStarted time.Time - // TimeFinished is the time that the migration was finished. - // This is set during the call to Migrate. - TimeFinished time.Time - // Summaries is a map of ToConvert entries, to a short summary string about the migration of that entry. - // Entries are set during the call to Migrate and are the return values of each MigrateDBDir call. - Summaries map[string]string - // SourceTypes is a map of ToConvert entries, to their backend type. - SourceTypes map[string]dbm.BackendType - - // Logger is the Logger to use for logging log messages. - Logger log.Logger - - // StatusTicker is the ticker used to issue regular status log messages. - StatusTicker *time.Ticker - // StopTickerChan is a channel used to stop the regular status log messages. - StopTickerChan chan bool - // StatusKeyvals is a function that returns keyvals used in status log messages. - StatusKeyvals func() []interface{} - - // LogStagingDirError indicates whether or not to log an error about the staging dir existing (for abnormal termination). - LogStagingDirError bool - // SigChan is a channel used to Notify certain os signals. - SigChan chan os.Signal - // StopSigChan is a channel used to stop the special signal handling. - StopSigChan chan bool -} - -// Initialize prepares this Migrator by doing the following: -// 1. Calls ApplyDefaults() -// 2. Checks ValidateBasic() -// 3. Calls ReadSourceDataDir() -func (m *Migrator) Initialize() error { - m.ApplyDefaults() - var err error - if err = m.ValidateBasic(); err != nil { - return err - } - return m.ReadSourceDataDir() -} - -// ApplyDefaults fills in the defaults that it can, for values that aren't set yet. 
-func (m *Migrator) ApplyDefaults() { - if len(m.StagingDir) == 0 && len(m.HomePath) > 0 { - m.StagingDir = m.HomePath - } - if len(m.BackupDir) == 0 && len(m.HomePath) > 0 { - m.BackupDir = m.HomePath - } - if len(m.SourceDataDir) == 0 && len(m.HomePath) > 0 { - m.SourceDataDir = filepath.Join(m.HomePath, "data") - } - if len(m.DirDateFormat) == 0 { - m.DirDateFormat = "2006-01-02-15-04" - } - if len(m.StagingDataDir) == 0 && len(m.StagingDir) > 0 { - m.StagingDataDir = filepath.Join(m.StagingDir, fmt.Sprintf("data-dbmigrate-tmp-%s-%s", time.Now().Format(m.DirDateFormat), m.TargetDBType)) - } - if len(m.BackupDataDir) == 0 && len(m.BackupDir) > 0 { - m.BackupDataDir = filepath.Join(m.BackupDir, "data-dbmigrate-backup-"+time.Now().Format(m.DirDateFormat)) - } - // If we can't source the data directory, we probably can't read it and an error will be returned from something else. - // For simplicity, we're not really going to care about that error right here, though. - if m.Permissions == 0 && len(m.SourceDataDir) > 0 { - sourceDirInfo, err := os.Stat(m.SourceDataDir) - if err == nil { - // Mask the Mode to get just the permission bits. - m.Permissions = sourceDirInfo.Mode() & 0777 - } - } - if m.Permissions == 0 { - m.Permissions = 0700 - } - if m.StatusPeriod == 0 { - m.StatusPeriod = 5 * time.Second - } -} - -// ValidateBasic makes sure that everything is set in this Migrator. -func (m Migrator) ValidateBasic() error { - if len(m.StagingDir) == 0 { - return errors.New("no StagingDir defined") - } - if len(m.BackupDir) == 0 { - return errors.New("no BackupDir defined") - } - if len(m.TargetDBType) == 0 { - return errors.New("no TargetDBType defined") - } - if !IsPossibleDBType(m.TargetDBType) { - return fmt.Errorf("invalid TargetDBType: %q - must be one of: %s", m.TargetDBType, strings.Join(GetPossibleDBTypes(), ", ")) - } - if len(m.SourceDataDir) == 0 { - return errors.New("no SourceDataDir defined") - } - if len(m.StagingDataDir) == 0 { - return errors.New("no StagingDataDir defined") - } - if len(m.BackupDataDir) == 0 { - return errors.New("no BackupDataDir defined") - } - if m.Permissions == 0 { - return errors.New("no Permissions defined") - } - if m.StatusPeriod < time.Second { - return fmt.Errorf("the StatusPeriod %s cannot be less than 1s", m.StatusPeriod) - } - if len(m.DirDateFormat) == 0 { - return errors.New("no DirDateFormat defined") - } - if len(m.SourceDBType) > 0 && !IsPossibleDBType(m.SourceDBType) { - return fmt.Errorf("invalid SourceDBType: %q - must be one of: %s", m.SourceDBType, strings.Join(GetPossibleDBTypes(), ", ")) - } - return nil -} - -// ReadSourceDataDir gets the contents of the SourceDataDir and populates ToConvert and ToCopy. -// Anything in those two fields prior to calling this, will be overwritten. -// -// Does nothing if SourceDataDir is not set. -func (m *Migrator) ReadSourceDataDir() error { - if len(m.SourceDataDir) > 0 { - var err error - m.ToConvert, m.ToCopy, err = GetDataDirContents(m.SourceDataDir) - if err != nil { - return fmt.Errorf("error reading %q: %w", m.SourceDataDir, err) - } - if len(m.ToConvert) == 0 { - return fmt.Errorf("could not identify any db directories in %s", m.SourceDataDir) - } - } - return nil -} - -// Migrate converts all database dirs in ToConvert from the source underlying type in the SourceDataDir -// to the target type in the StagingDataDir. -// It then copies everything in ToCopy from the SourceDataDir to the StagingDataDir. 
-// It then moves the SourceDataDir to BackupDataDir and moves StagingDataDir into place where SourceDataDir was. -func (m *Migrator) Migrate(logger log.Logger) (errRv error) { - defer func() { - if r := recover(); r != nil { - errRv = fmt.Errorf("recovered from panic: %v", r) - } - }() - - if err := m.ValidateBasic(); err != nil { - return err - } - - manager, err := m.startMigratorManager(logger) - if err != nil { - return err - } - defer func() { - manager.Logger = logger - manager.Close() - }() - - // Now we can get started. - logger.Info(manager.MakeSummaryString()) - manager.Status = "making staging dir" - err = os.MkdirAll(m.StagingDataDir, m.Permissions) - if err != nil { - return fmt.Errorf("could not create staging data directory: %w", err) - } - manager.LogStagingDirError = true - manager.LogWithRunTime(fmt.Sprintf("Converting %d Individual DBs.", len(m.ToConvert))) - for i, dbDir := range m.ToConvert { - manager.Logger = logger.With( - "db", strings.TrimSuffix(dbDir, ".db"), - "progress", fmt.Sprintf("%d/%d", i+1, len(m.ToConvert)), - ) - manager.Summaries[dbDir], err = manager.MigrateDBDir(dbDir) - if err != nil { - return err - } - } - manager.Logger = logger - - if len(manager.SourceTypes) != 0 { - m.BackupDataDir = m.BackupDataDir + "-" + strings.Join(manager.GetSourceDBTypes(), "-") - } - - manager.LogWithRunTime(fmt.Sprintf("Copying %d items.", len(m.ToCopy))) - for i, entry := range m.ToCopy { - manager.LogWithRunTime(fmt.Sprintf("%d/%d: Copying %s", i+1, len(m.ToCopy), entry)) - if err = copier.Copy(filepath.Join(m.SourceDataDir, entry), filepath.Join(m.StagingDataDir, entry)); err != nil { - return fmt.Errorf("could not copy %s: %w", entry, err) - } - } - - if m.StageOnly { - manager.LogWithRunTime("Stage Only flag provided.", "dir", m.StagingDir) - } else { - manager.Status = "moving old data dir" - manager.StatusKeyvals = func() []interface{} { - return []interface{}{ - "from", m.SourceDataDir, - "to", m.BackupDataDir, - } - } - manager.LogWithRunTime("Moving existing data directory to backup location.", manager.StatusKeyvals()...) - if err = os.Rename(m.SourceDataDir, m.BackupDataDir); err != nil { - return fmt.Errorf("could not back up existing data directory: %w", err) - } - - manager.Status = "moving new data dir" - manager.StatusKeyvals = func() []interface{} { - return []interface{}{ - "from", m.StagingDataDir, - "to", m.SourceDataDir, - } - } - manager.LogWithRunTime("Moving new data directory into place.", manager.StatusKeyvals()...) - if err = os.Rename(m.StagingDataDir, m.SourceDataDir); err != nil { - return fmt.Errorf("could not move new data directory into place: %w", err) - } - manager.StatusKeyvals = noKeyvals - } - manager.LogStagingDirError = false - manager.Finish() - - logger.Info(manager.MakeSummaryString()) - return nil -} - -// startMigratorManager creates a migrationManager and initializes it. -// It must later be closed. -func (m Migrator) startMigratorManager(logger log.Logger) (*migrationManager, error) { - rv := &migrationManager{ - Migrator: m, - Status: "starting", - TimeStarted: time.Now(), - Summaries: map[string]string{}, - SourceTypes: map[string]dbm.BackendType{}, - Logger: logger, - StatusTicker: time.NewTicker(m.StatusPeriod), - StatusKeyvals: noKeyvals, - StopTickerChan: make(chan bool, 1), - SigChan: make(chan os.Signal, 1), - StopSigChan: make(chan bool, 1), - } - // Monitor for the signals and handle them appropriately. 
- proc, err := os.FindProcess(os.Getpid()) - if err != nil { - return nil, fmt.Errorf("could not identify the running process: %w", err) - } - go func() { - defer func() { - if r := recover(); r != nil { - rv.LogErrorWithRunTime("The signal watcher subprocess encountered a panic.", "panic", fmt.Sprintf("%v", r)) - } - }() - select { - case s := <-rv.SigChan: - signal.Stop(rv.SigChan) - if rv.LogStagingDirError { - rv.LogErrorWithRunTime("The staging directory still exists due to early termination.", "dir", m.StagingDataDir) - } - err2 := proc.Signal(s) - if err2 != nil { - rv.LogErrorWithRunTime("Error propagating signal.", "error", err2) - } - return - case <-rv.StopSigChan: - return - } - }() - signal.Notify(rv.SigChan, os.Interrupt, syscall.SIGTERM, syscall.SIGSEGV, syscall.SIGQUIT) - // Fire up another sub-process for outputting a status message every now and then. - rv.StatusTicker = time.NewTicker(m.StatusPeriod) - go func() { - for { - select { - case <-rv.StatusTicker.C: - rv.LogWithRunTime(fmt.Sprintf("Status: %s", rv.Status), rv.StatusKeyvals()...) - case <-rv.StopTickerChan: - return - } - } - }() - return rv, nil -} - -// MigrateDBDir creates a copy of the given db directory, converting it from one underlying type to another. -func (m *migrationManager) MigrateDBDir(dbDir string) (summary string, err error) { - m.LogWithRunTime("Individual DB Migration: Setting up.") - m.Status = "setting up" - - summaryError := "error" - sourceDir, dbName := splitDBPath(m.SourceDataDir, dbDir) - targetDir, _ := splitDBPath(m.StagingDataDir, dbDir) - - // Define some counters used in log messages, and a function to make it easy to add them all to log messages. - writtenEntries := uint(0) - batchEntries := uint(0) - batchBytes := uint(0) - batchIndex := uint(1) - m.StatusKeyvals = func() []interface{} { - return []interface{}{ - "batch index", commaString(batchIndex), - "batch size (megabytes)", commaString(batchBytes / BytesPerMB), - "batch entries", commaString(batchEntries), - "db total entries", commaString(writtenEntries + batchEntries), - } - } - logWithStats := func(msg string, keyvals ...interface{}) { - m.LogWithRunTime(msg, append(m.StatusKeyvals(), keyvals...)...) - } - - // There's several things that need closing and sometimes calling close can cause a segmentation fault that, - // for some reason, doesn't reach the SigChan notification set up in the Migrate method. - // So just to have clearer control over closing order, they're all defined at once and closed in a single defer function. - var sourceDB, targetDB dbm.DB - var iter dbm.Iterator - var batch dbm.Batch - sourceDBType := unknownDBBackend - defer func() { - m.StatusKeyvals = noKeyvals - // iter before sourceDB because closing the sourceDB might remove things needed for the iterator to close. - if iter != nil { - iter.Close() - } - if sourceDB != nil { - sourceDB.Close() - } - // batch before targetDB because closing the targetDB might remove things needed for the batch to close. - if batch != nil { - batch.Close() - } - if targetDB != nil { - targetDB.Close() - } - // always wrap any error with some extra context. 
- if err != nil { - err = fmt.Errorf("could not convert %q from %q to %q: %w", dbDir, sourceDBType, m.TargetDBType, err) - } - }() - - m.Status = "detecting db type" - sourceDBType, ok := dbm.BackendType(m.Migrator.SourceDBType), true - if len(m.Migrator.SourceDBType) == 0 { - sourceDBType, ok = DetectDBType(dbName, sourceDir) - } - if !ok { - return summaryError, fmt.Errorf("could not determine db type: %s", filepath.Join(m.SourceDataDir, dbDir)) - } - - if !IsPossibleDBType(string(sourceDBType)) { - return summaryError, fmt.Errorf("cannot read source db of type %q", sourceDBType) - } - m.SourceTypes[dbDir] = sourceDBType - - // In at least one case (the snapshots/metadata db), there's a sub-directory that needs to be created in order to - // safely open a new database in it. - if targetDir != m.StagingDataDir { - m.Status = "making sub-dir" - err = os.MkdirAll(targetDir, m.Permissions) - if err != nil { - return summaryError, fmt.Errorf("could not create target sub-directory: %w", err) - } - } - - // If they're both the same type, just copy it and be done. - targetDBBackendType := dbm.BackendType(m.TargetDBType) - if sourceDBType == targetDBBackendType { - m.Status = "copying db" - from := filepath.Join(m.SourceDataDir, dbDir) - to := filepath.Join(m.StagingDataDir, dbDir) - m.StatusKeyvals = func() []interface{} { - return []interface{}{ - "from", from, - "to", to, - } - } - m.LogWithRunTime("Source and Target DB Types are the same. Copying instead of migrating.", "db type", m.TargetDBType) - if err = copier.Copy(from, to); err != nil { - return summaryError, fmt.Errorf("could not copy db: %w", err) - } - m.Status = "done" - m.LogWithRunTime("Individual DB Migration: Done.") - return "Copied", nil - } - - m.Status = "opening source db" - sourceDB, err = dbm.NewDB(dbName, sourceDBType, sourceDir) - if err != nil { - return summaryError, fmt.Errorf("could not open %q source db: %w", dbName, err) - } - - m.Status = "opening target db" - targetDB, err = dbm.NewDB(dbName, targetDBBackendType, targetDir) - if err != nil { - return summaryError, fmt.Errorf("could not open %q target db: %w", dbName, err) - } - - m.Status = "making iterator" - iter, err = sourceDB.Iterator(nil, nil) - if err != nil { - return summaryError, fmt.Errorf("could not create %q source iterator: %w", dbName, err) - } - - // There's a couple places in here where we need to write and close the batch. But the safety stuff (on errors) - // is needed in both places, so it's pulled out into this anonymous function. - writeAndCloseBatch := func() error { - m.Status = "writing batch" - // Using WriteSync here instead of Write because sometimes the Close was causing a segfault, and maybe this helps? - if err = batch.WriteSync(); err != nil { - // If the write fails, closing the db can sometimes cause a segmentation fault. - targetDB = nil - return fmt.Errorf("could not write %q batch: %w", dbName, err) - } - writtenEntries += batchEntries - m.Status = "closing batch" - err = batch.Close() - if err != nil { - // If closing the batch fails, closing the db can sometimes cause a segmentation fault. - targetDB = nil - // Similarly, calling close a second time can segfault. - batch = nil - return fmt.Errorf("could not close %q batch: %w", dbName, err) - } - return nil - } - summaryWrittenEntries := func() string { - // 13 characters accounts right-justifies the numbers as long as they're under 10 billion (9,999,999,999). - // Testnet's application db (as of writing this) has just over 1 billion entries. 
- return fmt.Sprintf("Migrated %13s entries from %s to %s.", commaString(writtenEntries), sourceDBType, m.TargetDBType) - } - - m.LogWithRunTime("Individual DB Migration: Starting.", "source db type", sourceDBType) - batch = targetDB.NewBatch() - m.Status = "starting iteration" - for ; iter.Valid(); iter.Next() { - m.Status = "getting entry key" - k := iter.Key() - m.Status = "getting entry value" - v := iter.Value() - if v == nil { - v = []byte{} - } - m.Status = "adding entry to batch" - if err = batch.Set(k, v); err != nil { - return summaryWrittenEntries(), fmt.Errorf("could not set %q key/value: %w", dbName, err) - } - m.Status = "counting" - batchEntries++ - batchBytes += uint(len(v) + len(k)) - if m.BatchSize > 0 && batchBytes >= m.BatchSize { - logWithStats("Writing intermediate batch.") - if err = writeAndCloseBatch(); err != nil { - return summaryWrittenEntries(), err - } - - m.Status = "batch reset" - batchIndex++ - batchBytes = 0 - batchEntries = 0 - logWithStats("Starting new batch.") - batch = targetDB.NewBatch() - } - m.Status = "getting next entry" - } - - m.Status = "done iterating" - if err = iter.Error(); err != nil { - return summaryWrittenEntries(), fmt.Errorf("iterator error: %w", err) - } - - logWithStats("Writing final batch.") - if err = writeAndCloseBatch(); err != nil { - return summaryWrittenEntries(), err - } - - m.Status = "done" - m.LogWithRunTime("Individual DB Migration: Done.", "total entries", commaString(writtenEntries)) - return summaryWrittenEntries(), nil -} - -func (m *migrationManager) Finish() { - m.StopSigChan <- true - m.StopTickerChan <- true - m.StatusTicker.Stop() - m.TimeFinished = time.Now() -} - -// Close closes up shop on a migrationManager. -func (m *migrationManager) Close() { - m.StatusKeyvals = noKeyvals - close(m.StopSigChan) - signal.Stop(m.SigChan) - close(m.SigChan) - if m.LogStagingDirError { - m.LogErrorWithRunTime("The staging directory still exists.", "dir", m.StagingDataDir) - } -} - -// MakeSummaryString creates a multi-line string with a summary of a migration. -func (m migrationManager) MakeSummaryString() string { - var sb strings.Builder - addLine := func(format string, a ...interface{}) { - sb.WriteString(fmt.Sprintf(format, a...) 
+ "\n") - } - addLine("Summary:") - status := "Not Started" - copyHead := " To Copy" - migrateHead := " To Migrate" - switch { - case !m.TimeFinished.IsZero() && !m.TimeStarted.IsZero(): - status = "Finished" - copyHead = "Copied" - migrateHead = " Migrated" - case !m.TimeStarted.IsZero(): - status = "Running" - copyHead = "Copying" - migrateHead = " Migrating" - } - addLine("%16s: %s", "Status", status) - addLine("%16s: %s", "Run Time", m.GetRunTime()) - addLine("%16s: %s", "Data Dir", m.SourceDataDir) - addLine("%16s: %s", "Staging Dir", m.StagingDir) - if m.StageOnly { - addLine("%16s: %s", "Staging Only", "true") - } else { - addLine("%16s: %s", "Backup Dir", m.BackupDataDir) - } - addLine("%16s: %s megabytes", "Batch Size", commaString(m.BatchSize/BytesPerMB)) - addLine("%16s: %s", "Source DB Type", m.SourceDBType) - addLine("%16s: %s", "New DB Type", m.TargetDBType) - addLine("%16s: %s", fmt.Sprintf("%s (%d)", copyHead, len(m.ToCopy)), strings.Join(m.ToCopy, " ")) - if len(m.Summaries) == 0 { - addLine("%16s: %s", fmt.Sprintf("%s (%d)", migrateHead, len(m.ToConvert)), strings.Join(m.ToConvert, " ")) - } else { - addLine("%16s:", fmt.Sprintf("%s (%d)", migrateHead, len(m.ToConvert))) - for _, dbDir := range m.ToConvert { - s, k := m.Summaries[dbDir] - if !k { - s = "UNKNOWN" - } - addLine("%22s: %s", strings.TrimSuffix(dbDir, ".db"), s) - } - } - return sb.String() -} - -// GetRunTime gets a string of the run time of this manager. -// Output is a time.Duration string, e.g. "25m36.910647946s" -func (m migrationManager) GetRunTime() string { - if m.TimeStarted.IsZero() { - return "0.000000000s" - } - if m.TimeFinished.IsZero() { - return time.Since(m.TimeStarted).String() - } - return m.TimeFinished.Sub(m.TimeStarted).String() -} - -// LogWithRunTime is a wrapper on Logger.Info that always includes the run time. -func (m migrationManager) LogWithRunTime(msg string, keyvals ...interface{}) { - m.Logger.Info(msg, append(keyvals, "run time", m.GetRunTime())...) -} - -// LogErrorWithRunTime is a wrapper on Logger.Error that always includes the run time. -func (m migrationManager) LogErrorWithRunTime(msg string, keyvals ...interface{}) { - m.Logger.Error(msg, append(keyvals, "run time", m.GetRunTime())...) -} - -func (m migrationManager) GetSourceDBTypes() []string { - rv := []string{} - for _, dbType := range m.SourceTypes { - found := false - for _, v := range rv { - if v == string(dbType) { - found = true - break - } - } - if !found { - rv = append(rv, string(dbType)) - } - } - sort.Strings(rv) - return rv -} - -// noKeyvals returns an empty slice. It's handy for setting migrationManager.StatusKeyvals -func noKeyvals() []interface{} { - return []interface{}{} -} - -// splitDBPath combine the provided path elements into a full path to a db directory, then -// breaks it down two parts: -// 1) A path to the directory to hold the db directory, -// 2) The name of the db. -// For example: "/foo", "bar/baz.db" will return "/foo/bar" and "baz". -func splitDBPath(elem ...string) (string, string) { - base, name := filepath.Split(filepath.Join(elem...)) - return filepath.Clean(base), strings.TrimSuffix(name, ".db") -} - -// GetDataDirContents gets the contents of a directory separated into database directories and non-database entries. -// The first return value will contain an entry for each database directory (including if they are in sub-directories). -// The second return value will contain all entries (files or directories) under dataDirPath that are not part of a database directory. 
-// Returned strings are relative to dataDirPath. -// -// Example return values: -// -// return param 1: []string{"application.db", "blockstore.db", "evidence.db", "snapshots/metadata.db", "state.db", "tx_index.db"} -// return param 2: []string{"cs.wal", "priv_validator_state.json", "wasm"} -func GetDataDirContents(dataDirPath string) ([]string, []string, error) { - contents, err := os.ReadDir(dataDirPath) - if err != nil { - return nil, nil, err - } - dbs := make([]string, 0) - nonDBs := make([]string, 0) - // The db dirs can have a TON of files (10k+). Most of them are just numbers with an extension. - // This loop short-circuits when it finds a file that starts with "MANIFEST", which is significantly - // more likely to be closer to the back than the front. So to save lots of iterations, the contents is looped through backwards. - for i := len(contents) - 1; i >= 0; i-- { - entry := contents[i] - switch { - case entry.IsDir(): - // goleveldb, cleveldb, and rocksdb name their db directories with a .db suffix. - if filepath.Ext(entry.Name()) == ".db" { - dbs = append(dbs, entry.Name()) - } else { - subDBs, subNonDBs, err := GetDataDirContents(filepath.Join(dataDirPath, entry.Name())) - if err != nil { - return nil, nil, err - } - if len(subDBs) == 1 && subDBs[0] == "." { - dbs = append(dbs, entry.Name()) - } else { - for _, dbDir := range subDBs { - dbs = append(dbs, filepath.Join(entry.Name(), dbDir)) - } - } - if len(subDBs) > 0 { - for _, nonDBDir := range subNonDBs { - nonDBs = append(nonDBs, filepath.Join(entry.Name(), nonDBDir)) - } - } else { - nonDBs = append(nonDBs, entry.Name()) - } - } - case strings.HasPrefix(entry.Name(), "MANIFEST"): - // badger db does not use the .db suffix on their database directories. - // So to identify them, we have to look for the MANIFEST files. - // HasPrefix is used here instead of == because the other DB types have files that start with MANIFEST- - // and so hopefully this will catch other db types that don't use the .db suffix on their directories. - // The .db test is still also used to save some recursive calls and extra processing. - return []string{"."}, nil, nil - case filepath.Ext(entry.Name()) == ".db": - // boltdb has executable files with a .db extension. - info, err := entry.Info() - if err != nil { - return nil, nil, err - } - // Check if the file has at least one executable bit set. - if info.Mode()&0111 != 0 { - dbs = append(dbs, entry.Name()) - } else { - nonDBs = append(nonDBs, entry.Name()) - } - default: - nonDBs = append(nonDBs, entry.Name()) - } - } - sort.Strings(dbs) - sort.Strings(nonDBs) - return dbs, nonDBs, nil -} - -// DetectDBType attempts to identify the type of database in the given dir with the given name. -// The name and dir would be the same things that would be provided to dbm.NewDB. -// -// The return bool indicates whether or not the DB type was identified. -// -// The only types this detects are LevelDB, RocksDB, and BadgerDB. -// If the DB is another type, the behavior of this is unknown. -// There's a chance this will return false, but there's also a chance it is falsely identified as something else. -func DetectDBType(name, dir string) (dbm.BackendType, bool) { - // Here are the key differences used to differentiate the DB types. - // badgerdb: - // * In a directory named "dir/name". - // * There are numbered files with the extension ".vlog". - // * There are numbered files with the extension ".sst" (might possibly be missing if the db is empty).
- // * Has the following files: KEYREGISTRY, MANIFEST - // * Might also have files: LOCK - // rocksdb: - // * In a directory named "dir/name.db". - // * There are numbered files with the extension ".log". - // * There are numbered files with the extension ".sst" (might possibly be missing if the db is empty). - // * Has the following files: CURRENT, IDENTITY, LOG, MANIFEST-{6 digits} (multiple), OPTIONS-{6 digits} (multiple) - // * Might also have files: LOCK, LOG.old, LOG.old.{16 digits} (multiple) - // leveldb: - // * In a directory named "dir/name.db". - // * There are numbered files with the extension ".log". - // * There are numbered files with the extension ".ldb" (might possibly be missing if the db is empty). - // * Has the following files: CURRENT, LOG, MANIFEST-{6 digits} (multiple) - // * Might also have files: LOCK, LOG.old - // boltdb: - // * Is an executable file named "dir/name.db" - - // Note: I'm not sure of an easy way to look for files that start or end with certain strings (e.g. files ending in ".sst"). - // The only way I know of is to get the entire dir contents and loop through the entries. - // However, especially for large DBs, that can be well over 10k files. - // If the list is sorted (e.g. from os.ReadDir), the ".sst" or ".ldb" files would be one of the first few. - // And stuff like MANIFEST-{numbers} and OPTIONS-{numbers} would be one of the last few. - // But just getting that list is a whole lot of work that should be avoided if possible. - // Additionally, this is only being written with badgerdb, rocksdb, and leveldb in mind. - // - // So in here, rather than being more certain and checking for those types of files, we'll skip those checks and - // put up with possible false positives. Hopefully a false positive would error out at some later point (open or reading). - - // Let's first check for badgerdb since it's the easiest. - dbDir := filepath.Join(dir, name) - if dirExists(dbDir) { - // Since that's a pretty standard dir name, do an easy check for a couple files that should be there. - if !fileExists(filepath.Join(dbDir, "KEYREGISTRY")) || !fileExists(filepath.Join(dbDir, "MANIFEST")) { - return unknownDBBackend, false - } - // Could also check for numbered files with the extension ".vlog", but that's expensive. - // And for the types involved in here, what's been done should be enough to hopefully prevent false positives. - return dbm.BadgerDBBackend, true - } - - // Now let's check for boltdb. It's a file instead of a directory, with the same name used by rocksdb and leveldb. - dbDir = filepath.Join(dir, name+".db") - if fileExists(dbDir) { - return dbm.BoltDBBackend, true - } - - // The other two (rocksdb and leveldb) should be in directories named "dir/name.db" - // and should have files CURRENT and LOG. - if !dirExists(dbDir) || !fileExists(filepath.Join(dbDir, "CURRENT")) || !fileExists(filepath.Join(dbDir, "LOG")) { - return unknownDBBackend, false - } - - // Okay, assuming it's either a rocksdb or leveldb directory now. - - // The only statically named file difference between rocksdb and leveldb is IDENTITY with rocksdb. - if fileExists(filepath.Join(dbDir, "IDENTITY")) { - return dbm.RocksDBBackend, true - } - - // At this point, we assume it's either cleveldb or goleveldb. - // Unfortunately, they both use the same files, but possibly with different formats. - // Sometimes you can treat a goleveldb as cleveldb and vice versa, but sometimes you can't. - // The only way I can think of to differentiate them here is to just try to open them.
- // I didn't test like this with the other types because the dbm.NewDB function will create - // a db if it doesn't exist which can cause weird behavior if trying with the wrong db type. - // Goleveldb and cleveldb are close enough, though that it won't cause problems. - canOpenDB := func(backend dbm.BackendType) (rv bool) { - defer func() { - if r := recover(); r != nil { - rv = false - } - }() - db, err := dbm.NewDB(name, backend, dir) - if err != nil { - return false - } - iter, err := db.Iterator(nil, nil) - if err != nil { - return false - } - // Check up to the first 10 entries. 10 randomly picked to not be t0o big, but bigger than 0 or 1. - i := 0 - for ; iter.Valid(); iter.Next() { - _ = iter.Key() - _ = iter.Value() - i++ - if i >= 10 { - break - } - } - if iter.Error() != nil { - return false - } - if iter.Close() != nil { - return false - } - if db.Close() != nil { - return false - } - return true - } - - if canOpenDB(dbm.GoLevelDBBackend) { - return dbm.GoLevelDBBackend, true - } - if IsPossibleDBType(string(dbm.CLevelDBBackend)) && canOpenDB(dbm.CLevelDBBackend) { - return dbm.CLevelDBBackend, true - } - - return unknownDBBackend, false -} - -// dirExists returns true if the path exists and is a directory. -func dirExists(path string) bool { - info, err := os.Stat(path) - return err == nil && info.IsDir() -} - -// fileExists returns true if the path exists and is a file. -func fileExists(path string) bool { - info, err := os.Stat(path) - return err == nil && !info.IsDir() -} - -// commaString converts a positive integer to a string and adds commas. -func commaString(v uint) string { - str := fmt.Sprintf("%d", v) - if len(str) <= 3 { - return str - } - rv := make([]rune, len(str)+(len(str)-1)/3) - added := 0 - for i, c := range str { - if i != 0 && (len(str)-i)%3 == 0 { - rv[i+added] = ',' - added++ - } - rv[i+added] = c - } - return string(rv) -} diff --git a/cmd/dbmigrate/utils/migrator_test.go b/cmd/dbmigrate/utils/migrator_test.go deleted file mode 100644 index 00ad17b7c0..0000000000 --- a/cmd/dbmigrate/utils/migrator_test.go +++ /dev/null @@ -1,998 +0,0 @@ -package utils - -import ( - "fmt" - "os" - "path/filepath" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - dbm "github.com/cometbft/cometbft-db" -) - -type MigratorTestSuite struct { - suite.Suite -} - -func (s *MigratorTestSuite) SetupTest() { - -} - -func TestMigratorTestSuite(t *testing.T) { - suite.Run(t, new(MigratorTestSuite)) -} - -func (s *MigratorTestSuite) TestInitialize() { - tdir := s.T().TempDir() - dbdir := "some.db" - someFile := "somefile.txt" - s.Require().NoError(os.MkdirAll(filepath.Join(tdir, "data", dbdir), 0700), "making dbdir") - s.Require().NoError(os.WriteFile(filepath.Join(tdir, "data", someFile), []byte{}, 0600), "making somefile") - - s.T().Run("ApplyDefaults called before ValidateBasic", func(t *testing.T) { - m := &Migrator{ - TargetDBType: "", // Will cause error. - HomePath: tdir, - } - err := m.Initialize() - require.Error(t, err) - assert.Contains(t, err.Error(), "TargetDBType") - assert.Equal(t, m.SourceDataDir, filepath.Join(tdir, "data")) - }) - - s.T().Run("ReadSourceDataDir not called if ValidateBasic gives error", func(t *testing.T) { - m := &Migrator{ - TargetDBType: "", // Will cause error. 
-			HomePath:     tdir,
-		}
-		err := m.Initialize()
-		require.Error(t, err)
-		assert.Contains(t, err.Error(), "TargetDBType", "err")
-		assert.Len(t, m.ToConvert, 0, "ToConvert")
-		assert.Len(t, m.ToCopy, 0, "ToCopy")
-	})
-
-	s.T().Run("ReadSourceDataDir called if valid", func(t *testing.T) {
-		m := &Migrator{
-			TargetDBType: "goleveldb",
-			HomePath:     tdir,
-			SourceDBType: "goleveldb",
-		}
-		err := m.Initialize()
-		require.NoError(t, err)
-		assert.Len(t, m.ToConvert, 1, "ToConvert")
-		assert.Contains(t, m.ToConvert, dbdir, "ToConvert")
-		assert.Len(t, m.ToCopy, 1, "ToCopy")
-		assert.Contains(t, m.ToCopy, someFile, "ToCopy")
-	})
-}
-
-func (s *MigratorTestSuite) TestApplyDefaults() {
-	defaultDateFormat := "2006-01-02-15-04"
-	tdir := s.T().TempDir()
-	dirForPermTest := filepath.Join(tdir, "permissions-test")
-	permForPermTest := os.FileMode(0750)
-	s.Require().NoError(os.MkdirAll(dirForPermTest, permForPermTest), "making permissions test dir")
-	var tests = []struct {
-		name     string
-		migrator *Migrator
-		getter   func(m *Migrator) interface{}
-		expected interface{}
-	}{
-		{
-			name: "staging dir empty home path empty",
-			migrator: &Migrator{
-				HomePath:   "",
-				StagingDir: "",
-			},
-			getter:   func(m *Migrator) interface{} { return m.StagingDir },
-			expected: "",
-		},
-		{
-			name: "staging dir empty home path not empty",
-			migrator: &Migrator{
-				HomePath:   "homepath",
-				StagingDir: "",
-			},
-			getter:   func(m *Migrator) interface{} { return m.StagingDir },
-			expected: "homepath",
-		},
-		{
-			name: "staging dir not empty home path empty",
-			migrator: &Migrator{
-				HomePath:   "",
-				StagingDir: "stagingdir",
-			},
-			getter:   func(m *Migrator) interface{} { return m.StagingDir },
-			expected: "stagingdir",
-		},
-		{
-			name: "staging dir not empty home path not empty",
-			migrator: &Migrator{
-				HomePath:   "homepath",
-				StagingDir: "stagingdir",
-			},
-			getter:   func(m *Migrator) interface{} { return m.StagingDir },
-			expected: "stagingdir",
-		},
-
-		{
-			name: "backup dir empty home path empty",
-			migrator: &Migrator{
-				HomePath:  "",
-				BackupDir: "",
-			},
-			getter:   func(m *Migrator) interface{} { return m.BackupDir },
-			expected: "",
-		},
-		{
-			name: "backup dir empty home path not empty",
-			migrator: &Migrator{
-				HomePath:  "homepath",
-				BackupDir: "",
-			},
-			getter:   func(m *Migrator) interface{} { return m.BackupDir },
-			expected: "homepath",
-		},
-		{
-			name: "backup dir not empty home path empty",
-			migrator: &Migrator{
-				HomePath:  "",
-				BackupDir: "backupdir",
-			},
-			getter:   func(m *Migrator) interface{} { return m.BackupDir },
-			expected: "backupdir",
-		},
-		{
-			name: "backup dir not empty home path not empty",
-			migrator: &Migrator{
-				HomePath:  "homepath",
-				BackupDir: "backupdir",
-			},
-			getter:   func(m *Migrator) interface{} { return m.BackupDir },
-			expected: "backupdir",
-		},
-
-		{
-			name: "source data dir empty home path empty",
-			migrator: &Migrator{
-				HomePath:      "",
-				SourceDataDir: "",
-			},
-			getter:   func(m *Migrator) interface{} { return m.SourceDataDir },
-			expected: "",
-		},
-		{
-			name: "source data dir empty home path not empty",
-			migrator: &Migrator{
-				HomePath:      "homepath",
-				SourceDataDir: "",
-			},
-			getter:   func(m *Migrator) interface{} { return m.SourceDataDir },
-			expected: filepath.Join("homepath", "data"),
-		},
-		{
-			name: "source data dir not empty home path empty",
-			migrator: &Migrator{
-				HomePath:      "",
-				SourceDataDir: "sourcedatadir",
-			},
-			getter:   func(m *Migrator) interface{} { return m.SourceDataDir },
-			expected: "sourcedatadir",
-		},
-		{
-			name: "source data dir not empty home path not empty",
-			migrator: &Migrator{
-				HomePath:      "homepath",
-				SourceDataDir: "sourcedatadir",
-			},
-			getter:   func(m *Migrator) interface{} { return m.SourceDataDir },
-			expected: "sourcedatadir",
-		},
-
-		{
-			name: "dir date format empty",
-			migrator: &Migrator{
-				DirDateFormat: "",
-			},
-			getter:   func(m *Migrator) interface{} { return m.DirDateFormat },
-			expected: defaultDateFormat,
-		},
-		{
-			name: "dir date format not empty",
-			migrator: &Migrator{
-				DirDateFormat: "04-15-02-01-2006",
-			},
-			getter:   func(m *Migrator) interface{} { return m.DirDateFormat },
-			expected: "04-15-02-01-2006",
-		},
-
-		{
-			name: "staging data dir empty staging dir empty",
-			migrator: &Migrator{
-				StagingDir:     "",
-				StagingDataDir: "",
-			},
-			getter:   func(m *Migrator) interface{} { return m.StagingDataDir },
-			expected: "",
-		},
-		{
-			name: "staging data dir empty staging dir not empty",
-			migrator: &Migrator{
-				TargetDBType:   "targetdb",
-				StagingDir:     "stagingdir",
-				StagingDataDir: "",
-			},
-			getter:   func(m *Migrator) interface{} { return m.StagingDataDir },
-			expected: filepath.Join("stagingdir", fmt.Sprintf("data-dbmigrate-tmp-%s-%s", time.Now().Format(defaultDateFormat), "targetdb")),
-		},
-		{
-			name: "staging data dir not empty staging dir empty",
-			migrator: &Migrator{
-				StagingDir:     "",
-				StagingDataDir: "stagingdatadir",
-			},
-			getter:   func(m *Migrator) interface{} { return m.StagingDataDir },
-			expected: "stagingdatadir",
-		},
-		{
-			name: "staging data dir not empty staging dir not empty",
-			migrator: &Migrator{
-				StagingDir:     "homepath",
-				StagingDataDir: "stagingdatadir",
-			},
-			getter:   func(m *Migrator) interface{} { return m.StagingDataDir },
-			expected: "stagingdatadir",
-		},
-
-		{
-			name: "backup data dir empty backup dir empty",
-			migrator: &Migrator{
-				BackupDir:     "",
-				BackupDataDir: "",
-			},
-			getter:   func(m *Migrator) interface{} { return m.BackupDataDir },
-			expected: "",
-		},
-		{
-			name: "backup data dir empty backup dir not empty",
-			migrator: &Migrator{
-				BackupDir:     "backupdir",
-				BackupDataDir: "",
-			},
-			getter:   func(m *Migrator) interface{} { return m.BackupDataDir },
-			expected: filepath.Join("backupdir", "data-dbmigrate-backup-"+time.Now().Format(defaultDateFormat)),
-		},
-		{
-			name: "backup data dir not empty backup dir empty",
-			migrator: &Migrator{
-				BackupDir:     "",
-				BackupDataDir: "backupdatadir",
-			},
-			getter:   func(m *Migrator) interface{} { return m.BackupDataDir },
-			expected: "backupdatadir",
-		},
-		{
-			name: "backup data dir not empty backup dir not empty",
-			migrator: &Migrator{
-				BackupDir:     "homepath",
-				BackupDataDir: "backupdatadir",
-			},
-			getter:   func(m *Migrator) interface{} { return m.BackupDataDir },
-			expected: "backupdatadir",
-		},
-
-		{
-			name: "permissions not set source data dir does not exist",
-			migrator: &Migrator{
-				Permissions:   0,
-				SourceDataDir: "this-definitely-does-not-exist",
-			},
-			getter:   func(m *Migrator) interface{} { return m.Permissions },
-			expected: os.FileMode(0700),
-		},
-		{
-			name: "permissions not set source data dir exists",
-			migrator: &Migrator{
-				Permissions:   0,
-				SourceDataDir: dirForPermTest,
-			},
-			getter:   func(m *Migrator) interface{} { return m.Permissions },
-			expected: permForPermTest,
-		},
-		{
-			name: "permissions set source data dir does not exist",
-			migrator: &Migrator{
-				Permissions:   0777,
-				SourceDataDir: "this-definitely-does-not-exist",
-			},
-			getter:   func(m *Migrator) interface{} { return m.Permissions },
-			expected: os.FileMode(0777),
-		},
-		{
-			name: "permissions set source data dir exists",
-			migrator: &Migrator{
-				Permissions:
0775, - SourceDataDir: dirForPermTest, - }, - getter: func(m *Migrator) interface{} { return m.Permissions }, - expected: os.FileMode(0775), - }, - - { - name: "status period not set", - migrator: &Migrator{ - StatusPeriod: 0, - }, - getter: func(m *Migrator) interface{} { return m.StatusPeriod }, - expected: 5 * time.Second, - }, - { - name: "status period set", - migrator: &Migrator{ - StatusPeriod: 10 * time.Second, - }, - getter: func(m *Migrator) interface{} { return m.StatusPeriod }, - expected: 10 * time.Second, - }, - - { - name: "target db type not set unchanged", - migrator: &Migrator{ - TargetDBType: "", - }, - getter: func(m *Migrator) interface{} { return m.TargetDBType }, - expected: "", - }, - { - name: "target db type set unchanged", - migrator: &Migrator{ - TargetDBType: "target type", - }, - getter: func(m *Migrator) interface{} { return m.TargetDBType }, - expected: "target type", - }, - - { - name: "source db type not set unchanged", - migrator: &Migrator{ - SourceDBType: "", - }, - getter: func(m *Migrator) interface{} { return m.SourceDBType }, - expected: "", - }, - { - name: "source db type set unchanged", - migrator: &Migrator{ - SourceDBType: "source type", - }, - getter: func(m *Migrator) interface{} { return m.SourceDBType }, - expected: "source type", - }, - - { - name: "batch size not set unchanged", - migrator: &Migrator{ - BatchSize: 0, - }, - getter: func(m *Migrator) interface{} { return m.BatchSize }, - expected: uint(0), - }, - { - name: "batch size set unchanged", - migrator: &Migrator{ - BatchSize: 1234, - }, - getter: func(m *Migrator) interface{} { return m.BatchSize }, - expected: uint(1234), - }, - - { - name: "to convert not set unchanged", - migrator: &Migrator{ - ToConvert: nil, - }, - getter: func(m *Migrator) interface{} { return m.ToConvert }, - expected: []string(nil), - }, - { - name: "to convert set unchanged", - migrator: &Migrator{ - ToConvert: []string{"foo"}, - }, - getter: func(m *Migrator) interface{} { return m.ToConvert }, - expected: []string{"foo"}, - }, - - { - name: "to copy not set unchanged", - migrator: &Migrator{ - ToCopy: nil, - }, - getter: func(m *Migrator) interface{} { return m.ToCopy }, - expected: []string(nil), - }, - { - name: "to copy set unchanged", - migrator: &Migrator{ - ToCopy: []string{"bar"}, - }, - getter: func(m *Migrator) interface{} { return m.ToCopy }, - expected: []string{"bar"}, - }, - } - - for _, tc := range tests { - s.T().Run(tc.name, func(t *testing.T) { - tc.migrator.ApplyDefaults() - actual := tc.getter(tc.migrator) - assert.Equal(t, tc.expected, actual) - }) - } -} - -func (s *MigratorTestSuite) TestValidateBasic() { - makeValidMigrator := func() *Migrator { - rv := &Migrator{ - HomePath: "testing", - TargetDBType: "goleveldb", - SourceDBType: "goleveldb", - } - rv.ApplyDefaults() - return rv - } - tests := []struct { - name string - modifier func(m *Migrator) - expInError []string - }{ - { - name: "all valid", - modifier: func(m *Migrator) {}, - expInError: nil, - }, - { - name: "StagingDir empty", - modifier: func(m *Migrator) { m.StagingDir = "" }, - expInError: []string{"StagingDir"}, - }, - { - name: "BackupDir empty", - modifier: func(m *Migrator) { m.BackupDir = "" }, - expInError: []string{"BackupDir"}, - }, - { - name: "TargetDBType empty", - modifier: func(m *Migrator) { m.TargetDBType = "" }, - expInError: []string{"TargetDBType"}, - }, - { - name: "TargetDBType not possible", - modifier: func(m *Migrator) { m.TargetDBType = "not-possible" }, - expInError: []string{"TargetDBType", 
"goleveldb", "\"not-possible\""}, - }, - { - name: "SourceDBType empty", - modifier: func(m *Migrator) { m.SourceDBType = "" }, - expInError: nil, - }, - { - name: "SourceDBType not possible", - modifier: func(m *Migrator) { m.SourceDBType = "not-possible" }, - expInError: []string{"SourceDBType", "goleveldb", "\"not-possible\""}, - }, - { - name: "SourceDataDir empty", - modifier: func(m *Migrator) { m.SourceDataDir = "" }, - expInError: []string{"SourceDataDir"}, - }, - { - name: "StagingDataDir empty", - modifier: func(m *Migrator) { m.StagingDataDir = "" }, - expInError: []string{"StagingDataDir"}, - }, - { - name: "BackupDataDir empty", - modifier: func(m *Migrator) { m.BackupDataDir = "" }, - expInError: []string{"BackupDataDir"}, - }, - { - name: "Permissions empty", - modifier: func(m *Migrator) { m.Permissions = 0 }, - expInError: []string{"Permissions"}, - }, - { - name: "StatusPeriod empty", - modifier: func(m *Migrator) { m.StatusPeriod = 0 }, - expInError: []string{"StatusPeriod"}, - }, - { - name: "StatusPeriod just under 1 second", - modifier: func(m *Migrator) { m.StatusPeriod = time.Second - time.Nanosecond }, - expInError: []string{"StatusPeriod", "999.999999ms", "1s"}, - }, - { - name: "DirDateFormat empty", - modifier: func(m *Migrator) { m.DirDateFormat = "" }, - expInError: []string{"DirDateFormat"}, - }, - } - - for _, tc := range tests { - s.T().Run(tc.name, func(t *testing.T) { - m := makeValidMigrator() - tc.modifier(m) - actual := m.ValidateBasic() - if len(tc.expInError) > 0 { - require.Error(t, actual) - for _, exp := range tc.expInError { - assert.Contains(t, actual.Error(), exp) - } - } else { - require.NoError(t, actual) - } - }) - } -} - -func (s *MigratorTestSuite) TestReadSourceDataDir() { - - s.T().Run("no source data dir", func(t *testing.T) { - m := &Migrator{ - SourceDataDir: "", - ToConvert: []string{"something"}, - ToCopy: []string{"anotherthing"}, - } - err := m.ReadSourceDataDir() - // It shouldn't give an error. - require.NoError(t, err) - // And the ToConvert and ToCopy slices shouldn't have changed. - assert.Len(t, m.ToConvert, 1, "ToConvert") - assert.Contains(t, m.ToConvert, "something", "ToConvert") - assert.Len(t, m.ToCopy, 1, "ToCopy") - assert.Contains(t, m.ToCopy, "anotherthing", "ToCopy") - }) - - s.T().Run("source data dir does not exist", func(t *testing.T) { - m := &Migrator{ - SourceDataDir: "not-gonna-find-me", - ToConvert: []string{"something"}, - ToCopy: []string{"anotherthing"}, - } - err := m.ReadSourceDataDir() - require.Error(t, err) - require.Contains(t, err.Error(), "error reading \"not-gonna-find-me\":", "err") - // And the ToConvert and ToCopy slices should be gone. - assert.Len(t, m.ToConvert, 0, "ToConvert") - assert.Len(t, m.ToCopy, 0, "ToCopy") - }) - - s.T().Run("source data dir has a file but no db", func(t *testing.T) { - tdir := t.TempDir() - someFile := "somefile.txt" - dataDir := filepath.Join(tdir, "data") - require.NoError(t, os.MkdirAll(dataDir, 0700), "making dbdir") - require.NoError(t, os.WriteFile(filepath.Join(dataDir, someFile), []byte{}, 0600), "making somefile") - m := &Migrator{ - SourceDataDir: dataDir, - ToConvert: []string{"something"}, - ToCopy: []string{"anotherthing"}, - } - err := m.ReadSourceDataDir() - require.Error(t, err) - assert.Contains(t, err.Error(), "could not identify any db directories in") - assert.Contains(t, err.Error(), dataDir) - // And the ToConvert and ToCopy slices should be changed. 
- assert.Len(t, m.ToConvert, 0, "ToConvert") - assert.Len(t, m.ToCopy, 1, "ToCopy") - assert.Contains(t, m.ToCopy, someFile, "ToCopy") - }) - - s.T().Run("source data dir has a db", func(t *testing.T) { - tdir := s.T().TempDir() - dbdir := "some.db" - someFile := "somefile.txt" - dataDir := filepath.Join(tdir, "data") - require.NoError(t, os.MkdirAll(filepath.Join(dataDir, dbdir), 0700), "making dbdir") - require.NoError(t, os.WriteFile(filepath.Join(dataDir, someFile), []byte{}, 0600), "making somefile") - m := &Migrator{ - SourceDataDir: dataDir, - ToConvert: []string{"something"}, - ToCopy: []string{"anotherthing"}, - } - err := m.ReadSourceDataDir() - require.NoError(t, err) - // And the ToConvert and ToCopy slices should be changed. - assert.Len(t, m.ToConvert, 1, "ToConvert") - assert.Contains(t, m.ToConvert, dbdir, "ToConvert") - assert.Len(t, m.ToCopy, 1, "ToCopy") - assert.Contains(t, m.ToCopy, someFile, "ToCopy") - }) -} - -// TODO: Migrate tests -// TODO: migrationManager tests - -func (s *MigratorTestSuite) TestNoKeyvals() { - f := noKeyvals() - s.Require().NotNil(f) - s.Assert().Len(f, 0) -} - -func (s *MigratorTestSuite) TestSplitDBPath() { - tests := []struct { - name string - elem []string - dbPath string - dbName string - }{ - { - name: "absolute path and simple db name", - elem: []string{"/foo/bar", "baz.db"}, - dbPath: "/foo/bar", - dbName: "baz", - }, - { - name: "absolute path and simple db name no suffix", - elem: []string{"/foo/bar", "baz"}, - dbPath: "/foo/bar", - dbName: "baz", - }, - { - name: "absolute path and simple db name weird suffix", - elem: []string{"/foo/bar", "baz.db2"}, - dbPath: "/foo/bar", - dbName: "baz.db2", - }, - { - name: "absolute path and db in sub dir", - elem: []string{"/foo", "bar/baz.db"}, - dbPath: "/foo/bar", - dbName: "baz", - }, - { - name: "absolute path and db in sub dir no suffix", - elem: []string{"/foo", "bar/baz"}, - dbPath: "/foo/bar", - dbName: "baz", - }, - { - name: "absolute path and db in sub dir weird suffix", - elem: []string{"/foo", "bar/baz.db2"}, - dbPath: "/foo/bar", - dbName: "baz.db2", - }, - { - name: "relative path and simple db name", - elem: []string{"foo/bar", "baz.db"}, - dbPath: "foo/bar", - dbName: "baz", - }, - { - name: "relative path and simple db name no suffix", - elem: []string{"foo/bar", "baz"}, - dbPath: "foo/bar", - dbName: "baz", - }, - { - name: "relative path and simple db name weird suffix", - elem: []string{"foo/bar", "baz.db2"}, - dbPath: "foo/bar", - dbName: "baz.db2", - }, - { - name: "relative path and db in sub dir", - elem: []string{"foo", "bar/baz.db"}, - dbPath: "foo/bar", - dbName: "baz", - }, - { - name: "relative path and db in sub dir no suffix", - elem: []string{"foo", "bar/baz"}, - dbPath: "foo/bar", - dbName: "baz", - }, - { - name: "relative path and db in sub dir weird suffix", - elem: []string{"foo", "bar/baz.db2"}, - dbPath: "foo/bar", - dbName: "baz.db2", - }, - } - - for _, tc := range tests { - s.T().Run(tc.name, func(t *testing.T) { - dbPath, dbName := splitDBPath(tc.elem...) 
-			assert.Equal(t, tc.dbPath, dbPath, "dbPath")
-			assert.Equal(t, tc.dbName, dbName, "dbName")
-		})
-	}
-}
-
-func (s *MigratorTestSuite) TestGetDataDirContents() {
-	// Setup a temp directory with the following:
-	//  1) A directory named dbdir1.db with nothing in it
-	//  2) A directory named dbdir2 with files named MANIFEST, other1.txt, other2.log
-	//  3) A directory named subdir1 with:
-	//     a) a directory named dbdir3.db with nothing in it
-	//     b) a directory named dbdir4 with files named MANIFEST, other3.txt, other4.log
-	//     c) a file named not-a-db-1.txt
-	//  4) A directory named subdir2 with files: other5.txt, other6.log
-	//  5) A file named not-a-db-2.txt
-	//  6) A directory named subdir3 with:
-	//     a) a directory named subsubdir1 with a file named other7.txt
-	//     b) a directory named subsubdir2 with a file named other8.txt
-
-	tDir := s.T().TempDir()
-
-	s.Require().NoError(os.MkdirAll(filepath.Join(tDir, "dbdir1.db"), 0700), "making dbdir1.db")
-
-	s.Require().NoError(os.MkdirAll(filepath.Join(tDir, "dbdir2"), 0700), "making dbdir2")
-	s.Require().NoError(os.WriteFile(filepath.Join(tDir, "dbdir2", "MANIFEST"), []byte{}, 0700), "making dbdir2/MANIFEST")
-	s.Require().NoError(os.WriteFile(filepath.Join(tDir, "dbdir2", "other1.txt"), []byte{}, 0700), "making dbdir2/other1.txt")
-	s.Require().NoError(os.WriteFile(filepath.Join(tDir, "dbdir2", "other2.log"), []byte{}, 0700), "making dbdir2/other2.log")
-
-	s.Require().NoError(os.MkdirAll(filepath.Join(tDir, "subdir1", "dbdir3.db"), 0700), "making subdir1/dbdir3.db")
-	s.Require().NoError(os.MkdirAll(filepath.Join(tDir, "subdir1", "dbdir4"), 0700), "making subdir1/dbdir4")
-	s.Require().NoError(os.WriteFile(filepath.Join(tDir, "subdir1", "dbdir4", "MANIFEST"), []byte{}, 0700), "making subdir1/dbdir4/MANIFEST")
-	s.Require().NoError(os.WriteFile(filepath.Join(tDir, "subdir1", "dbdir4", "other3.txt"), []byte{}, 0700), "making subdir1/dbdir4/other3.txt")
-	s.Require().NoError(os.WriteFile(filepath.Join(tDir, "subdir1", "dbdir4", "other4.log"), []byte{}, 0700), "making subdir1/dbdir4/other4.log")
-	s.Require().NoError(os.WriteFile(filepath.Join(tDir, "subdir1", "not-a-db-1.txt"), []byte{}, 0700), "making subdir1/not-a-db-1.txt")
-
-	s.Require().NoError(os.MkdirAll(filepath.Join(tDir, "subdir2"), 0700), "making subdir2")
-	s.Require().NoError(os.WriteFile(filepath.Join(tDir, "subdir2", "other5.txt"), []byte{}, 0700), "making subdir2/other5.txt")
-	s.Require().NoError(os.WriteFile(filepath.Join(tDir, "subdir2", "other6.log"), []byte{}, 0700), "making subdir2/other6.log")
-
-	s.Require().NoError(os.WriteFile(filepath.Join(tDir, "not-a-db-2.txt"), []byte{}, 0700), "making not-a-db-2.txt")
-
-	s.Require().NoError(os.MkdirAll(filepath.Join(tDir, "subdir3", "subsubdir1"), 0700), "making subsubdir1")
-	s.Require().NoError(os.WriteFile(filepath.Join(tDir, "subdir3", "subsubdir1", "other7.txt"), []byte{}, 0700), "making subdir3/subsubdir1/other7.txt")
-	s.Require().NoError(os.MkdirAll(filepath.Join(tDir, "subdir3", "subsubdir2"), 0700), "making subsubdir2")
-	s.Require().NoError(os.WriteFile(filepath.Join(tDir, "subdir3", "subsubdir2", "other8.txt"), []byte{}, 0700), "making subdir3/subsubdir2/other8.txt")
-
-	s.T().Run("standard use case", func(t *testing.T) {
-		expectedDbs := []string{"dbdir1.db", "dbdir2", "subdir1/dbdir3.db", "subdir1/dbdir4"}
-		expectedNonDBs := []string{"subdir1/not-a-db-1.txt", "subdir2", "not-a-db-2.txt", "subdir3"}
-
-		dbs, nonDBs, err := GetDataDirContents(tDir)
-
-		require.NoError(t, err, "calling GetDataDirContents")
-
-		assert.Len(t, dbs, len(expectedDbs), "dbs")
-		for _, eDB := range expectedDbs {
-			assert.Contains(t, dbs, eDB, "dbs")
-		}
-
-		assert.Len(t, nonDBs, len(expectedNonDBs), "nonDBs")
-		for _, eNonDB := range expectedNonDBs {
-			assert.Contains(t, nonDBs, eNonDB, "nonDBs")
-		}
-	})
-
-	s.T().Run("directory does not exist", func(t *testing.T) {
-		_, _, err := GetDataDirContents(tDir + "-nope-not-gonna-exist")
-		require.Error(t, err, "GetDataDirContents on directory that doesn't exist.")
-		assert.Contains(t, err.Error(), "no such file or directory", "err")
-	})
-}
-
-func (s *MigratorTestSuite) TestDetectDBType() {
-	tDir := s.T().TempDir()
-
-	s.T().Run("badger", func(t *testing.T) {
-		expected := dbm.BadgerDBBackend
-		name := "badger1"
-		dataDir := filepath.Join(tDir, "badger")
-		dbDir := filepath.Join(dataDir, name)
-		require.NoError(t, os.MkdirAll(dbDir, 0700), "making dbDir")
-		require.NoError(t, os.WriteFile(filepath.Join(dbDir, "KEYREGISTRY"), []byte{}, 0600), "making KEYREGISTRY")
-		require.NoError(t, os.WriteFile(filepath.Join(dbDir, "MANIFEST"), []byte{}, 0600), "making MANIFEST")
-		actual, ok := DetectDBType(name, dataDir)
-		assert.True(t, ok, "DetectDBType bool")
-		assert.Equal(t, expected, actual, "DetectDBType BackendType")
-	})
-
-	s.T().Run("rocks", func(t *testing.T) {
-		expected := dbm.RocksDBBackend
-		name := "rocks2"
-		dataDir := filepath.Join(tDir, "rocks")
-		dbDir := filepath.Join(dataDir, name+".db")
-		require.NoError(t, os.MkdirAll(dbDir, 0700), "making dbDir")
-		require.NoError(t, os.WriteFile(filepath.Join(dbDir, "CURRENT"), []byte{}, 0600), "making CURRENT")
-		require.NoError(t, os.WriteFile(filepath.Join(dbDir, "LOG"), []byte{}, 0600), "making LOG")
-		require.NoError(t, os.WriteFile(filepath.Join(dbDir, "IDENTITY"), []byte{}, 0600), "making IDENTITY")
-		actual, ok := DetectDBType(name, dataDir)
-		assert.True(t, ok, "DetectDBType bool")
-		assert.Equal(t, expected, actual, "DetectDBType BackendType")
-	})
-
-	// To run this test, you'll need to provide the tag 'cleveldb' to the test command.
-	// Both make test and the github action should have that tag, but you might need
-	// to tell your IDE about it in order to use it to run this test.
-	if IsPossibleDBType("cleveldb") {
-		s.T().Run("clevel", func(t *testing.T) {
-			// As far as I can tell, you can always open a cleveldb using goleveldb, but not vice versa.
-			// Since DetectDBType checks for goleveldb first, it should return as goleveldb in this test.
-			expected := dbm.GoLevelDBBackend
-			name := "clevel3"
-			dataDir := filepath.Join(tDir, "clevel")
-			require.NoError(t, os.MkdirAll(dataDir, 0700), "making data dir")
-			// The reason the other db types aren't done this way (creating the db with NewDB) is that
-			// I didn't want to cause confusion with regard to build tags and external library dependencies.
- db, err := dbm.NewDB(name, dbm.CLevelDBBackend, dataDir) - require.NoError(t, err, "NewDB") - for i := 0; i < 15; i++ { - assert.NoError(t, db.Set([]byte(fmt.Sprintf("%s-key-%d", name, i)), []byte(fmt.Sprintf("%s-value-%d", name, i))), "setting key/value %d", i) - } - require.NoError(t, db.Close(), "closing db") - actual, ok := DetectDBType(name, dataDir) - assert.True(t, ok, "DetectDBType bool") - assert.Equal(t, expected, actual, "DetectDBType BackendType") - }) - } - - s.T().Run("golevel", func(t *testing.T) { - expected := dbm.GoLevelDBBackend - name := "golevel8" - dataDir := filepath.Join(tDir, "golevel") - require.NoError(t, os.MkdirAll(dataDir, 0700), "making data dir") - // The reason the other db types aren't done this way (creating the db with NewDB) is that - // I didn't want to cause confusion with regard to build tags and external library dependencies. - db, err := dbm.NewDB(name, expected, dataDir) - require.NoError(t, err, "NewDB") - for i := 0; i < 15; i++ { - assert.NoError(t, db.Set([]byte(fmt.Sprintf("%s-key-%d", name, i)), []byte(fmt.Sprintf("%s-value-%d", name, i))), "setting key/value %d", i) - } - require.NoError(t, db.Close(), "closing db") - actual, ok := DetectDBType(name, dataDir) - assert.True(t, ok, "DetectDBType bool") - assert.Equal(t, expected, actual, "DetectDBType BackendType") - }) - - s.T().Run("boltdb", func(t *testing.T) { - expected := dbm.BoltDBBackend - name := "bolt7" - dataDir := filepath.Join(tDir, "bolt") - dbFile := filepath.Join(dataDir, name+".db") - require.NoError(t, os.MkdirAll(dataDir, 0700), "making dataDir") - require.NoError(t, os.WriteFile(dbFile, []byte{}, 0700), "making dbFile") - actual, ok := DetectDBType(name, dataDir) - assert.True(t, ok, "DetectDBType bool") - assert.Equal(t, expected, actual, "DetectDBType BackendType") - }) - - s.T().Run("empty", func(t *testing.T) { - expected := unknownDBBackend - name := "empty4" - dataDir := filepath.Join(tDir, "empty") - dbDir := filepath.Join(dataDir, name) - require.NoError(t, os.MkdirAll(dbDir, 0700), "making dbDir") - actual, ok := DetectDBType(name, dataDir) - assert.False(t, ok, "DetectDBType bool") - assert.Equal(t, expected, actual, "DetectDBType BackendType") - }) - - s.T().Run("only current", func(t *testing.T) { - expected := unknownDBBackend - name := "only-current5" - dataDir := filepath.Join(tDir, "only-current") - dbDir := filepath.Join(dataDir, name+".db") - require.NoError(t, os.MkdirAll(dbDir, 0700), "making dbDir") - require.NoError(t, os.WriteFile(filepath.Join(dbDir, "CURRENT"), []byte{}, 0600), "making CURRENT") - actual, ok := DetectDBType(name, dataDir) - assert.False(t, ok, "DetectDBType bool") - assert.Equal(t, expected, actual, "DetectDBType BackendType") - }) - - s.T().Run("does not exist", func(t *testing.T) { - expected := unknownDBBackend - name := "does-not-exist6" - dataDir := filepath.Join(tDir, "only-current") - actual, ok := DetectDBType(name, dataDir) - assert.False(t, ok, "DetectDBType bool") - assert.Equal(t, expected, actual, "DetectDBType BackendType") - }) -} - -func (s *MigratorTestSuite) TestDirExists() { - s.T().Run("does not exist", func(t *testing.T) { - assert.False(t, dirExists("does not exist")) - }) - - s.T().Run("containing dir exists", func(t *testing.T) { - tdir := t.TempDir() - dir := filepath.Join(tdir, "nope") - assert.False(t, dirExists(dir)) - }) - - s.T().Run("is file", func(t *testing.T) { - tdir := t.TempDir() - file := filepath.Join(tdir, "filiename.txt") - require.NoError(t, os.WriteFile(file, []byte{}, 0600), "making 
file") - assert.False(t, dirExists(file)) - }) - - s.T().Run("is dir", func(t *testing.T) { - tdir := t.TempDir() - dir := filepath.Join(tdir, "immadir") - require.NoError(t, os.MkdirAll(dir, 0700), "making dir") - assert.True(t, dirExists(dir)) - }) -} - -func (s *MigratorTestSuite) TestFileExists() { - s.T().Run("does not exist", func(t *testing.T) { - assert.False(t, fileExists("does not exist")) - }) - - s.T().Run("containing dir exists", func(t *testing.T) { - tdir := t.TempDir() - file := filepath.Join(tdir, "nope.tar") - assert.False(t, fileExists(file)) - }) - - s.T().Run("is file", func(t *testing.T) { - tdir := t.TempDir() - file := filepath.Join(tdir, "filiename.txt") - require.NoError(t, os.WriteFile(file, []byte{}, 0600), "making file") - assert.True(t, fileExists(file)) - }) - - s.T().Run("is dir", func(t *testing.T) { - tdir := t.TempDir() - dir := filepath.Join(tdir, "immadir") - require.NoError(t, os.MkdirAll(dir, 0700), "making dir") - assert.False(t, fileExists(dir)) - }) -} - -func (s *MigratorTestSuite) TestCommaString() { - tests := []struct { - v uint - exp string - }{ - {v: 0, exp: "0"}, - {v: 1, exp: "1"}, - {v: 22, exp: "22"}, - {v: 333, exp: "333"}, - {v: 999, exp: "999"}, - {v: 1_000, exp: "1,000"}, - {v: 4_444, exp: "4,444"}, - {v: 55_555, exp: "55,555"}, - {v: 666_666, exp: "666,666"}, - {v: 999_999, exp: "999,999"}, - {v: 1_000_000, exp: "1,000,000"}, - {v: 7_777_777, exp: "7,777,777"}, - {v: 88_888_888, exp: "88,888,888"}, - {v: 999_999_999, exp: "999,999,999"}, - {v: 1_000_000_000, exp: "1,000,000,000"}, - {v: 1_010_101_010, exp: "1,010,101,010"}, - {v: 11_011_011_011, exp: "11,011,011,011"}, - {v: 120_120_120_120, exp: "120,120,120,120"}, - {v: 999_999_999_999, exp: "999,999,999,999"}, - {v: 1_000_000_000_000, exp: "1,000,000,000,000"}, - {v: 1_301_301_301_301, exp: "1,301,301,301,301"}, - {v: 14_814_714_614_514, exp: "14,814,714,614,514"}, - {v: 150_151_152_153_154, exp: "150,151,152,153,154"}, - {v: 999_999_999_999_999, exp: "999,999,999,999,999"}, - {v: 1_000_000_000_000_000, exp: "1,000,000,000,000,000"}, - {v: 1_651_651_651_651_651, exp: "1,651,651,651,651,651"}, - {v: 17_017_017_017_017_017, exp: "17,017,017,017,017,017"}, - {v: 189_189_189_189_189_189, exp: "189,189,189,189,189,189"}, - {v: 999_999_999_999_999_999, exp: "999,999,999,999,999,999"}, - {v: 1_000_000_000_000_000_000, exp: "1,000,000,000,000,000,000"}, - {v: 1_981_981_981_981_981_981, exp: "1,981,981,981,981,981,981"}, - {v: 18_446_744_073_709_551_615, exp: "18,446,744,073,709,551,615"}, - } - - for _, tc := range tests { - s.T().Run(tc.exp, func(t *testing.T) { - act := commaString(tc.v) - assert.Equal(t, tc.exp, act) - }) - } -} diff --git a/cmd/dbmigrate/utils/rocksdb.go b/cmd/dbmigrate/utils/rocksdb.go deleted file mode 100644 index 83c113c273..0000000000 --- a/cmd/dbmigrate/utils/rocksdb.go +++ /dev/null @@ -1,17 +0,0 @@ -//go:build rocksdb -// +build rocksdb - -package utils - -import ( - dbm "github.com/cometbft/cometbft-db" -) - -// This file is included when built with the rocksdb tag (which matches the tag Tendermint looks for). -// Tendermint does all the heavy lifting, but doesn't expose a way to identify which DB types are available. -// That list would also have MemDB, which we don't want in here anyway. -// That's all this is doing, just identifying that it was built with that tag and that this DB type is available. 
-
-func init() {
-	AddPossibleDBType(dbm.RocksDBBackend)
-}
diff --git a/docs/Building.md b/docs/Building.md
index 267576918b..2d896e4ef2 100644
--- a/docs/Building.md
+++ b/docs/Building.md
@@ -4,11 +4,8 @@
 - [Overview](#overview)
 - [Prerequisites](#prerequisites)
   - [Go](#go)
-  - [CLevelDB](#cleveldb)
-  - [RocksDB](#rocksdb)
 - [Building or Installing `provenanced`](#building-or-installing-provenanced)
 - [Build Options](#build-options)
-  - [Building `dbmigrate`](#building-dbmigrate)
@@ -23,78 +20,6 @@ Built executables are placed in the `build/` directory.
 
 Building `provenanced` requires [Go 1.21+](https://golang.org/dl/).
 
-### CLevelDB
-
-By default, `provenanced` is built without CLevelDB support.
-Building with CLevelDB support is also possible. See `WITH_CLEVELDB` in [Build Options](#build-options) below.
-
-To download, build, and install the C LevelDB library on your system:
-```console
-$ make cleveldb
-```
-
-Environment variables that can control the behavior of this command:
-
-* `CLEVELDB_VERSION` will install a version other than the one defined in the `Makefile`.
-  Do not include the `v` at the beginning of the version number.
-  Example: `CLEVELDB_VERSION=1.22 make cleveldb`.
-  The default is `1.23`.
-* `CLEVELDB_JOBS` will control the number of parallel jobs used to build the library.
-  The default is the result of the `nproc` command.
-  More parallel jobs can speed up the build.
-  Fewer parallel jobs can alleviate memory problems/crashes that can be encountered during a build.
-* `CLEVELDB_DO_BUILD` defines whether to build cleveldb.
-  The default is `true`.
-* `CLEVELDB_DO_INSTALL` defines whether to install cleveldb.
-  The default is `true`.
-* `CLEVELDB_SUDO` defines whether to use `sudo` for the installation of the built library.
-  The difference between `sudo make cleveldb` and `CLEVELDB_SUDO=true make cleveldb`
-  is that the latter will use `sudo` only for the installation (the download and build still use your current user).
-  Some systems (e.g. Ubuntu) might require this.
-  The default is `true` if the `sudo` command is found, or `false` otherwise.
-* `CLEVELDB_DO_CLEANUP` defines whether to delete the downloaded and unpacked repo when done.
-  The default is `true`.
-
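As a concrete illustration, several of these variables can be combined in a single invocation. The values below are examples only, not requirements:

```console
$ CLEVELDB_VERSION=1.23 CLEVELDB_JOBS=2 CLEVELDB_SUDO=true make cleveldb
```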
- -### RocksDB - -By default, `provenanced` is built without RocksDB support. -Building with RocksDB support is also possible. See `WITH_ROCKSDB` in [Build Options](#build-options) below. - -To download, build, and install the RocksDB library on your system: -```console -$ make rocksdb -``` - -
-Environment variables that can control the behavior of this command:
-
-* `ROCKSDB_VERSION` will install a version other than the one defined in the `Makefile`.
-  Do not include the `v` at the beginning of the version number.
-  Example: `ROCKSDB_VERSION=6.17.3 make rocksdb`.
-  The default is `6.29.5`.
-* `ROCKSDB_JOBS` will control the number of parallel jobs used to build the library.
-  The default is the result of the `nproc` command.
-  More parallel jobs can speed up the build.
-  Fewer parallel jobs can alleviate memory problems/crashes that can be encountered during a build.
-* `ROCKSDB_WITH_SHARED` defines whether to build and install the shared (dynamic) library.
-  The default is `true`.
-* `ROCKSDB_WITH_STATIC` defines whether to build and install the static library.
-  The default is `false`.
-* `ROCKSDB_DO_BUILD` defines whether to build rocksdb.
-  The default is `true`.
-* `ROCKSDB_DO_INSTALL` defines whether to install rocksdb.
-  The default is `true`.
-* `ROCKSDB_SUDO` defines whether to use `sudo` for the installation of the built library.
-  The difference between `sudo make rocksdb` and `ROCKSDB_SUDO=true make rocksdb`
-  is that the latter will use `sudo` only for the installation (the download and build still use your current user).
-  Some systems (e.g. Ubuntu) might require this.
-  The default is `true` if the `sudo` command is found, or `false` otherwise.
-* `ROCKSDB_DO_CLEANUP` defines whether to delete the downloaded and unpacked repo when done.
-  The default is `true`.
-
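For instance, to pin the version and also build the static library (illustrative values, using only the variables documented above):

```console
$ ROCKSDB_VERSION=6.29.5 ROCKSDB_JOBS=4 ROCKSDB_WITH_STATIC=true make rocksdb
```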
-
 ## Building or Installing `provenanced`
 
 To build the `provenanced` executable and place it in the `build/` directory:
@@ -118,18 +43,6 @@ $ make install
 
 A few aspects of `make build` and `make install` can be controlled through environment variables.
 
-* `WITH_CLEVELDB`: Enables/Disables building with CLevelDB support.
-  The default is `false`.
-  If this is not `true`, the built `provenanced` executable will not be able to use CLevelDB as a database backend.
-* `LEVELDB_PATH`: Defines the location of the leveldb library and includes.
-  This is only used if compiling with CLevelDB support on a Mac.
-  The default is the result of `brew --prefix leveldb`.
-* `WITH_ROCKSDB`: Enables/Disables building with RocksDB support.
-  The default is `false`.
-  If this is not `true`, the built `provenanced` executable will not be able to use RocksDB as a database backend.
-* `WITH_BADGERDB`: Enables/Disables building with BadgerDB support.
-  The default is `false`.
-  If this is not `true`, the built `provenanced` executable will not be able to use BadgerDB as a database backend.
 * `WITH_LEDGER`: Enables/Disables building with Ledger hardware wallet support.
   The default is `true`.
   If this is not `true`, the built `provenanced` executable will not work with Ledger hardware wallets.
@@ -151,28 +64,3 @@ A few aspects of `make build` and `make install` can be controlled through envir
   These are appended to a list constructed by the Makefile.
 * `BUILD_FLAGS`: Any extra flags to include when invoking `go build` or `go install`.
   These are appended to a list constructed by the Makefile.
-
-## Building `dbmigrate`
-
-The `dbmigrate` utility can be used to migrate a node's data directory to use a different db backend.
-
-To build the `dbmigrate` executable and place it in the `build/` directory:
-```console
-$ make build-dbmigrate
-```
-
-To build the `dbmigrate` executable and place it in your system's default Go `bin/` directory:
-```console
-$ make install-dbmigrate
-```
-
-Building `dbmigrate` uses the same [Build Options](#build-options) as `provenanced`.
-
-The dbmigrate program will:
-1. Create a new `data/` directory, and copy the contents of the existing `data/` directory into it, converting the database files appropriately.
-2. Back up the existing `data/` directory to `${home}/data-dbmigrate-backup-{timestamp}-{dbtypes}/`.
-3. Move the newly created `data/` directory into place.
-4. Update the config's `db_backend` value to the new db backend type.
-
-The `dbmigrate` utility uses the same configs, environment variables, and flags as `provenanced`.
-For example, if you have the environment variable PIO_HOME defined, then `dbmigrate` will use that as the `--home` directory (unless a `--home` is provided in the command line arguments).
diff --git a/scripts/cleveldb_build_and_install.sh b/scripts/cleveldb_build_and_install.sh
deleted file mode 100755
index 5aff3264a0..0000000000
--- a/scripts/cleveldb_build_and_install.sh
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/bin/bash
-
-# This script will download, compile, and install leveldb, then clean up.
-DEFAULT_CLEVELDB_VERSION='1.23'
-
-can_sudo='false'
-command -v sudo > /dev/null 2>&1 && can_sudo='true'
-
-if [[ "$1" == '-h' || "$1" == '--help' || "$1" == 'help' ]]; then
-    echo "Usage: $( basename $0 ) [<version>]"
-    echo 'See https://github.com/google/leveldb/releases for version info.'
-    echo 'The arguments can also be defined using environment variables:'
-    echo "  CLEVELDB_VERSION for the <version>. Default is $DEFAULT_CLEVELDB_VERSION."
-    echo 'Additional parameters definable using environment variables:'
-    echo "  CLEVELDB_JOBS is the number of parallel jobs for make to use. Default is the result of nproc (=$( nproc )), or 1 if nproc isn't available."
-    echo '  CLEVELDB_DO_BUILD controls whether to build. Default is true.'
-    echo '  CLEVELDB_DO_INSTALL controls whether to install. Default is true.'
-    echo "  CLEVELDB_SUDO controls whether to use sudo when installing the built libraries. Default is $can_sudo."
-    echo '  CLEVELDB_DO_CLEANUP controls whether to delete the downloaded and unpacked repo. Default is true.'
-    exit 0
-fi
-
-# Order of precedence for leveldb version: command line arg 1, env var, default.
-if [[ -n "$1" ]]; then
-    CLEVELDB_VERSION="$1"
-elif [[ -z "$CLEVELDB_VERSION" ]]; then
-    CLEVELDB_VERSION="$DEFAULT_CLEVELDB_VERSION"
-fi
-if [[ -n "$CLEVELDB_VERSION" && "$CLEVELDB_VERSION" =~ ^v ]]; then
-    echo "Illegal version: [$CLEVELDB_VERSION]. Must not start with 'v'." >&2
-    exit 1
-fi
-
-if [[ -z "$CLEVELDB_JOBS" ]]; then
-    if command -v nproc > /dev/null 2>&1; then
-        CLEVELDB_JOBS="$( nproc )"
-    else
-        CLEVELDB_JOBS=1
-    fi
-fi
-
-if [[ -n "$CLEVELDB_JOBS" && ( "$CLEVELDB_JOBS" =~ [^[:digit:]] || $CLEVELDB_JOBS -lt '1' ) ]]; then
-    echo "Illegal jobs count: [$CLEVELDB_JOBS]. Must only contain digits. Must be at least 1." >&2
-    exit 1
-fi
-
-# Usage: trueFalseOrDefault <varname> <default>
-trueFalseOrDefault () {
-    local k v d
-    k="$1"
-    v="${!1}"
-    d="$2"
-    if [[ -n "$v" ]]; then
-        if [[ "$v" =~ ^[tT]([rR][uU][eE])?$ ]]; then
-            printf 'true'
-        elif [[ "$v" =~ ^[fF]([aA][lL][sS][eE])?$ ]]; then
-            printf 'false'
-        else
-            echo "Illegal $k value: '$v'. Must be either 'true' or 'false'." >&2
-            printf '%s' "$v"
-            return 1
-        fi
-    else
-        printf '%s' "$d"
-    fi
-    return 0
-}
-
-CLEVELDB_SUDO="$( trueFalseOrDefault CLEVELDB_SUDO "$can_sudo" )" || exit $?
-CLEVELDB_DO_CLEANUP="$( trueFalseOrDefault CLEVELDB_DO_CLEANUP true )" || exit $?
-CLEVELDB_DO_BUILD="$( trueFalseOrDefault CLEVELDB_DO_BUILD true )" || exit $?
-CLEVELDB_DO_INSTALL="$( trueFalseOrDefault CLEVELDB_DO_INSTALL true )" || exit $?
-
-# The github action runners need sudo when installing libraries. Brew sometimes does (even though it complains).
-# In some situations, though, the sudo program isn't available. If you've got sudo, this'll default to using it.
-# You'll need sudo if the install command fails due to permissions (might manifest as a file does not exist error).
-SUDO=''
-if [[ "$CLEVELDB_SUDO" == 'true' ]]; then
-    SUDO='sudo'
-fi
-
-# These are defined in the leveldb CMakeLists.txt file. We don't care about them for this.
-LEVELDB_BUILD_TESTS="${LEVELDB_BUILD_TESTS:-OFF}"
-LEVELDB_BUILD_BENCHMARKS="${LEVELDB_BUILD_BENCHMARKS:-OFF}"
-
-set -ex
-
-# These lines look dumb, but they're here so that the values are clearly in the output (because of set -x).
-CLEVELDB_VERSION="$CLEVELDB_VERSION"
-CLEVELDB_JOBS="$CLEVELDB_JOBS"
-CLEVELDB_SUDO="$CLEVELDB_SUDO"
-CLEVELDB_DO_CLEANUP="$CLEVELDB_DO_CLEANUP"
-CLEVELDB_DO_BUILD="$CLEVELDB_DO_BUILD"
-CLEVELDB_DO_INSTALL="$CLEVELDB_DO_INSTALL"
-export LEVELDB_BUILD_TESTS="$LEVELDB_BUILD_TESTS"
-export LEVELDB_BUILD_BENCHMARKS="$LEVELDB_BUILD_BENCHMARKS"
-TAR_FILE="leveldb-${CLEVELDB_VERSION}.tar.gz"
-
-if [[ ! -e "$TAR_FILE" ]]; then
-    wget "https://github.com/google/leveldb/archive/${CLEVELDB_VERSION}.tar.gz" -O "$TAR_FILE"
-    tar zxf "$TAR_FILE"
-fi
-TAR_DIR="$( tar --exclude='./*/*/*' -tf "$TAR_FILE" | head -n 1 )"
-cd "$TAR_DIR"
-[[ -d 'build' ]] || mkdir build
-cd build
-[[ "$CLEVELDB_DO_BUILD" == 'true' ]] && \
-    cmake -DCMAKE_BUILD_TYPE=Release \
-        -DLEVELDB_BUILD_TESTS="$LEVELDB_BUILD_TESTS" \
-        -DLEVELDB_BUILD_BENCHMARKS="$LEVELDB_BUILD_BENCHMARKS" \
-        -DBUILD_SHARED_LIBS=ON \
-        .. && \
-    cmake --build . -j$CLEVELDB_JOBS
-[[ "$CLEVELDB_DO_INSTALL" == 'true' ]] && $SUDO cmake --install .
-cd ..
-cd ..
-if [[ "$CLEVELDB_DO_CLEANUP" == 'true' ]]; then
-    rm "$TAR_FILE"
-    rm -rf "$TAR_DIR"
-fi
diff --git a/scripts/no-now-lint.sh b/scripts/no-now-lint.sh
index 571254c649..e9ad585c09 100755
--- a/scripts/no-now-lint.sh
+++ b/scripts/no-now-lint.sh
@@ -92,8 +92,6 @@ filters+=( '^x/marker/client/cli/tx\.go:' )
 # Since it's setting a variable more specifically named than 'now',
 # we can ignore the specific line, but let it be on any line number.
 filters+=( '^cmd/provenanced/cmd/testnet\.go:[[:digit:]]+:[[:space:]]+genTime := [[:alnum:]]+\.Now\(\)$' )
-# The dbmigrate migrator has several legitimate uses, and there's nothing in there that affects block processing.
-filters+=( '^cmd/dbmigrate/utils/migrator\.go:' )
 
 for filters in "${filters[@]}"; do
   now_uses="$( grep -vE "$filters" <<< "$now_uses" )"
diff --git a/scripts/rocksdb_build_and_install.sh b/scripts/rocksdb_build_and_install.sh
deleted file mode 100755
index a61d101e13..0000000000
--- a/scripts/rocksdb_build_and_install.sh
+++ /dev/null
@@ -1,123 +0,0 @@
-#!/bin/bash
-
-# This script will download, compile, and install rocksdb, then clean up.
-DEFAULT_ROCKSDB_VERSION='6.29.5'
-
-can_sudo='false'
-command -v sudo > /dev/null 2>&1 && can_sudo='true'
-
-if [[ "$1" == '-h' || "$1" == '--help' || "$1" == 'help' ]]; then
-    echo "Usage: $( basename $0 ) [<version>]"
-    echo 'See https://github.com/facebook/rocksdb/releases for version info.'
-    echo 'The arguments can also be defined using environment variables:'
-    echo "  ROCKSDB_VERSION for the <version>. Default is $DEFAULT_ROCKSDB_VERSION."
-    echo 'Additional parameters definable using environment variables:'
-    echo "  ROCKSDB_JOBS is the number of parallel jobs for make to use. Default is the result of nproc (=$( nproc )), or 1 if nproc isn't available."
-    echo '  ROCKSDB_WITH_SHARED controls whether to build and install the shared library. Default is true.'
-    echo '  ROCKSDB_WITH_STATIC controls whether to build and install the static library. Default is false.'
-    echo '  ROCKSDB_DO_BUILD controls whether to build. Default is true.'
-    echo '  ROCKSDB_DO_INSTALL controls whether to install. Default is true.'
-    echo "  ROCKSDB_SUDO controls whether to use sudo when installing the built libraries. Default is $can_sudo."
-    echo '  ROCKSDB_DO_CLEANUP controls whether to delete the downloaded and unpacked repo. Default is true.'
-    exit 0
-fi
-
-# Order of precedence for rocksdb version: command line arg 1, env var, default.
-if [[ -n "$1" ]]; then
-    ROCKSDB_VERSION="$1"
-elif [[ -z "$ROCKSDB_VERSION" ]]; then
-    ROCKSDB_VERSION="$DEFAULT_ROCKSDB_VERSION"
-fi
-if [[ -n "$ROCKSDB_VERSION" && "$ROCKSDB_VERSION" =~ ^v ]]; then
-    echo "Illegal version: [$ROCKSDB_VERSION]. Must not start with 'v'." >&2
-    exit 1
-fi
-
-if [[ -z "$ROCKSDB_JOBS" ]]; then
-    if command -v nproc > /dev/null 2>&1; then
-        ROCKSDB_JOBS="$( nproc )"
-    else
-        ROCKSDB_JOBS=1
-    fi
-fi
-
-if [[ -n "$ROCKSDB_JOBS" && ( "$ROCKSDB_JOBS" =~ [^[:digit:]] || $ROCKSDB_JOBS -lt '1' ) ]]; then
-    echo "Illegal jobs count: [$ROCKSDB_JOBS]. Must only contain digits. Must be at least 1." >&2
-    exit 1
-fi
-
-# Usage: trueFalseOrDefault <varname> <default>
-trueFalseOrDefault () {
-    local k v d
-    k="$1"
-    v="${!1}"
-    d="$2"
-    if [[ -n "$v" ]]; then
-        if [[ "$v" =~ ^[tT]([rR][uU][eE])?$ ]]; then
-            printf 'true'
-        elif [[ "$v" =~ ^[fF]([aA][lL][sS][eE])?$ ]]; then
-            printf 'false'
-        else
-            echo "Illegal $k value: '$v'. Must be either 'true' or 'false'." >&2
-            printf '%s' "$v"
-            return 1
-        fi
-    else
-        printf '%s' "$d"
-    fi
-    return 0
-}
-
-ROCKSDB_SUDO="$( trueFalseOrDefault ROCKSDB_SUDO "$can_sudo" )" || exit $?
-ROCKSDB_WITH_SHARED="$( trueFalseOrDefault ROCKSDB_WITH_SHARED true )" || exit $?
-ROCKSDB_WITH_STATIC="$( trueFalseOrDefault ROCKSDB_WITH_STATIC false )" || exit $?
-ROCKSDB_DO_CLEANUP="$( trueFalseOrDefault ROCKSDB_DO_CLEANUP true )" || exit $?
-ROCKSDB_DO_BUILD="$( trueFalseOrDefault ROCKSDB_DO_BUILD true )" || exit $?
-ROCKSDB_DO_INSTALL="$( trueFalseOrDefault ROCKSDB_DO_INSTALL true )" || exit $?
-
-# The github action runners need sudo when installing libraries. Brew sometimes does (even though it complains).
-# In some situations, though, the sudo program isn't available. If you've got sudo, this'll default to using it.
-# You'll need sudo if the install command fails due to permissions (might manifest as a file does not exist error).
-SUDO=''
-if [[ "$ROCKSDB_SUDO" == 'true' ]]; then
-    SUDO='sudo'
-fi
-
-BUILD_TARGETS=()
-INSTALL_TARGETS=()
-if [[ "$ROCKSDB_WITH_SHARED" == 'true' ]]; then
-    BUILD_TARGETS+=( shared_lib )
-    INSTALL_TARGETS+=( install-shared )
-fi
-if [[ "$ROCKSDB_WITH_STATIC" == 'true' ]]; then
-    BUILD_TARGETS+=( static_lib )
-    INSTALL_TARGETS+=( install-static )
-fi
-
-set -ex
-
-# These lines look dumb, but they're here so that the values are clearly in the output (because of set -x).
-ROCKSDB_VERSION="$ROCKSDB_VERSION"
-ROCKSDB_JOBS="$ROCKSDB_JOBS"
-ROCKSDB_WITH_SHARED="$ROCKSDB_WITH_SHARED"
-ROCKSDB_WITH_STATIC="$ROCKSDB_WITH_STATIC"
-ROCKSDB_SUDO="$ROCKSDB_SUDO"
-ROCKSDB_DO_CLEANUP="$ROCKSDB_DO_CLEANUP"
-ROCKSDB_DO_BUILD="$ROCKSDB_DO_BUILD"
-ROCKSDB_DO_INSTALL="$ROCKSDB_DO_INSTALL"
-TAR_FILE="rocksdb-${ROCKSDB_VERSION}.tar.gz"
-
-if [[ ! -e "$TAR_FILE" ]]; then
-    wget "https://github.com/facebook/rocksdb/archive/refs/tags/v${ROCKSDB_VERSION}.tar.gz" -O "$TAR_FILE"
-    tar zxf "$TAR_FILE"
-fi
-TAR_DIR="$( tar --exclude='./*/*/*' -tf "$TAR_FILE" | head -n 1 )"
-cd "$TAR_DIR"
-export DEBUG_LEVEL=0
-[[ "$ROCKSDB_DO_BUILD" == 'true' && "${#BUILD_TARGETS[@]}" -gt '0' ]] && make -j${ROCKSDB_JOBS} "${BUILD_TARGETS[@]}"
-[[ "$ROCKSDB_DO_INSTALL" == 'true' && "${#INSTALL_TARGETS[@]}" -gt '0' ]] && $SUDO make "${INSTALL_TARGETS[@]}"
-cd ..
-if [[ "$ROCKSDB_DO_CLEANUP" == 'true' ]]; then
-    rm "$TAR_FILE"
-    rm -rf "$TAR_DIR"
-fi
diff --git a/scripts/run-sims-with-all-dbs.sh b/scripts/run-sims-with-all-dbs.sh
index 9ac4529172..cfb0a5babe 100755
--- a/scripts/run-sims-with-all-dbs.sh
+++ b/scripts/run-sims-with-all-dbs.sh
@@ -1,9 +1,9 @@
 #!/usr/bin/env bash
 
 # This script will run some sim tests: simple, import-export, multi-seed-short, nondeterminism
-# using each of the db backends: goleveldb, cleveldb, rocksdb, badgerdb.
+# using the goleveldb db backend.
 
 default_sims='simple import-export multi-seed-short nondeterminism'
-default_db_types='goleveldb cleveldb rocksdb badgerdb'
+default_db_types='goleveldb'
 default_output_dir='build/sim-times'
 
 if [[ "$#" -ne '0' ]]; then
diff --git a/sims.mk b/sims.mk
index 5112b54e40..18294feaeb 100644
--- a/sims.mk
+++ b/sims.mk
@@ -8,7 +8,7 @@
 ###
 ### Environment Variables:
 ###   GO: The command to use to execute go. Default: go
-###   DB_BACKEND: Dictates which db backend to use: goleveldb, cleveldb, rocksdb, badgerdb.
+###   DB_BACKEND: Dictates which db backend to use: goleveldb.
 ###     The test-sim-nondeterminism test is hard-coded to use memdb, though.
 ###   BINDIR: The Go bin directory, defaults to $GOPATH/bin
 ###   SIM_GENESIS: Defines the path to the custom genesis file used by
@@ -22,14 +22,8 @@
 GO ?= go
 BINDIR ?= $(GOPATH)/bin
 SIMAPP = ./app
 DB_BACKEND ?= goleveldb
-ifeq ($(DB_BACKEND),cleveldb)
-  db_tag = cleveldb
-else ifeq ($(DB_BACKEND),rocksdb)
-  db_tag = rocksdb
-else ifeq ($(DB_BACKEND),badgerdb)
-  db_tag = badgerdb
-else ifneq ($(DB_BACKEND),goleveldb)
-  $(error unknown DB_BACKEND value [$(DB_BACKEND)]. Must be one of goleveldb, cleveldb, rocksdb, badgerdb)
+ifneq ($(DB_BACKEND),goleveldb)
+  $(error unknown DB_BACKEND value [$(DB_BACKEND)]. Must be goleveldb)
 endif
 
 # We have to use a hack to provide -tags with the runsim stuff, but it only allows us to provide one tag.
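With only goleveldb left, the `ifneq` guard above makes any other `DB_BACKEND` value fail fast at make time. A quick sketch of both paths, assuming one of the sim targets these scripts drive (e.g. `test-sim-simple`):

```console
$ DB_BACKEND=goleveldb make test-sim-simple
$ DB_BACKEND=rocksdb make test-sim-simple
sims.mk:26: *** unknown DB_BACKEND value [rocksdb]. Must be goleveldb.  Stop.
```

(The exact line number in the error prefix depends on the final sims.mk.)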