diff --git a/.github/workflows/lava.yml b/.github/workflows/lava.yml index 6ee6658571..ec591595b7 100644 --- a/.github/workflows/lava.yml +++ b/.github/workflows/lava.yml @@ -16,69 +16,69 @@ jobs: test-consensus: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 - - uses: actions/setup-go@v5 - with: - go-version-file: go.mod - cache-dependency-path: go.sum - - name: cp lava - run: cp -r ~/work/lava/lava ~/go/lava - - name: export PATH - run: export PATH=$PATH:/go:/go/bin:$(go env GOPATH)/bin:/usr/local:`pwd`:/home/runner/work/lava/lava/ - - name: export GOPATH - run: export GOPATH=$GOPATH:$(go env GOPATH):/go:/go/lava:/usr/local:`pwd` - - name: export LAVA - run: export LAVA=/home/runner/work/lava/lava - - ###################################################### - ### Run Consensus unitests - ###################################################### - - name: Lava Unit Tests - run: | - go install github.com/jstemmer/go-junit-report/v2@latest - go test -v ./utils/... | go-junit-report -iocopy -set-exit-code -out utils-report.xml - go test -v ./common/... | go-junit-report -iocopy -set-exit-code -out common-report.xml - go test -v ./x/... 
| go-junit-report -iocopy -set-exit-code -out x-report.xml - - - name: Upload Test Results - if: always() - uses: actions/upload-artifact@v4 - with: - name: Test Results (Consensus) - path: "*-report.xml" + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version-file: go.mod + cache-dependency-path: go.sum + - name: cp lava + run: cp -r ~/work/lava/lava ~/go/lava + - name: export PATH + run: export PATH=$PATH:/go:/go/bin:$(go env GOPATH)/bin:/usr/local:`pwd`:/home/runner/work/lava/lava/ + - name: export GOPATH + run: export GOPATH=$GOPATH:$(go env GOPATH):/go:/go/lava:/usr/local:`pwd` + - name: export LAVA + run: export LAVA=/home/runner/work/lava/lava + + ###################################################### + ### Run Consensus unitests + ###################################################### + - name: Lava Unit Tests + run: | + go install github.com/jstemmer/go-junit-report/v2@latest + go test -v ./utils/... | go-junit-report -iocopy -set-exit-code -out utils-report.xml + go test -v ./common/... | go-junit-report -iocopy -set-exit-code -out common-report.xml + go test -v ./x/... 
| go-junit-report -iocopy -set-exit-code -out x-report.xml + + - name: Upload Test Results + if: always() + uses: actions/upload-artifact@v4 + with: + name: Test Results (Consensus) + path: "*-report.xml" test-protocol: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 - - uses: actions/setup-go@v5 - with: - go-version-file: go.mod - cache-dependency-path: go.sum - - name: cp lava - run: cp -r ~/work/lava/lava ~/go/lava - - name: export GOPATH - run: export GOPATH=$GOPATH:$(go env GOPATH):/go:/go/lava:/usr/local:`pwd` - - name: export PATH - run: export PATH=$PATH:/go:/go/bin:$(go env GOPATH)/bin:/usr/local:`pwd`:/home/runner/work/lava/lava/:$GOPATH/bin - - name: export LAVA - run: export LAVA=/home/runner/work/lava/lava - - ###################################################### - ### Run protocol unitests - ###################################################### - - name: Run Lava Protocol Tests - run: | - go install github.com/jstemmer/go-junit-report/v2@latest - go test -v ./protocol/... | go-junit-report -iocopy -set-exit-code -out protocol-report.xml - go test -v ./ecosystem/cache/... 
| go-junit-report -iocopy -set-exit-code -out cache-report.xml - - - name: Upload Test Results - if: always() - uses: actions/upload-artifact@v4 - with: - name: Test Results (Protocol) - path: "*-report.xml" + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version-file: go.mod + cache-dependency-path: go.sum + - name: cp lava + run: cp -r ~/work/lava/lava ~/go/lava + - name: export GOPATH + run: export GOPATH=$GOPATH:$(go env GOPATH):/go:/go/lava:/usr/local:`pwd` + - name: export PATH + run: export PATH=$PATH:/go:/go/bin:$(go env GOPATH)/bin:/usr/local:`pwd`:/home/runner/work/lava/lava/:$GOPATH/bin + - name: export LAVA + run: export LAVA=/home/runner/work/lava/lava + + ###################################################### + ### Run protocol unitests + ###################################################### + - name: Run Lava Protocol Tests + run: | + go install github.com/jstemmer/go-junit-report/v2@latest + go test -v ./protocol/... | go-junit-report -iocopy -set-exit-code -out protocol-report.xml + go test -v ./ecosystem/cache/... 
| go-junit-report -iocopy -set-exit-code -out cache-report.xml + + - name: Upload Test Results + if: always() + uses: actions/upload-artifact@v4 + with: + name: Test Results (Protocol) + path: "*-report.xml" test-protocol-e2e: runs-on: ubuntu-latest @@ -96,7 +96,7 @@ jobs: run: | go install github.com/jstemmer/go-junit-report/v2@latest go test ./testutil/e2e/ -run ^TestLavaProtocol$ -v -timeout 1200s | go-junit-report -iocopy -set-exit-code -out protocol-e2e-report.xml # 20mins - + - name: Upload Test Results if: always() uses: actions/upload-artifact@v4 @@ -107,7 +107,7 @@ jobs: - name: tail -n 1000 Lavad Logs if: always() run: tail -n 1000 testutil/e2e/protocolLogs/00_StartLava.log - + - name: Print all warnings and errors from lavad continue-on-error: true if: always() @@ -179,7 +179,7 @@ jobs: if: always() continue-on-error: true run: grep "" testutil/e2e/protocolLogs/06_RPCConsumer* --include="*errors*" - + - name: Upload Protocol E2E Logs if: always() uses: actions/upload-artifact@v4 @@ -187,7 +187,7 @@ jobs: name: Protocol E2E Logs path: "testutil/e2e/protocolLogs/*" - # Temporarly disabled due to a bug in the signature. + # Temporarly disabled due to a bug in the signature. # test-sdk-e2e: # runs-on: ubuntu-latest # steps: @@ -297,7 +297,7 @@ jobs: # if: always() # continue-on-error: true # run: grep "" testutil/e2e/sdkLogs/01_sdkTest* --include="*errors*" - + # - name: Upload SDK E2E Logs # if: always() # uses: actions/upload-artifact@v4 @@ -305,6 +305,47 @@ jobs: # name: SDK E2E Logs # path: "testutil/e2e/sdkLogs/*" + # This part came from lava_sdk_tests.yml that was removed. just not to lose functionality it moved here. 
+ # name: Lava SDK Tests + + # on: + # pull_request + + # jobs: + # main: + # runs-on: ubuntu-latest + # steps: + # - name: Checkout code + # uses: actions/checkout@v4 + + # - name: Cache dependencies + # uses: actions/cache@v4 + # with: + # path: ~/.yarn + # key: yarn-${{ hashFiles('yarn.lock') }} + # restore-keys: yarn- + + # - uses: actions/setup-go@v5 + # with: + # go-version-file: go.mod + # cache-dependency-path: go.sum + + # - uses: actions/setup-node@v4 + # with: + # node-version: "21.2.0" + + # - name: Init the SDK + # run: GOPATH=~/go ./scripts/init_sdk.sh -s + # working-directory: ./ecosystem/lava-sdk + + # - name: ESLint + # run: ./node_modules/.bin/eslint '**/*.ts' + # working-directory: ./ecosystem/lava-sdk + + # - name: Test + # run: ./node_modules/.bin/jest ./src --ci + # working-directory: ./ecosystem/lava-sdk + test-payment-e2e: runs-on: ubuntu-latest steps: @@ -321,7 +362,7 @@ jobs: run: | go install github.com/jstemmer/go-junit-report/v2@latest go test ./testutil/e2e/ -run ^TestLavaProtocolPayment$ -v -timeout 1200s | go-junit-report -iocopy -set-exit-code -out payment-e2e-report.xml # 20mins - + - name: Upload Test Results if: always() uses: actions/upload-artifact@v4 @@ -359,8 +400,8 @@ jobs: if: always() continue-on-error: true run: grep "" testutil/e2e/protocolLogs/06_RPCConsumer* --include="*errors*" - - - name: Upload Payment E2E Logs + + - name: Upload Payment E2E Logs if: always() uses: actions/upload-artifact@v4 with: @@ -369,8 +410,7 @@ jobs: report-tests-results: runs-on: ubuntu-latest - needs: - [ + needs: [ test-consensus, test-protocol, test-protocol-e2e, @@ -406,13 +446,13 @@ jobs: - name: Set up QEMU uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@v3 - name: Log into registry ${{ env.REGISTRY }} uses: docker/login-action@v3 with: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} + password: ${{ 
secrets.GITHUB_TOKEN }} - name: Extract metadata id: meta uses: docker/metadata-action@v5 @@ -468,9 +508,9 @@ jobs: - name: Build ${{ matrix.binary }} run: | GOWRK=off go build -o out/${{ matrix.binary }} cmd/${{ matrix.binary }}/main.go - + - name: Upload Lava Artifacts uses: actions/upload-artifact@v4 with: name: ${{ matrix.binary }}-${{ matrix.targetos }}-${{ matrix.arch }} - path: out/${{ matrix.binary }} \ No newline at end of file + path: out/${{ matrix.binary }} diff --git a/.github/workflows/lava_sdk_tests.yml b/.github/workflows/lava_sdk_tests.yml deleted file mode 100644 index a13c83c348..0000000000 --- a/.github/workflows/lava_sdk_tests.yml +++ /dev/null @@ -1,38 +0,0 @@ -name: Lava SDK Tests - -on: - pull_request - -jobs: - main: - runs-on: ubuntu-latest - steps: - - name: Checkout code - uses: actions/checkout@v4 - - - name: Cache dependencies - uses: actions/cache@v4 - with: - path: ~/.yarn - key: yarn-${{ hashFiles('yarn.lock') }} - restore-keys: yarn- - - - uses: actions/setup-go@v5 - with: - go-version: "1.20.5" - - - uses: actions/setup-node@v4 - with: - node-version: "21.2.0" - - - name: Init the SDK - run: GOPATH=~/go ./scripts/init_sdk.sh -s - working-directory: ./ecosystem/lava-sdk - - - name: ESLint - run: ./node_modules/.bin/eslint '**/*.ts' - working-directory: ./ecosystem/lava-sdk - - - name: Test - run: ./node_modules/.bin/jest ./src --ci - working-directory: ./ecosystem/lava-sdk diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 9de92e14ca..38499c275b 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -18,7 +18,8 @@ jobs: - name: Install Go uses: actions/setup-go@v5 with: - go-version: 1.20.5 + go-version-file: go.mod + cache-dependency-path: go.sum - name: Lint uses: golangci/golangci-lint-action@v4 diff --git a/.golangci.yml b/.golangci.yml index e362fef5d2..c89230b521 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -2,21 +2,13 @@ run: tests: true # timeout for analysis, e.g. 
30s, 5m, default is 1m timeout: 7m - skip-files: - - "protocol/chainlib/chainproxy/rpcInterfaceMessages/grpcMessage.go" - - "protocol/chainlib/grpc.go" - - "protocol/chainlib/grpcproxy/dyncodec/remote_grpc_reflection.go" - - "protocol/chainlib/grpcproxy/dyncodec/remote_relayer.go" - - "protocol/chainlib/grpcproxy/dyncodec/remotes_test.go" - - "ecosystem/lavajs/*" - - "ecosystem/lava-sdk/*" linters: disable-all: true #for list of linters and what they do: https://golangci-lint.run/usage/linters/ enable: - dogsled - - exportloopref + - copyloopvar - gocritic - gofumpt - gosimple @@ -41,6 +33,14 @@ linters: - whitespace issues: + exclude-files: + - "protocol/chainlib/chainproxy/rpcInterfaceMessages/grpcMessage.go" + - "protocol/chainlib/grpc.go" + - "protocol/chainlib/grpcproxy/dyncodec/remote_grpc_reflection.go" + - "protocol/chainlib/grpcproxy/dyncodec/remote_relayer.go" + - "protocol/chainlib/grpcproxy/dyncodec/remotes_test.go" + - "ecosystem/lavajs/*" + - "ecosystem/lava-sdk/*" exclude-rules: - text: "singleCaseSwitch" linters: @@ -56,7 +56,7 @@ issues: - text: "ST1016:" linters: - stylecheck - - text: "SA1019:.*\"github.com/golang/protobuf/proto\" is deprecated.*" # proto is deprecated, but some places couldn't be removed + - text: 'SA1019:.*"github.com/golang/protobuf/proto" is deprecated.*' # proto is deprecated, but some places couldn't be removed linters: - staticcheck - path: "migrations" @@ -70,9 +70,6 @@ issues: linters-settings: dogsled: max-blank-identifiers: 5 - maligned: - # print struct with more effective memory layout or not, false by default - suggest-new: true nolintlint: allow-unused: false require-explanation: false diff --git a/README.md b/README.md index 5e0794637f..40ca800b2b 100644 --- a/README.md +++ b/README.md @@ -30,7 +30,7 @@ The best way to start working with lava is to use docker, for additional reading ## Contributing -See [CONTRIBUTING.md](./CONTRIBUTING.md) for details on how to contribute. 
If you want to follow the updates or learn more about the latest design then join our [Discord](https://discord.gg/lavanetxyz). +See [CONTRIBUTING.md](./CONTRIBUTING.md) for details on how to contribute. If you want to follow the updates or learn more about the latest design then join our [Discord](https://discord.com/invite/Tbk5NxTCdA). ## Developing @@ -71,7 +71,7 @@ Or check out the latest [release](https://github.com/lavanet/lava/releases). ### Add `lavad`/`lavap` autocomplete -You can add a useful autocomplete feature to `lavad` & `lavap` with a simple bash [script](https://github.com/lavanet/lava/blob/main/scripts/lava_auto_completion_install.sh). +You can add a useful autocomplete feature to `lavad` & `lavap` with a simple bash [script](https://github.com/lavanet/lava/blob/main/scripts/automation_scripts/lava_auto_completion_install.sh). ## Join Lava @@ -82,5 +82,5 @@ Join Lava's testnet, [read instructions here](https://docs.lavanet.xyz/testnet?u ## Community - [Github Discussions](https://github.com/lavanet/lava/discussions) -- [Discord](https://discord.gg/lavanetxyz) -- [Twitter](https://twitter.com/lavanetxyz) +- [Discord](https://discord.com/invite/Tbk5NxTCdA) +- [X (formerly Twitter)](https://x.com/lavanetxyz) diff --git a/app/app.go b/app/app.go index ee6eba092f..9a86e4e8cf 100644 --- a/app/app.go +++ b/app/app.go @@ -46,7 +46,6 @@ import ( "github.com/cosmos/cosmos-sdk/x/auth" "github.com/cosmos/cosmos-sdk/x/auth/ante" authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper" - authsims "github.com/cosmos/cosmos-sdk/x/auth/simulation" authtx "github.com/cosmos/cosmos-sdk/x/auth/tx" authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" "github.com/cosmos/cosmos-sdk/x/auth/vesting" @@ -874,34 +873,7 @@ func New( app.setupUpgradeHandlers() // create the simulation manager and define the order of the modules for deterministic simulations - app.sm = module.NewSimulationManager( - auth.NewAppModule(appCodec, app.AccountKeeper, authsims.RandomGenesisAccounts, 
app.GetSubspace(authtypes.ModuleName)), - bank.NewAppModule(appCodec, app.BankKeeper, app.AccountKeeper, app.GetSubspace(banktypes.ModuleName)), - capability.NewAppModule(appCodec, *app.CapabilityKeeper, false), - feegrantmodule.NewAppModule(appCodec, app.AccountKeeper, app.BankKeeper, app.FeeGrantKeeper, app.interfaceRegistry), - gov.NewAppModule(appCodec, &app.GovKeeper, app.AccountKeeper, app.BankKeeper, app.GetSubspace(govtypes.ModuleName)), - staking.NewAppModule(appCodec, app.StakingKeeper, app.AccountKeeper, app.BankKeeper, app.GetSubspace(stakingtypes.ModuleName)), - distr.NewAppModule(appCodec, app.DistrKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper, app.GetSubspace(distrtypes.ModuleName)), - slashing.NewAppModule(appCodec, app.SlashingKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper, app.GetSubspace(slashingtypes.ModuleName)), - params.NewAppModule(app.ParamsKeeper), - evidence.NewAppModule(app.EvidenceKeeper), - ibc.NewAppModule(app.IBCKeeper), - groupmodule.NewAppModule(appCodec, app.GroupKeeper, app.AccountKeeper, app.BankKeeper, app.interfaceRegistry), - authzmodule.NewAppModule(appCodec, app.AuthzKeeper, app.AccountKeeper, app.BankKeeper, app.interfaceRegistry), - transferModule, - specModule, - epochstorageModule, - dualstakingModule, - subscriptionModule, - pairingModule, - conflictModule, - projectsModule, - protocolModule, - plansModule, - rewardsModule, - // this line is used by starport scaffolding # stargate/app/appModule - ) - app.sm.RegisterStoreDecoders() + app.sm = module.NewSimulationManager() // initialize stores app.MountKVStores(keys) diff --git a/app/simulation_test.go b/app/simulation_test.go deleted file mode 100644 index 73fb5dc02f..0000000000 --- a/app/simulation_test.go +++ /dev/null @@ -1,92 +0,0 @@ -package app_test - -import ( - "os" - "testing" - "time" - - "github.com/cosmos/cosmos-sdk/testutil/sims" - - tmproto "github.com/cometbft/cometbft/proto/tendermint/types" - tmtypes 
"github.com/cometbft/cometbft/types" - simulationtypes "github.com/cosmos/cosmos-sdk/types/simulation" - "github.com/cosmos/cosmos-sdk/x/simulation" - "github.com/cosmos/cosmos-sdk/x/simulation/client/cli" - "github.com/cosmos/ibc-go/v7/testing/simapp" - "github.com/lavanet/lava/v4/app" - "github.com/stretchr/testify/require" -) - -func init() { - cli.GetSimulatorFlags() -} - -var defaultConsensusParams = &tmproto.ConsensusParams{ - Block: &tmproto.BlockParams{ - MaxBytes: 200000, - MaxGas: 2000000, - }, - Evidence: &tmproto.EvidenceParams{ - MaxAgeNumBlocks: 302400, - MaxAgeDuration: 504 * time.Hour, // 3 weeks is the max duration - MaxBytes: 10000, - }, - Validator: &tmproto.ValidatorParams{ - PubKeyTypes: []string{ - tmtypes.ABCIPubKeyTypeEd25519, - }, - }, -} - -// BenchmarkSimulation run the chain simulation -// Running using starport command: -// `starport chain simulate -v --numBlocks 200 --blockSize 50` -// Running as go benchmark test: -// `go test -benchmem -run=^$ -bench ^BenchmarkSimulation ./app -NumBlocks=200 -BlockSize 50 -Commit=true -Verbose=true -Enabled=true` -func BenchmarkSimulation(b *testing.B) { - simapp.FlagEnabledValue = true - simapp.FlagCommitValue = true - config, db, dir, logger, _, err := simapp.SetupSimulation("goleveldb-app-sim", "Simulation") - require.NoError(b, err, "simulation setup failed") - - b.Cleanup(func() { - db.Close() - err = os.RemoveAll(dir) - require.NoError(b, err) - }) - - encoding := app.MakeEncodingConfig() - - app := app.New( - logger, - db, - nil, - true, - map[int64]bool{}, - app.DefaultNodeHome, - 0, - encoding, - sims.EmptyAppOptions{}, - ) - - // Run randomized simulations - _, simParams, simErr := simulation.SimulateFromSeed( - b, - os.Stdout, - app.BaseApp, - simapp.AppStateFn(app.AppCodec(), app.SimulationManager()), - simulationtypes.RandomAccounts, - sims.SimulationOperations(app, app.AppCodec(), config), - app.ModuleAccountAddrs(), - config, - app.AppCodec(), - ) - - err = 
sims.CheckExportSimulation(app, config, simParams) - require.NoError(b, err) - require.NoError(b, simErr) - - if config.Commit { - simapp.PrintStats(db) - } -} diff --git a/config/provider_examples/avalanch_internal_paths_example.yml b/config/provider_examples/avalanch_internal_paths_example.yml index bf69abb6ad..1dbec1ee1a 100644 --- a/config/provider_examples/avalanch_internal_paths_example.yml +++ b/config/provider_examples/avalanch_internal_paths_example.yml @@ -2,10 +2,14 @@ endpoints: - api-interface: jsonrpc chain-id: AVAX - network-address: 127.0.0.1:2221 + network-address: + address: 127.0.0.1:2221 node-urls: - - url: ws://127.0.0.1:3333/C/rpc/ws + - url: ws://127.0.0.1:3333/C/ws internal-path: "/C/rpc" # c chain like specified in the spec + + - url: https://127.0.0.1:3334/C/rpc + internal-path: "/C/rpc" # c/rpc like specified in the spec - url: https://127.0.0.1:3334/C/avax internal-path: "/C/avax" # c/avax like specified in the spec - url: https://127.0.0.1:3335/X diff --git a/config/provider_examples/lava_example_archive_methodroute.yml b/config/provider_examples/lava_example_archive_method_route.yml similarity index 84% rename from config/provider_examples/lava_example_archive_methodroute.yml rename to config/provider_examples/lava_example_archive_method_route.yml index e8cbf3bad9..5e4f76b9dc 100644 --- a/config/provider_examples/lava_example_archive_methodroute.yml +++ b/config/provider_examples/lava_example_archive_method_route.yml @@ -2,14 +2,14 @@ endpoints: - api-interface: tendermintrpc chain-id: LAV1 network-address: - address: "127.0.0.1:2220" + address: "127.0.0.1:2224" node-urls: - url: ws://127.0.0.1:26657/websocket - url: http://127.0.0.1:26657 - url: http://127.0.0.1:26657 addons: - archive - - url: https://trustless-api.com + - url: http://127.0.0.1:4444 methods: - block - block_by_hash @@ -18,7 +18,7 @@ endpoints: - api-interface: grpc chain-id: LAV1 network-address: - address: "127.0.0.1:2220" + address: "127.0.0.1:2224" node-urls: - 
url: 127.0.0.1:9090 - url: 127.0.0.1:9090 @@ -27,7 +27,7 @@ endpoints: - api-interface: rest chain-id: LAV1 network-address: - address: "127.0.0.1:2220" + address: "127.0.0.1:2224" node-urls: - url: http://127.0.0.1:1317 - url: http://127.0.0.1:1317 diff --git a/config/provider_examples/strk_example.yml b/config/provider_examples/strk_example.yml new file mode 100644 index 0000000000..257bacb6b8 --- /dev/null +++ b/config/provider_examples/strk_example.yml @@ -0,0 +1,21 @@ +endpoints: + - api-interface: jsonrpc + chain-id: STRK + network-address: + address: "127.0.0.1:2220" + node-urls: + - url: /ws + internal-path: "/ws" + - url: /ws/rpc/v0_6 + internal-path: "/ws/rpc/v0_6" + - url: /ws/rpc/v0_7 + internal-path: "/ws/rpc/v0_7" + + - url: + internal-path: "" + - url: /rpc/v0_5 + internal-path: "/rpc/v0_5" + - url: /rpc/v0_6 + internal-path: "/rpc/v0_6" + - url: /rpc/v0_7 + internal-path: "/rpc/v0_7" diff --git a/cookbook/README.md b/cookbook/README.md index 2fa9633235..2fc5f829bf 100644 --- a/cookbook/README.md +++ b/cookbook/README.md @@ -26,7 +26,7 @@ Lava has many specs and participants can add and modify specs using governance p | reliability_threshold | Threshold for VRF to decide when to do a data reliability check (i.e. re-execute query with another provider). Currently set to `268435455` on all specs resulting in a `1/16` ratio.| | data_reliability_enabled | True/False for data reliability on/off for this spec. | | block_distance_for_finalized_data | Blockchains like Ethereum have probabilistic finality, this threshold sets what we expect to be a safe distance from the latest block (In eth it’s 7: i.e. any block bigger in distance than 7 from the latest block we consider final).| -| blocks_in_finalization_proof | Number of finalized blocks the provider keeps (from the chain he provides service for, not always Lava) for data reliability. 
| +| blocks_in_finalization_proof | Number of finalized blocks the provider keeps (from the chain he provides service for, not always Lava) for data reliability. Normally, this value should be: 1sec / average_block_time | | average_block_time | Average block time on this blockchain, used for estimating time of future blocks. | | allowed_block_lag_for_qos_sync | Lag used to calculate QoS for providers. this should be `(10000 (10 seconds) / average_block_time) AND bigger than 1`, beyond this distance the data is considered stale and irrelevant. | | block_last_updated | The latest block in which the spec was updated. | @@ -65,7 +65,7 @@ Lava has many specs and participants can add and modify specs using governance p | deterministic| True/False. If an API is deterministic (executing the API twice in the same block will have the same result, which means different providers are supposed to get the same result), we can run data reliability checks on it. | | local | True/False. Marks an API that is local to the node (like subscription APIs, which are not relevant to other nodes) | | subscription | True/False. Marks a subscription API. Requires an active connection to a node to get data pushed from a provider. | -| stateful | Requires local storage on the provider’s node. | +| stateful | True for transaction APIs. | | hanging_api | True/False. Marks an API that is dependent on a creation of a new block (so the API hangs until this happens). | ### How to propose a new spec? 
diff --git a/cookbook/projects/policy_all_chains_with_extension.yml b/cookbook/projects/policy_all_chains_with_extension.yml index 491e9bd047..59c25bcb00 100644 --- a/cookbook/projects/policy_all_chains_with_extension.yml +++ b/cookbook/projects/policy_all_chains_with_extension.yml @@ -110,6 +110,26 @@ Policy: extensions: - "archive" mixed: true + - chain_id: OSMOSIS + requirements: + - collection: + api_interface: "rest" + type: "GET" + extensions: + - "archive" + mixed: true + - collection: + api_interface: "grpc" + type: "" + extensions: + - "archive" + mixed: true + - collection: + api_interface: "tendermintrpc" + type: "" + extensions: + - "archive" + mixed: true - chain_id: COSMOSHUB requirements: - collection: diff --git a/cookbook/specs/avalanche.json b/cookbook/specs/avalanche.json index a7acedc3d0..3633cf938f 100644 --- a/cookbook/specs/avalanche.json +++ b/cookbook/specs/avalanche.json @@ -11,15 +11,15 @@ "data_reliability_enabled": true, "block_distance_for_finalized_data": 4, "blocks_in_finalization_proof": 3, - "average_block_time": 2500, - "allowed_block_lag_for_qos_sync": 4, + "average_block_time": 2000, + "allowed_block_lag_for_qos_sync": 5, "imports": [ "ETH1" ], "shares": 1, "min_stake_provider": { "denom": "ulava", - "amount": "47500000000" + "amount": "50000000" }, "api_collections": [ { @@ -735,12 +735,12 @@ "data_reliability_enabled": true, "block_distance_for_finalized_data": 4, "blocks_in_finalization_proof": 3, - "average_block_time": 2500, - "allowed_block_lag_for_qos_sync": 4, + "average_block_time": 2000, + "allowed_block_lag_for_qos_sync": 5, "shares": 1, "min_stake_provider": { "denom": "ulava", - "amount": "47500000000" + "amount": "50000000" }, "api_collections": [ { diff --git a/cookbook/specs/axelar.json b/cookbook/specs/axelar.json index f3ff6211d4..c0888849ed 100644 --- a/cookbook/specs/axelar.json +++ b/cookbook/specs/axelar.json @@ -805,6 +805,24 @@ "stateful": 0 }, "extra_compute_units": 0 + }, + { + "name": 
"/axelar/reward/v1beta1/inflation_rate/{validator}", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 } ], "headers": [], @@ -2300,6 +2318,132 @@ "stateful": 0 }, "extra_compute_units": 0 + }, + { + "name": "axelar.nexus.v1beta1.QueryService/Params", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "axelar.permission.v1beta1.Query/Params", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "axelar.evm.v1beta1.QueryService/Params", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "axelar.vote.v1beta1.QueryService/Params", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "axelar.multisig.v1beta1.QueryService/Params", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": false, + "subscription": false, + "stateful": 0 + }, + 
"extra_compute_units": 0 + }, + { + "name": "axelar.tss.v1beta1.QueryService/Params", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "axelar.snapshot.v1beta1.QueryService/Params", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 } ], "headers": [], diff --git a/cookbook/specs/celestia.json b/cookbook/specs/celestia.json index a4117cd787..e4a1323c4d 100644 --- a/cookbook/specs/celestia.json +++ b/cookbook/specs/celestia.json @@ -14,7 +14,7 @@ "data_reliability_enabled": true, "block_distance_for_finalized_data": 0, "blocks_in_finalization_proof": 1, - "average_block_time": 6500, + "average_block_time": 12000, "allowed_block_lag_for_qos_sync": 2, "shares": 1, "min_stake_provider": { @@ -344,6 +344,15 @@ "inheritance_apis": [], "parse_directives": [], "verifications": [ + { + "name": "minimum-gas-price", + "values": [ + { + "expected_value": "0.002000000000000000utia", + "severity": "Warning" + } + ] + }, { "name": "chain-id", "values": [ @@ -711,6 +720,15 @@ "inheritance_apis": [], "parse_directives": [], "verifications": [ + { + "name": "minimum-gas-price", + "values": [ + { + "expected_value": "0.002000000000000000utia", + "severity": "Warning" + } + ] + }, { "name": "chain-id", "values": [ @@ -1458,7 +1476,7 @@ "data_reliability_enabled": true, "block_distance_for_finalized_data": 0, "blocks_in_finalization_proof": 1, - "average_block_time": 6500, + "average_block_time": 5100, "allowed_block_lag_for_qos_sync": 2, "shares": 1, "min_stake_provider": { @@ -1479,6 +1497,15 @@ "inheritance_apis": [], "parse_directives": [], 
"verifications": [ + { + "name": "minimum-gas-price", + "values": [ + { + "expected_value": "0.002000000000000000utia", + "severity": "Warning" + } + ] + }, { "name": "chain-id", "values": [ @@ -1502,6 +1529,15 @@ "inheritance_apis": [], "parse_directives": [], "verifications": [ + { + "name": "minimum-gas-price", + "values": [ + { + "expected_value": "0.002000000000000000utia", + "severity": "Warning" + } + ] + }, { "name": "chain-id", "values": [ @@ -1593,6 +1629,15 @@ "inheritance_apis": [], "parse_directives": [], "verifications": [ + { + "name": "minimum-gas-price", + "values": [ + { + "expected_value": "0.002000000000000000utia", + "severity": "Warning" + } + ] + }, { "name": "chain-id", "values": [ @@ -1616,6 +1661,15 @@ "inheritance_apis": [], "parse_directives": [], "verifications": [ + { + "name": "minimum-gas-price", + "values": [ + { + "expected_value": "0.002000000000000000utia", + "severity": "Warning" + } + ] + }, { "name": "chain-id", "values": [ diff --git a/cookbook/specs/celo.json b/cookbook/specs/celo.json index 02fc646a2a..2d57721d9a 100644 --- a/cookbook/specs/celo.json +++ b/cookbook/specs/celo.json @@ -19,7 +19,7 @@ "shares": 1, "min_stake_provider": { "denom": "ulava", - "amount": "47500000000" + "amount": "5000000000" }, "api_collections": [ { @@ -79,12 +79,12 @@ "data_reliability_enabled": true, "block_distance_for_finalized_data": 1, "blocks_in_finalization_proof": 3, - "average_block_time": 5000, - "allowed_block_lag_for_qos_sync": 2, + "average_block_time": 1000, + "allowed_block_lag_for_qos_sync": 10, "shares": 1, "min_stake_provider": { "denom": "ulava", - "amount": "47500000000" + "amount": "5000000000" }, "api_collections": [ { @@ -107,6 +107,18 @@ "expected_value": "0xaef3" } ] + }, + { + "name": "pruning", + "values": [ + { + "latest_distance": 86400 + }, + { + "extension": "archive", + "expected_value": "0x0" + } + ] } ] } diff --git a/cookbook/specs/evmos.json b/cookbook/specs/evmos.json index c0324f3cae..7bc68cfafd 100644 --- 
a/cookbook/specs/evmos.json +++ b/cookbook/specs/evmos.json @@ -1495,15 +1495,6 @@ "inheritance_apis": [], "parse_directives": [], "verifications": [ - { - "name": "minimum-gas-price", - "values": [ - { - "expected_value": "CiQ1MDAwMDAwMDAwMC4wMDAwMDAwMDAwMDAwMDAwMDBhZXZtb3M", - "severity": "Warning" - } - ] - }, { "name": "chain-id", "values": [ diff --git a/cookbook/specs/fantom.json b/cookbook/specs/fantom.json index 2ce468de09..01470e7bfa 100644 --- a/cookbook/specs/fantom.json +++ b/cookbook/specs/fantom.json @@ -32,17 +32,17 @@ }, "apis": [ { - "name": "ftm_chainId", + "name": "ftm_accounts", "block_parsing": { "parser_arg": [ "latest" ], "parser_func": "DEFAULT" }, - "compute_units": 1, + "compute_units": 10, "enabled": true, "category": { - "deterministic": true, + "deterministic": false, "local": false, "subscription": false, "stateful": 0 @@ -50,25 +50,25 @@ "extra_compute_units": 0 }, { - "name": "ftm_blockNumber", + "name": "ftm_chainId", "block_parsing": { "parser_arg": [ - "" + "latest" ], - "parser_func": "EMPTY" + "parser_func": "DEFAULT" }, - "compute_units": 10, + "compute_units": 1, "enabled": true, "category": { - "deterministic": false, - "local": true, + "deterministic": true, + "local": false, "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "ftm_subscribe", + "name": "ftm_blockNumber", "block_parsing": { "parser_arg": [ "" @@ -86,35 +86,35 @@ "extra_compute_units": 0 }, { - "name": "ftm_unsubscribe", + "name": "ftm_coinbase", "block_parsing": { "parser_arg": [ - "" + "latest" ], - "parser_func": "EMPTY" + "parser_func": "DEFAULT" }, - "compute_units": 19, + "compute_units": 10, "enabled": true, "category": { "deterministic": false, - "local": true, - "subscription": true, + "local": false, + "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "ftm_feeHistory", + "name": "ftm_syncing", "block_parsing": { "parser_arg": [ - "1" + "latest" ], - "parser_func": "PARSE_BY_ARG" + 
"parser_func": "DEFAULT" }, - "compute_units": 19, + "compute_units": 10, "enabled": true, "category": { - "deterministic": true, + "deterministic": false, "local": false, "subscription": false, "stateful": 0 @@ -122,50 +122,50 @@ "extra_compute_units": 0 }, { - "name": "ftm_maxPriorityFeePerGas", + "name": "ftm_subscribe", "block_parsing": { "parser_arg": [ "" ], "parser_func": "EMPTY" }, - "compute_units": 21, + "compute_units": 10, "enabled": true, "category": { - "deterministic": true, - "local": false, + "deterministic": false, + "local": true, "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "ftm_createAccessList", + "name": "ftm_unsubscribe", "block_parsing": { "parser_arg": [ - "0" + "" ], - "parser_func": "PARSE_BY_ARG" + "parser_func": "EMPTY" }, - "compute_units": 16, + "compute_units": 19, "enabled": true, "category": { - "deterministic": true, - "local": false, - "subscription": false, + "deterministic": false, + "local": true, + "subscription": true, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "ftm_getTransactionReceipt", + "name": "ftm_feeHistory", "block_parsing": { "parser_arg": [ - "" + "1" ], - "parser_func": "EMPTY" + "parser_func": "PARSE_BY_ARG" }, - "compute_units": 20, + "compute_units": 19, "enabled": true, "category": { "deterministic": true, @@ -176,14 +176,14 @@ "extra_compute_units": 0 }, { - "name": "ftm_getTransactionByBlockHashAndIndex", + "name": "ftm_maxPriorityFeePerGas", "block_parsing": { "parser_arg": [ - "0" + "" ], - "parser_func": "PARSE_BY_ARG" + "parser_func": "EMPTY" }, - "compute_units": 20, + "compute_units": 21, "enabled": true, "category": { "deterministic": true, @@ -194,17 +194,17 @@ "extra_compute_units": 0 }, { - "name": "ftm_getTransactionByBlockNumberAndIndex", + "name": "ftm_newBlockFilter", "block_parsing": { "parser_arg": [ - "1" + "" ], - "parser_func": "PARSE_BY_ARG" + "parser_func": "EMPTY" }, - "compute_units": 19, + "compute_units": 20, "enabled": true, 
"category": { - "deterministic": true, + "deterministic": false, "local": false, "subscription": false, "stateful": 0 @@ -212,36 +212,35 @@ "extra_compute_units": 0 }, { - "name": "ftm_getBlockByNumber", + "name": "ftm_newFilter", "block_parsing": { "parser_arg": [ "" ], "parser_func": "EMPTY" }, - "compute_units": 75, - "enabled": false, + "compute_units": 20, + "enabled": true, "category": { "deterministic": false, - "local": true, + "local": false, "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "ftm_getStorageAt", + "name": "ftm_newPendingTransactionFilter", "block_parsing": { "parser_arg": [ - "0", - "toBlock" + "latest" ], - "parser_func": "PARSE_CANONICAL" + "parser_func": "DEFAULT" }, - "compute_units": 75, + "compute_units": 20, "enabled": true, "category": { - "deterministic": true, + "deterministic": false, "local": false, "subscription": false, "stateful": 0 @@ -249,32 +248,32 @@ "extra_compute_units": 0 }, { - "name": "ftm_getTransactionByHash", + "name": "ftm_uninstallFilter", "block_parsing": { "parser_arg": [ - "" + "latest" ], - "parser_func": "EMPTY" + "parser_func": "DEFAULT" }, - "compute_units": 21, + "compute_units": 10, "enabled": true, "category": { - "deterministic": true, - "local": false, + "deterministic": false, + "local": true, "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "ftm_gasPrice", + "name": "ftm_createAccessList", "block_parsing": { "parser_arg": [ - "2" + "0" ], "parser_func": "PARSE_BY_ARG" }, - "compute_units": 17, + "compute_units": 16, "enabled": true, "category": { "deterministic": true, @@ -285,14 +284,14 @@ "extra_compute_units": 0 }, { - "name": "ftm_getBalance", + "name": "ftm_getTransactionReceipt", "block_parsing": { "parser_arg": [ "" ], "parser_func": "EMPTY" }, - "compute_units": 15, + "compute_units": 20, "enabled": true, "category": { "deterministic": true, @@ -303,14 +302,14 @@ "extra_compute_units": 0 }, { - "name": "ftm_getCode", + "name": 
"ftm_getTransactionByBlockHashAndIndex", "block_parsing": { "parser_arg": [ "0" ], "parser_func": "PARSE_BY_ARG" }, - "compute_units": 15, + "compute_units": 20, "enabled": true, "category": { "deterministic": true, @@ -321,14 +320,14 @@ "extra_compute_units": 0 }, { - "name": "ftm_sign", + "name": "ftm_getTransactionByBlockNumberAndIndex", "block_parsing": { "parser_arg": [ - "" + "1" ], - "parser_func": "EMPTY" + "parser_func": "PARSE_BY_ARG" }, - "compute_units": 17, + "compute_units": 19, "enabled": true, "category": { "deterministic": true, @@ -339,32 +338,32 @@ "extra_compute_units": 0 }, { - "name": "ftm_signTransaction", + "name": "ftm_getBlockByNumber", "block_parsing": { "parser_arg": [ - "1" + "" ], - "parser_func": "PARSE_BY_ARG" + "parser_func": "EMPTY" }, - "compute_units": 26, - "enabled": true, + "compute_units": 75, + "enabled": false, "category": { - "deterministic": true, - "local": false, + "deterministic": false, + "local": true, "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "ftm_sendTransaction", + "name": "ftm_getBlockReceipts", "block_parsing": { "parser_arg": [ - "" + "0" ], - "parser_func": "EMPTY" + "parser_func": "PARSE_BY_ARG" }, - "compute_units": 15, + "compute_units": 20, "enabled": true, "category": { "deterministic": true, @@ -375,68 +374,69 @@ "extra_compute_units": 0 }, { - "name": "ftm_getBlockTransactionCountByHash", + "name": "ftm_getStorageAt", "block_parsing": { "parser_arg": [ - "" + "0", + "toBlock" ], - "parser_func": "EMPTY" + "parser_func": "PARSE_CANONICAL" }, - "compute_units": 20, + "compute_units": 75, "enabled": true, "category": { - "deterministic": false, - "local": true, + "deterministic": true, + "local": false, "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "ftm_getBlockTransactionCountByNumber", + "name": "ftm_getTransactionByHash", "block_parsing": { "parser_arg": [ "" ], "parser_func": "EMPTY" }, - "compute_units": 250, + "compute_units": 
21, "enabled": true, "category": { - "deterministic": false, - "local": true, + "deterministic": true, + "local": false, "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "ftm_getProof", + "name": "ftm_gasPrice", "block_parsing": { "parser_arg": [ - "" + "2" ], - "parser_func": "EMPTY" + "parser_func": "PARSE_BY_ARG" }, - "compute_units": 10, + "compute_units": 17, "enabled": true, "category": { - "deterministic": false, - "local": true, + "deterministic": true, + "local": false, "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "ftm_getBlockByHash", + "name": "ftm_getBalance", "block_parsing": { "parser_arg": [ "" ], "parser_func": "EMPTY" }, - "compute_units": 10, + "compute_units": 15, "enabled": true, "category": { "deterministic": true, @@ -447,14 +447,14 @@ "extra_compute_units": 0 }, { - "name": "ftm_getTransactionCount", + "name": "ftm_getCode", "block_parsing": { "parser_arg": [ - "" + "0" ], - "parser_func": "EMPTY" + "parser_func": "PARSE_BY_ARG" }, - "compute_units": 10, + "compute_units": 15, "enabled": true, "category": { "deterministic": true, @@ -465,32 +465,32 @@ "extra_compute_units": 0 }, { - "name": "ftm_call", + "name": "ftm_getFilterChanges", "block_parsing": { "parser_arg": [ - "" + "1" ], - "parser_func": "EMPTY" + "parser_func": "PARSE_BY_ARG" }, - "compute_units": 10, + "compute_units": 20, "enabled": true, "category": { "deterministic": false, - "local": true, + "local": false, "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "ftm_estimateGas", + "name": "ftm_getFilterLogs", "block_parsing": { "parser_arg": [ - "" + "latest" ], - "parser_func": "EMPTY" + "parser_func": "DEFAULT" }, - "compute_units": 87, + "compute_units": 60, "enabled": true, "category": { "deterministic": false, @@ -501,32 +501,32 @@ "extra_compute_units": 0 }, { - "name": "ftm_sendRawTransaction", + "name": "ftm_getLogs", "block_parsing": { "parser_arg": [ - "1" + "latest" ], - 
"parser_func": "PARSE_BY_ARG" + "parser_func": "DEFAULT" }, - "compute_units": 26, + "compute_units": 60, "enabled": true, "category": { - "deterministic": true, - "local": false, + "deterministic": false, + "local": true, "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "net_version", + "name": "ftm_getUncleByBlockHashAndIndex", "block_parsing": { "parser_arg": [ "" ], "parser_func": "EMPTY" }, - "compute_units": 10, + "compute_units": 15, "enabled": true, "category": { "deterministic": true, @@ -537,14 +537,14 @@ "extra_compute_units": 0 }, { - "name": "net_listening", + "name": "ftm_getUncleByBlockNumberAndIndex", "block_parsing": { "parser_arg": [ "" ], "parser_func": "EMPTY" }, - "compute_units": 10, + "compute_units": 15, "enabled": true, "category": { "deterministic": true, @@ -555,35 +555,35 @@ "extra_compute_units": 0 }, { - "name": "rpc_modules", + "name": "ftm_getUncleCountByBlockHash", "block_parsing": { "parser_arg": [ "" ], "parser_func": "EMPTY" }, - "compute_units": 1, + "compute_units": 15, "enabled": true, "category": { - "deterministic": false, - "local": true, + "deterministic": true, + "local": false, "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "eth_accounts", + "name": "ftm_getUncleCountByBlockNumber", "block_parsing": { "parser_arg": [ "" ], "parser_func": "EMPTY" }, - "compute_units": 10, - "enabled": false, + "compute_units": 15, + "enabled": true, "category": { - "deterministic": false, + "deterministic": true, "local": false, "subscription": false, "stateful": 0 @@ -591,17 +591,17 @@ "extra_compute_units": 0 }, { - "name": "eth_coinbase", + "name": "ftm_sign", "block_parsing": { "parser_arg": [ "" ], "parser_func": "EMPTY" }, - "compute_units": 1, - "enabled": false, + "compute_units": 17, + "enabled": true, "category": { - "deterministic": false, + "deterministic": true, "local": false, "subscription": false, "stateful": 0 @@ -609,15 +609,15 @@ "extra_compute_units": 0 }, 
{ - "name": "eth_compileLLL", + "name": "ftm_signTransaction", "block_parsing": { "parser_arg": [ "1" ], "parser_func": "PARSE_BY_ARG" }, - "compute_units": 10, - "enabled": false, + "compute_units": 26, + "enabled": true, "category": { "deterministic": true, "local": false, @@ -627,17 +627,17 @@ "extra_compute_units": 0 }, { - "name": "eth_getCompilers", + "name": "ftm_sendTransaction", "block_parsing": { "parser_arg": [ "" ], "parser_func": "EMPTY" }, - "compute_units": 10, - "enabled": false, + "compute_units": 15, + "enabled": true, "category": { - "deterministic": false, + "deterministic": true, "local": false, "subscription": false, "stateful": 0 @@ -645,87 +645,69 @@ "extra_compute_units": 0 }, { - "name": "eth_getFilterChanges", + "name": "ftm_getBlockTransactionCountByHash", "block_parsing": { "parser_arg": [ - "0" + "" ], - "parser_func": "PARSE_BY_ARG" + "parser_func": "EMPTY" }, "compute_units": 20, - "enabled": false, + "enabled": true, "category": { "deterministic": false, - "local": false, + "local": true, "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "eth_getUncleByBlockHashAndIndex", + "name": "ftm_getBlockTransactionCountByNumber", "block_parsing": { "parser_arg": [ "" ], "parser_func": "EMPTY" }, - "compute_units": 20, - "enabled": false, - "category": { - "deterministic": true, - "local": false, - "subscription": false, - "stateful": 0 - }, - "extra_compute_units": 0 - }, - { - "name": "eth_getUncleByBlockNumberAndIndex", - "block_parsing": { - "parser_arg": [ - "0" - ], - "parser_func": "PARSE_BY_ARG" - }, - "compute_units": 20, - "enabled": false, + "compute_units": 250, + "enabled": true, "category": { - "deterministic": true, - "local": false, + "deterministic": false, + "local": true, "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "eth_getUncleCountByBlockHash", + "name": "ftm_getProof", "block_parsing": { "parser_arg": [ - "2" + "" ], - "parser_func": "PARSE_BY_ARG" + 
"parser_func": "EMPTY" }, - "compute_units": 20, - "enabled": false, + "compute_units": 10, + "enabled": true, "category": { - "deterministic": true, - "local": false, + "deterministic": false, + "local": true, "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "eth_getUncleCountByBlockNumber", + "name": "ftm_getBlockByHash", "block_parsing": { "parser_arg": [ - "0" + "" ], - "parser_func": "PARSE_BY_ARG" + "parser_func": "EMPTY" }, "compute_units": 10, - "enabled": false, + "enabled": true, "category": { "deterministic": true, "local": false, @@ -735,15 +717,15 @@ "extra_compute_units": 0 }, { - "name": "eth_getWork", + "name": "ftm_getTransactionCount", "block_parsing": { "parser_arg": [ - "1" + "" ], - "parser_func": "PARSE_BY_ARG" + "parser_func": "EMPTY" }, "compute_units": 10, - "enabled": false, + "enabled": true, "category": { "deterministic": true, "local": false, @@ -753,25 +735,25 @@ "extra_compute_units": 0 }, { - "name": "eth_hashrate", + "name": "ftm_call", "block_parsing": { "parser_arg": [ "" ], "parser_func": "EMPTY" }, - "compute_units": 1, - "enabled": false, + "compute_units": 10, + "enabled": true, "category": { "deterministic": false, - "local": false, + "local": true, "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "eth_mining", + "name": "ftm_hashrate", "block_parsing": { "parser_arg": [ "" @@ -779,44 +761,25 @@ "parser_func": "EMPTY" }, "compute_units": 10, - "enabled": false, - "category": { - "deterministic": false, - "local": false, - "subscription": false, - "stateful": 0 - }, - "extra_compute_units": 0 - }, - { - "name": "eth_newFilter", - "block_parsing": { - "parser_arg": [ - "0", - "toBlock" - ], - "parser_func": "PARSE_CANONICAL" - }, - "compute_units": 20, - "enabled": false, + "enabled": true, "category": { "deterministic": false, - "local": false, + "local": true, "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "eth_protocolVersion", + 
"name": "ftm_estimateGas", "block_parsing": { "parser_arg": [ "" ], "parser_func": "EMPTY" }, - "compute_units": 10, - "enabled": false, + "compute_units": 87, + "enabled": true, "category": { "deterministic": false, "local": true, @@ -826,17 +789,17 @@ "extra_compute_units": 0 }, { - "name": "eth_syncing", + "name": "ftm_sendRawTransaction", "block_parsing": { "parser_arg": [ - "" + "1" ], - "parser_func": "EMPTY" + "parser_func": "PARSE_BY_ARG" }, - "compute_units": 10, - "enabled": false, + "compute_units": 26, + "enabled": true, "category": { - "deterministic": false, + "deterministic": true, "local": false, "subscription": false, "stateful": 0 diff --git a/cookbook/specs/fvm.json b/cookbook/specs/fvm.json index 67054cbc79..2df44819d4 100644 --- a/cookbook/specs/fvm.json +++ b/cookbook/specs/fvm.json @@ -5,21 +5,19 @@ "specs": [ { "index": "FVM", - "name": "fvm mainnet", + "name": "filecoin mainnet", "enabled": true, - "imports": [ - "ETH1" - ], + "imports": ["ETH1"], "reliability_threshold": 268435455, "data_reliability_enabled": true, "block_distance_for_finalized_data": 1, "blocks_in_finalization_proof": 3, - "average_block_time": 70000, + "average_block_time": 30000, "allowed_block_lag_for_qos_sync": 2, "shares": 1, "min_stake_provider": { "denom": "ulava", - "amount": "47500000000" + "amount": "5000000000" }, "api_collections": [ { @@ -32,7 +30,7 @@ }, "apis": [ { - "name": "Filecoin.ChainGetBlock", + "name": "Filecoin.AuthVerify", "block_parsing": { "parser_arg": [ "latest" @@ -50,7 +48,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.ChainGetBlockMessages", + "name": "Filecoin.ChainBlockstoreInfo", "block_parsing": { "parser_arg": [ "latest" @@ -68,12 +66,12 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.ChainGetGenesis", + "name": "ChainExport", "block_parsing": { "parser_arg": [ - "latest" + "0" ], - "parser_func": "DEFAULT" + "parser_func": "PARSE_BY_ARG" }, "compute_units": 10, "enabled": true, @@ -86,7 +84,7 @@ "extra_compute_units": 0 
}, { - "name": "Filecoin.ChainGetMessage", + "name": "Filecoin.ChainGetBlock", "block_parsing": { "parser_arg": [ "latest" @@ -104,7 +102,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.ChainGetParentMessages", + "name": "Filecoin.ChainGetBlockMessages", "block_parsing": { "parser_arg": [ "latest" @@ -122,13 +120,8 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.ChainGetParentReceipts", - "block_parsing": { - "parser_arg": [ - "latest" - ], - "parser_func": "DEFAULT" - }, + "name": "Filecoin.ChainGetEvents", + "block_parsing": {}, "compute_units": 10, "enabled": true, "category": { @@ -140,7 +133,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.ChainGetPath", + "name": "Filecoin.ChainGetGenesis", "block_parsing": { "parser_arg": [ "latest" @@ -158,7 +151,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.ChainGetTipSet", + "name": "Filecoin.ChainGetMessage", "block_parsing": { "parser_arg": [ "latest" @@ -176,12 +169,12 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.ChainGetTipSetByHeight", + "name": "Filecoin.ChainGetMessagesInTipset", "block_parsing": { "parser_arg": [ - "0" + "latest" ], - "parser_func": "PARSE_BY_ARG" + "parser_func": "DEFAULT" }, "compute_units": 10, "enabled": true, @@ -194,7 +187,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.ChainHasObj", + "name": "Filecoin.ChainGetNode", "block_parsing": { "parser_arg": [ "latest" @@ -212,7 +205,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.ChainHead", + "name": "Filecoin.ChainGetParentMessages", "block_parsing": { "parser_arg": [ "latest" @@ -230,7 +223,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.ChainNotify", + "name": "Filecoin.ChainGetParentReceipts", "block_parsing": { "parser_arg": [ "latest" @@ -240,7 +233,7 @@ "compute_units": 10, "enabled": true, "category": { - "deterministic": false, + "deterministic": true, "local": false, "subscription": false, "stateful": 0 @@ -248,7 +241,7 @@ "extra_compute_units": 0 }, { - "name": 
"Filecoin.ChainReadObj", + "name": "Filecoin.ChainGetPath", "block_parsing": { "parser_arg": [ "latest" @@ -266,7 +259,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.ChainStatObj", + "name": "Filecoin.ChainGetTipSet", "block_parsing": { "parser_arg": [ "latest" @@ -284,12 +277,12 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.ChainTipSetWeight", + "name": "Filecoin.ChainGetTipSetAfterHeight", "block_parsing": { "parser_arg": [ - "latest" + "0" ], - "parser_func": "DEFAULT" + "parser_func": "PARSE_BY_ARG" }, "compute_units": 10, "enabled": true, @@ -302,17 +295,17 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.ClientQueryAsk", + "name": "Filecoin.ChainGetTipSetByHeight", "block_parsing": { "parser_arg": [ - "latest" + "0" ], - "parser_func": "DEFAULT" + "parser_func": "PARSE_BY_ARG" }, "compute_units": 10, "enabled": true, "category": { - "deterministic": false, + "deterministic": true, "local": false, "subscription": false, "stateful": 0 @@ -320,7 +313,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.GasEstimateFeeCap", + "name": "Filecoin.ChainHasObj", "block_parsing": { "parser_arg": [ "latest" @@ -330,7 +323,7 @@ "compute_units": 10, "enabled": true, "category": { - "deterministic": false, + "deterministic": true, "local": false, "subscription": false, "stateful": 0 @@ -338,7 +331,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.GasEstimateGasLimit", + "name": "Filecoin.ChainHead", "block_parsing": { "parser_arg": [ "latest" @@ -348,7 +341,7 @@ "compute_units": 10, "enabled": true, "category": { - "deterministic": false, + "deterministic": true, "local": false, "subscription": false, "stateful": 0 @@ -356,7 +349,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.GasEstimateGasPremium", + "name": "Filecoin.ChainNotify", "block_parsing": { "parser_arg": [ "latest" @@ -374,7 +367,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.GasEstimateMessageGas", + "name": "Filecoin.ChainReadObj", "block_parsing": { "parser_arg": [ 
"latest" @@ -384,7 +377,7 @@ "compute_units": 10, "enabled": true, "category": { - "deterministic": false, + "deterministic": true, "local": false, "subscription": false, "stateful": 0 @@ -392,7 +385,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.MpoolGetNonce", + "name": "Filecoin.ChainStatObj", "block_parsing": { "parser_arg": [ "latest" @@ -410,7 +403,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.MpoolPending", + "name": "Filecoin.ChainTipSetWeight", "block_parsing": { "parser_arg": [ "latest" @@ -420,7 +413,7 @@ "compute_units": 10, "enabled": true, "category": { - "deterministic": false, + "deterministic": true, "local": false, "subscription": false, "stateful": 0 @@ -428,7 +421,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.MpoolPush", + "name": "Filecoin.EthAccounts", "block_parsing": { "parser_arg": [ "latest" @@ -438,16 +431,15 @@ "compute_units": 10, "enabled": true, "category": { - "deterministic": false, + "deterministic": true, "local": false, "subscription": false, - "stateful": 1, - "hanging_api": true + "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "Filecoin.MpoolSub", + "name": "Filecoin.EthAddressToFilecoinAddress", "block_parsing": { "parser_arg": [ "latest" @@ -457,16 +449,15 @@ "compute_units": 10, "enabled": true, "category": { - "deterministic": false, + "deterministic": true, "local": false, "subscription": false, - "stateful": 1, - "hanging_api": true + "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "Filecoin.StateAccountKey", + "name": "Filecoin.EthBlockNumber", "block_parsing": { "parser_arg": [ "latest" @@ -484,12 +475,12 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.StateAllMinerFaults", - "block_parsing": { + "name": "Filecoin.EthCall", + "block_parsing": { "parser_arg": [ - "latest" + "1" ], - "parser_func": "DEFAULT" + "parser_func": "PARSE_BY_ARG" }, "compute_units": 10, "enabled": true, @@ -502,7 +493,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.StateCall", + "name": 
"Filecoin.EthChainId", "block_parsing": { "parser_arg": [ "latest" @@ -520,7 +511,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.StateChangedActors", + "name": "Filecoin.EthEstimateGas", "block_parsing": { "parser_arg": [ "latest" @@ -538,7 +529,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.StateCirculatingSupply", + "name": "Filecoin.EthFeeHistory", "block_parsing": { "parser_arg": [ "latest" @@ -556,7 +547,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.StateCompute", + "name": "Filecoin.EthGasPrice", "block_parsing": { "parser_arg": [ "latest" @@ -574,12 +565,12 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.StateDealProviderCollateralBounds", + "name": "Filecoin.EthGetBalance", "block_parsing": { "parser_arg": [ - "latest" + "1" ], - "parser_func": "DEFAULT" + "parser_func": "PARSE_BY_ARG" }, "compute_units": 10, "enabled": true, @@ -592,7 +583,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.StateDecodeParams", + "name": "Filecoin.EthGetBlockByHash", "block_parsing": { "parser_arg": [ "latest" @@ -610,12 +601,12 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.StateGetActor", + "name": "Filecoin.EthGetBlockByNumber", "block_parsing": { "parser_arg": [ - "latest" + "0" ], - "parser_func": "DEFAULT" + "parser_func": "PARSE_BY_ARG" }, "compute_units": 10, "enabled": true, @@ -628,12 +619,12 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.StateGetReceipt", + "name": "Filecoin.EthGetBlockReceipts", "block_parsing": { "parser_arg": [ - "latest" + "0" ], - "parser_func": "DEFAULT" + "parser_func": "PARSE_BY_ARG" }, "compute_units": 10, "enabled": true, @@ -646,7 +637,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.StateListActors", + "name": "Filecoin.EthGetBlockTransactionCountByHash", "block_parsing": { "parser_arg": [ "latest" @@ -664,12 +655,12 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.StateListMessages", + "name": "Filecoin.EthGetBlockTransactionCountByNumber", "block_parsing": { "parser_arg": [ - 
"latest" + "0" ], - "parser_func": "DEFAULT" + "parser_func": "PARSE_BY_ARG" }, "compute_units": 10, "enabled": true, @@ -682,12 +673,12 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.StateListMiners", + "name": "Filecoin.EthGetCode", "block_parsing": { "parser_arg": [ - "latest" + "1" ], - "parser_func": "DEFAULT" + "parser_func": "PARSE_BY_ARG" }, "compute_units": 10, "enabled": true, @@ -700,7 +691,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.StateLookupID", + "name": "Filecoin.EthGetFilterChanges", "block_parsing": { "parser_arg": [ "latest" @@ -718,7 +709,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.StateMarketBalance", + "name": "Filecoin.EthGetFilterLogs", "block_parsing": { "parser_arg": [ "latest" @@ -728,7 +719,7 @@ "compute_units": 10, "enabled": true, "category": { - "deterministic": true, + "deterministic": false, "local": false, "subscription": false, "stateful": 0 @@ -736,17 +727,18 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.StateMarketDeals", + "name": "Filecoin.EthGetLogs", "block_parsing": { "parser_arg": [ - "latest" + "0", + "fromBlock" ], - "parser_func": "DEFAULT" + "parser_func": "PARSE_CANONICAL" }, "compute_units": 10, "enabled": true, "category": { - "deterministic": true, + "deterministic": false, "local": false, "subscription": false, "stateful": 0 @@ -754,7 +746,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.StateMarketParticipants", + "name": "Filecoin.EthGetMessageCidByTransactionHash", "block_parsing": { "parser_arg": [ "latest" @@ -770,14 +762,14 @@ "stateful": 0 }, "extra_compute_units": 0 - }, + }, { - "name": "Filecoin.StateMarketStorageDeal", + "name": "Filecoin.EthGetStorageAt", "block_parsing": { "parser_arg": [ - "latest" + "2" ], - "parser_func": "DEFAULT" + "parser_func": "PARSE_BY_ARG" }, "compute_units": 10, "enabled": true, @@ -790,7 +782,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.StateMinerActiveSectors", + "name": "Filecoin.EthGetTransactionByBlockHashAndIndex", 
"block_parsing": { "parser_arg": [ "latest" @@ -808,12 +800,12 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.StateMinerAvailableBalance", + "name": "Filecoin.EthGetTransactionByBlockNumberAndIndex", "block_parsing": { "parser_arg": [ - "latest" + "0" ], - "parser_func": "DEFAULT" + "parser_func": "PARSE_BY_ARG" }, "compute_units": 10, "enabled": true, @@ -826,7 +818,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.StateMinerDeadlines", + "name": "Filecoin.EthGetTransactionByHash", "block_parsing": { "parser_arg": [ "latest" @@ -844,7 +836,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.StateMinerFaults", + "name": "Filecoin.EthGetTransactionByHashLimited", "block_parsing": { "parser_arg": [ "latest" @@ -862,12 +854,12 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.StateMinerInfo", + "name": "Filecoin.EthGetTransactionCount", "block_parsing": { "parser_arg": [ - "latest" + "1" ], - "parser_func": "DEFAULT" + "parser_func": "PARSE_BY_ARG" }, "compute_units": 10, "enabled": true, @@ -880,7 +872,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.StateMinerInitialPledgeCollateral", + "name": "Filecoin.EthGetTransactionHashByCid", "block_parsing": { "parser_arg": [ "latest" @@ -898,7 +890,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.StateMinerPartitions", + "name": "Filecoin.EthGetTransactionReceipt", "block_parsing": { "parser_arg": [ "latest" @@ -916,7 +908,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.StateMinerPower", + "name": "Filecoin.EthMaxPriorityFeePerGas", "block_parsing": { "parser_arg": [ "latest" @@ -926,7 +918,7 @@ "compute_units": 10, "enabled": true, "category": { - "deterministic": true, + "deterministic": false, "local": false, "subscription": false, "stateful": 0 @@ -934,17 +926,17 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.StateMinerPreCommitDepositForPower", + "name": "Filecoin.EthNewBlockFilter", "block_parsing": { "parser_arg": [ - "latest" + "" ], - "parser_func": "DEFAULT" + 
"parser_func": "EMPTY" }, - "compute_units": 10, + "compute_units": 20, "enabled": true, "category": { - "deterministic": true, + "deterministic": false, "local": false, "subscription": false, "stateful": 0 @@ -952,17 +944,18 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.StateMinerProvingDeadline", + "name": "Filecoin.EthNewFilter", "block_parsing": { "parser_arg": [ - "latest" + "0", + "fromBlock" ], - "parser_func": "DEFAULT" + "parser_func": "PARSE_CANONICAL" }, - "compute_units": 10, + "compute_units": 20, "enabled": true, "category": { - "deterministic": true, + "deterministic": false, "local": false, "subscription": false, "stateful": 0 @@ -970,17 +963,17 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.StateMinerRecoveries", + "name": "Filecoin.EthNewPendingTransactionFilter", "block_parsing": { "parser_arg": [ "latest" ], "parser_func": "DEFAULT" }, - "compute_units": 10, + "compute_units": 20, "enabled": true, "category": { - "deterministic": true, + "deterministic": false, "local": false, "subscription": false, "stateful": 0 @@ -988,7 +981,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.StateMinerSectorAllocated", + "name": "Filecoin.EthProtocolVersion", "block_parsing": { "parser_arg": [ "latest" @@ -1006,7 +999,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.StateMinerSectorCount", + "name": "Filecoin.EthSendRawTransaction", "block_parsing": { "parser_arg": [ "latest" @@ -1016,43 +1009,44 @@ "compute_units": 10, "enabled": true, "category": { - "deterministic": true, + "deterministic": false, "local": false, "subscription": false, - "stateful": 0 + "stateful": 1, + "hanging_api": true }, "extra_compute_units": 0 }, { - "name": "Filecoin.StateMinerSectors", + "name": "Filecoin.EthSubscribe", "block_parsing": { "parser_arg": [ "latest" ], "parser_func": "DEFAULT" }, - "compute_units": 10, + "compute_units": 1000, "enabled": true, "category": { - "deterministic": true, - "local": false, - "subscription": false, + "deterministic": 
false, + "local": true, + "subscription": true, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "Filecoin.StateNetworkName", + "name": "Filecoin.EthSyncing", "block_parsing": { "parser_arg": [ - "latest" + "" ], - "parser_func": "DEFAULT" + "parser_func": "EMPTY" }, "compute_units": 10, "enabled": true, "category": { - "deterministic": true, + "deterministic": false, "local": false, "subscription": false, "stateful": 0 @@ -1060,43 +1054,44 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.StateNetworkVersion", + "name": "Filecoin.EthTraceBlock", "block_parsing": { "parser_arg": [ - "latest" + "0" ], - "parser_func": "DEFAULT" + "parser_func": "PARSE_BY_ARG" }, "compute_units": 10, "enabled": true, "category": { - "deterministic": true, - "local": false, + "deterministic": false, + "local": true, "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "Filecoin.StateReadState", + "name": "Filecoin.EthTraceFilter", "block_parsing": { "parser_arg": [ - "latest" + "0", + "fromBlock" ], - "parser_func": "DEFAULT" + "parser_func": "PARSE_CANONICAL" }, "compute_units": 10, "enabled": true, "category": { - "deterministic": true, - "local": false, + "deterministic": false, + "local": true, "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "Filecoin.StateReplay", + "name": "Filecoin.EthTraceReplayBlockTransactions", "block_parsing": { "parser_arg": [ "latest" @@ -1106,33 +1101,33 @@ "compute_units": 10, "enabled": true, "category": { - "deterministic": true, - "local": false, + "deterministic": false, + "local": true, "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "Filecoin.StateSearchMsg", + "name": "Filecoin.EthTraceTransaction", "block_parsing": { "parser_arg": [ - "latest" + "0" ], - "parser_func": "DEFAULT" + "parser_func": "PARSE_BY_ARG" }, "compute_units": 10, "enabled": true, "category": { - "deterministic": true, - "local": false, + "deterministic": false, + "local": 
true, "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "Filecoin.StateSearchMsgLimited", + "name": "Filecoin.EthUninstallFilter", "block_parsing": { "parser_arg": [ "latest" @@ -1142,15 +1137,15 @@ "compute_units": 10, "enabled": true, "category": { - "deterministic": true, - "local": false, + "deterministic": false, + "local": true, "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "Filecoin.StateSectorExpiration", + "name": "Filecoin.EthUnsubscribe", "block_parsing": { "parser_arg": [ "latest" @@ -1160,15 +1155,15 @@ "compute_units": 10, "enabled": true, "category": { - "deterministic": true, - "local": false, - "subscription": false, + "deterministic": false, + "local": true, + "subscription": true, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "Filecoin.StateSectorGetInfo", + "name": "Filecoin.FilecoinAddressToEthAddress", "block_parsing": { "parser_arg": [ "latest" @@ -1186,7 +1181,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.StateSectorPartition", + "name": "Filecoin.GetActorEventsRaw", "block_parsing": { "parser_arg": [ "latest" @@ -1196,7 +1191,7 @@ "compute_units": 10, "enabled": true, "category": { - "deterministic": true, + "deterministic": false, "local": false, "subscription": false, "stateful": 0 @@ -1204,12 +1199,966 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.StateSectorPreCommitInfo", + "name": "Filecoin.MinerGetBaseInfo", "block_parsing": { "parser_arg": [ - "latest" + "1" ], - "parser_func": "DEFAULT" + "parser_func": "PARSE_BY_ARG" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": false, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.MpoolCheckMessages", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": false, + "local": false, + 
"subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.MpoolBatchPush", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": false, + "local": false, + "subscription": false, + "stateful": 1 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.MpoolBatchPushUntrusted", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": false, + "local": false, + "subscription": false, + "stateful": 1 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.MpoolCheckPendingMessages", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": false, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.MpoolCheckReplaceMessages", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.MpoolGetConfig", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": false, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.MpoolGetNonce", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": false, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.MpoolPending", + 
"block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": false, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.MpoolSelect", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": false, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.MsigGetAvailableBalance", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": false, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.MsigGetPending", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": false, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.MsigGetVested", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": false, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.MsigGetVestingSchedule", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": false, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.NetListening", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": 
{ + "deterministic": true, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.NetVersion", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": true, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.StateAccountKey", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.StateActorCodeCIDs", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.StateActorManifestCID", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.StateAllMinerFaults", + "block_parsing": { + "parser_arg": [ + "0" + ], + "parser_func": "PARSE_BY_ARG" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.StateCall", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": 
"Filecoin.StateChangedActors", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.StateCirculatingSupply", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.StateCompute", + "block_parsing": { + "parser_arg": [ + "0" + ], + "parser_func": "PARSE_BY_ARG" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.StateComputeDataCID", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.StateDealProviderCollateralBounds", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.StateDecodeParams", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.StateEncodeParams", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + 
"compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.StateGetActor", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.StateGetAllAllocation", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": false, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.StateGetAllClaims", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": false, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.StateGetAllocation", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": false, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.StateGetAllocationForPendingDeal", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": false, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.StateGetAllocations", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": false, + "local": false, + "subscription": 
false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.StateGetBeaconEntry", + "block_parsing": { + "parser_arg": [ + "0" + ], + "parser_func": "PARSE_BY_ARG" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": false, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.StateGetClaim", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": false, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.StateGetClaims", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": false, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.StateGetNetworkParams", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": false, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.StateGetRandomnessDigestFromBeacon", + "block_parsing": { + "parser_arg": [ + "0" + ], + "parser_func": "PARSE_BY_ARG" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": false, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.StateGetRandomnessDigestFromTickets", + "block_parsing": { + "parser_arg": [ + "0" + ], + "parser_func": "PARSE_BY_ARG" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": false, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": 
"Filecoin.StateGetRandomnessFromBeacon", + "block_parsing": { + "parser_arg": [ + "1" + ], + "parser_func": "PARSE_BY_ARG" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": false, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.StateGetRandomnessFromTickets", + "block_parsing": { + "parser_arg": [ + "1" + ], + "parser_func": "PARSE_BY_ARG" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": false, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.StateListActors", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.StateListMessages", + "block_parsing": { + "parser_arg": [ + "2" + ], + "parser_func": "PARSE_BY_ARG" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.StateListMiners", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.StateLookupID", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.StateLookupRobustAddress", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + 
}, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.StateMarketBalance", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.StateMarketDeals", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.StateMarketParticipants", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.StateMarketStorageDeal", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.StateMinerActiveSectors", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.StateMinerAllocated", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": false, + 
"subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.StateMinerAvailableBalance", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" }, "compute_units": 10, "enabled": true, @@ -1222,7 +2171,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.StateVMCirculatingSupplyInternal", + "name": "Filecoin.StateMinerDeadlines", "block_parsing": { "parser_arg": [ "latest" @@ -1240,7 +2189,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.StateVerifiedClientStatus", + "name": "Filecoin.StateMinerFaults", "block_parsing": { "parser_arg": [ "latest" @@ -1258,7 +2207,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.StateVerifiedRegistryRootKey", + "name": "Filecoin.StateMinerInfo", "block_parsing": { "parser_arg": [ "latest" @@ -1276,7 +2225,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.StateVerifierStatus", + "name": "Filecoin.StateMinerPartitions", "block_parsing": { "parser_arg": [ "latest" @@ -1294,7 +2243,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.StateWaitMsg", + "name": "Filecoin.StateMinerPledgeCollateral", "block_parsing": { "parser_arg": [ "latest" @@ -1312,7 +2261,25 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.StateWaitMsgLimited", + "name": "Filecoin.StateMinerPledgeForSector", + "block_parsing": { + "parser_arg": [ + "0" + ], + "parser_func": "PARSE_BY_ARG" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.StateMinerPower", "block_parsing": { "parser_arg": [ "latest" @@ -1330,7 +2297,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.SyncState", + "name": "Filecoin.StateMinerPreCommitDepositForPower", "block_parsing": { "parser_arg": [ "latest" @@ -1340,7 +2307,7 @@ "compute_units": 10, "enabled": true, "category": { - "deterministic": false, + "deterministic": true, "local": false, 
"subscription": false, "stateful": 0 @@ -1348,7 +2315,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.WalletBalance", + "name": "Filecoin.StateMinerProvingDeadline", "block_parsing": { "parser_arg": [ "latest" @@ -1366,7 +2333,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.WalletValidateAddress", + "name": "Filecoin.StateMinerRecoveries", "block_parsing": { "parser_arg": [ "latest" @@ -1384,7 +2351,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.WalletVerify", + "name": "Filecoin.StateMinerSectorAllocated", "block_parsing": { "parser_arg": [ "latest" @@ -1402,7 +2369,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.EthAccounts", + "name": "Filecoin.StateMinerSectorCount", "block_parsing": { "parser_arg": [ "latest" @@ -1420,7 +2387,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.EthBlockNumber", + "name": "Filecoin.StateMinerSectors", "block_parsing": { "parser_arg": [ "latest" @@ -1438,12 +2405,12 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.EthCall", + "name": "Filecoin.StateNetworkName", "block_parsing": { "parser_arg": [ - "1" + "latest" ], - "parser_func": "PARSE_BY_ARG" + "parser_func": "DEFAULT" }, "compute_units": 10, "enabled": true, @@ -1456,7 +2423,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.EthChainId", + "name": "Filecoin.StateNetworkVersion", "block_parsing": { "parser_arg": [ "latest" @@ -1474,7 +2441,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.EthEstimateGas", + "name": "Filecoin.StateReadState", "block_parsing": { "parser_arg": [ "latest" @@ -1492,7 +2459,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.EthFeeHistory", + "name": "Filecoin.StateReplay", "block_parsing": { "parser_arg": [ "latest" @@ -1510,7 +2477,25 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.EthGasPrice", + "name": "Filecoin.StateSearchMsg", + "block_parsing": { + "parser_arg": [ + "2" + ], + "parser_func": "PARSE_BY_ARG" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": 
true, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.StateSectorExpiration", "block_parsing": { "parser_arg": [ "latest" @@ -1528,12 +2513,12 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.EthGetBalance", + "name": "Filecoin.StateSectorGetInfo", "block_parsing": { "parser_arg": [ - "1" + "latest" ], - "parser_func": "PARSE_BY_ARG" + "parser_func": "DEFAULT" }, "compute_units": 10, "enabled": true, @@ -1546,7 +2531,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.EthGetBlockByHash", + "name": "Filecoin.StateSectorPartition", "block_parsing": { "parser_arg": [ "latest" @@ -1564,12 +2549,12 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.EthGetBlockByNumber", + "name": "Filecoin.StatePreCommitInfo", "block_parsing": { "parser_arg": [ - "0" + "latest" ], - "parser_func": "PARSE_BY_ARG" + "parser_func": "DEFAULT" }, "compute_units": 10, "enabled": true, @@ -1582,7 +2567,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.EthGetBlockTransactionCountByHash", + "name": "Filecoin.StateVMCirculatingSupplyInternal", "block_parsing": { "parser_arg": [ "latest" @@ -1600,12 +2585,12 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.EthGetBlockTransactionCountByNumber", + "name": "Filecoin.StateVerifiedClientStatus", "block_parsing": { "parser_arg": [ - "0" + "latest" ], - "parser_func": "PARSE_BY_ARG" + "parser_func": "DEFAULT" }, "compute_units": 10, "enabled": true, @@ -1618,12 +2603,12 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.EthGetCode", + "name": "Filecoin.StateVerifiedRegistryRootKey", "block_parsing": { "parser_arg": [ - "1" + "latest" ], - "parser_func": "PARSE_BY_ARG" + "parser_func": "DEFAULT" }, "compute_units": 10, "enabled": true, @@ -1636,12 +2621,12 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.EthGetFilterChanges", + "name": "Filecoin.StateVerifierStatus", "block_parsing": { "parser_arg": [ - "1" + "latest" ], - "parser_func": "PARSE_BY_ARG" + 
"parser_func": "DEFAULT" }, "compute_units": 10, "enabled": true, @@ -1654,7 +2639,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.EthGetFilterLogs", + "name": "Filecoin.StateWaitMsg", "block_parsing": { "parser_arg": [ "latest" @@ -1664,7 +2649,7 @@ "compute_units": 10, "enabled": true, "category": { - "deterministic": false, + "deterministic": true, "local": false, "subscription": false, "stateful": 0 @@ -1672,13 +2657,12 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.EthGetLogs", + "name": "Filecoin.Version", "block_parsing": { "parser_arg": [ - "0", - "toBlock" + "latest" ], - "parser_func": "PARSE_CANONICAL" + "parser_func": "DEFAULT" }, "compute_units": 10, "enabled": true, @@ -1691,7 +2675,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.EthGetMessageCidByTransactionHash", + "name": "Filecoin.WalletBalance", "block_parsing": { "parser_arg": [ "latest" @@ -1709,12 +2693,12 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.EthGetStorageAt", + "name": "Filecoin.WalletValidateAddress", "block_parsing": { "parser_arg": [ - "2" + "latest" ], - "parser_func": "PARSE_BY_ARG" + "parser_func": "DEFAULT" }, "compute_units": 10, "enabled": true, @@ -1727,7 +2711,7 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.EthGetTransactionByHash", + "name": "Filecoin.WalletVerify", "block_parsing": { "parser_arg": [ "latest" @@ -1745,25 +2729,25 @@ "extra_compute_units": 0 }, { - "name": "Filecoin.EthGetTransactionCount", + "name": "Filecoin.Web3ClientVersion", "block_parsing": { "parser_arg": [ - "1" + "latest" ], - "parser_func": "PARSE_BY_ARG" + "parser_func": "DEFAULT" }, "compute_units": 10, "enabled": true, "category": { "deterministic": true, - "local": false, + "local": true, "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "Filecoin.EthGetTransactionHashByCid", + "name": "Filecoin.GasEstimateFeeCap", "block_parsing": { "parser_arg": [ "latest" @@ -1774,14 +2758,14 @@ "enabled": true, "category": { "deterministic": 
true, - "local": false, + "local": true, "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "Filecoin.EthGetTransactionReceipt", + "name": "Filecoin.GasEstimateGasLimit", "block_parsing": { "parser_arg": [ "latest" @@ -1792,14 +2776,14 @@ "enabled": true, "category": { "deterministic": true, - "local": false, + "local": true, "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "Filecoin.EthMaxPriorityFeePerGas", + "name": "Filecoin.GasEstimateGasPremium", "block_parsing": { "parser_arg": [ "latest" @@ -1809,8 +2793,26 @@ "compute_units": 10, "enabled": true, "category": { - "deterministic": false, - "local": false, + "deterministic": true, + "local": true, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "Filecoin.GasEstimateMessageGas", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": true, "subscription": false, "stateful": 0 }, @@ -1831,21 +2833,165 @@ }, { "name": "pruning", + "parse_directive": { + "function_tag": "GET_BLOCK_BY_NUM" + }, "values": [ { - "expected_value": "" + "latest_distance": 2880 }, { "extension": "archive", - "expected_value": "" + "expected_value": "1" } ] } + ], + "extensions": [ + { + "name": "archive", + "cu_multiplier": 5, + "rule": { + "block": 2840 + } + } + ] + }, + { + "enabled": true, + "collection_data": { + "api_interface": "jsonrpc", + "internal_path": "/rpc/v0", + "type": "POST", + "add_on": "" + }, + "apis": [], + "inheritance_apis": [ + { + "api_interface": "jsonrpc", + "internal_path": "", + "type": "POST", + "add_on": "" + } + ], + "parse_directives": [], + "verifications": [] + }, + { + "enabled": true, + "collection_data": { + "api_interface": "jsonrpc", + "internal_path": "/rpc/v1", + "type": "POST", + "add_on": "" + }, + "apis": [], + "inheritance_apis": [ + { + "api_interface": "jsonrpc", 
+ "internal_path": "", + "type": "POST", + "add_on": "" + } + ], + "parse_directives": [], + "verifications": [] + } + ], + "contributor": [ + "lava@16gjdwqfpvk3dyasy83wsr26pk27kjq9wvfz0qy" + ], + "contributor_percentage": "0.025" + }, + { + "index": "FVMT", + "name": "filecoin testnet", + "enabled": true, + "imports": [ + "FVM" + ], + "reliability_threshold": 268435455, + "data_reliability_enabled": true, + "block_distance_for_finalized_data": 1, + "blocks_in_finalization_proof": 3, + "average_block_time": 30000, + "allowed_block_lag_for_qos_sync": 2, + "shares": 1, + "min_stake_provider": { + "denom": "ulava", + "amount": "5000000000" + }, + "api_collections": [ + { + "enabled": true, + "collection_data": { + "api_interface": "jsonrpc", + "internal_path": "", + "type": "POST", + "add_on": "" + }, + "apis": [], + "parse_directives": [], + "verifications": [ + { + "name": "chain-id", + "values": [ + { + "expected_value": "0x4cb2f" + } + ] + } + ] + }, + { + "enabled": true, + "collection_data": { + "api_interface": "jsonrpc", + "internal_path": "/rpc/v0", + "type": "POST", + "add_on": "" + }, + "apis": [], + "parse_directives": [], + "verifications": [ + { + "name": "chain-id", + "values": [ + { + "expected_value": "0x4cb2f" + } + ] + } + ] + }, + { + "enabled": true, + "collection_data": { + "api_interface": "jsonrpc", + "internal_path": "/rpc/v1", + "type": "POST", + "add_on": "" + }, + "apis": [], + "parse_directives": [], + "verifications": [ + { + "name": "chain-id", + "values": [ + { + "expected_value": "0x4cb2f" + } + ] + } ] } - ] + ], + "contributor": [ + "lava@16gjdwqfpvk3dyasy83wsr26pk27kjq9wvfz0qy" + ], + "contributor_percentage": "0.025" } ] }, - "deposit": "10000000ulava" + "deposit": "1750000000ulava" } \ No newline at end of file diff --git a/cookbook/specs/movement.json b/cookbook/specs/movement.json index aa7d3d94a9..21264f09b0 100644 --- a/cookbook/specs/movement.json +++ b/cookbook/specs/movement.json @@ -13,13 +13,13 @@ "reliability_threshold": 
268435455, "data_reliability_enabled": true, "block_distance_for_finalized_data": 0, - "blocks_in_finalization_proof": 5, - "average_block_time": 200, + "blocks_in_finalization_proof": 1, + "average_block_time": 1000, "allowed_block_lag_for_qos_sync": 50, "shares": 1, "min_stake_provider": { "denom": "ulava", - "amount": "47500000000" + "amount": "5000000000" }, "api_collections": [ { @@ -30,16 +30,39 @@ "type": "GET", "add_on": "" }, - "apis": [], - "headers": [], + "apis": [ + { + "name": "/transactions/wait_by_hash/{txn_hash}", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + } + ], + "headers": [ + { + "name": "x-aptos-gas-used", + "kind": "pass_reply" + } + ], "inheritance_apis": [], - "parse_directives": [], "verifications": [ { "name": "chain-id", "values": [ { - "expected_value": "4" + "expected_value": "*" } ] }, @@ -47,11 +70,11 @@ "name": "pruning", "values": [ { - "latest_distance": 432000 + "latest_distance": 2400 }, { - "extension": "archive", - "expected_value": "0" + "extension": "archive", + "latest_distance": 0 } ] } @@ -61,27 +84,86 @@ "name": "archive", "cu_multiplier": 5, "rule": { - "block": 427500 + "block": 2200 } } ] - }, + } + ], + "contributor": [ + "lava@1w3qs6ksgzy66rjljmsjnetlqpvvhl9qkvntf0a" + ], + "contributor_percentage": "0.025" + }, + { + "index": "MOVEMENTT", + "name": "movement testnet bardock", + "enabled": true, + "imports": [ + "MOVEMENT" + ], + "reliability_threshold": 268435455, + "data_reliability_enabled": true, + "block_distance_for_finalized_data": 0, + "blocks_in_finalization_proof": 1, + "average_block_time": 1000, + "allowed_block_lag_for_qos_sync": 30, + "shares": 1, + "min_stake_provider": { + "denom": "ulava", + "amount": "5000000000" + }, + "api_collections": [ { "enabled": true, "collection_data": { 
"api_interface": "rest", "internal_path": "", - "type": "POST", + "type": "GET", "add_on": "" }, "apis": [], - "headers": [], "inheritance_apis": [], - "parse_directives": [] - } - ] + "parse_directives": [], + "verifications": [ + { + "name": "chain-id", + "values": [ + { + "expected_value": "250" + } + ] + }, + { + "name": "pruning", + "values": [ + { + "latest_distance": 2400 + }, + { + "extension": "archive", + "latest_distance": 0 + } + ] + } + ], + "extensions": [ + { + "name": "archive", + "cu_multiplier": 5, + "rule": { + "block": 2200 + } + } + ] + } + ], + "contributor": [ + "lava@1w3qs6ksgzy66rjljmsjnetlqpvvhl9qkvntf0a" + ], + "contributor_percentage": "0.025" } ] }, - "deposit": "10000000ulava" + "deposit": "1750000000ulava" } \ No newline at end of file diff --git a/cookbook/specs/near.json b/cookbook/specs/near.json index 037335089c..e0c46c2cf1 100644 --- a/cookbook/specs/near.json +++ b/cookbook/specs/near.json @@ -95,6 +95,10 @@ "rule": "=final || =optimistic", "parse_type": "DEFAULT_VALUE" }, + { + "parse_path": ".params.block_id", + "parse_type": "BLOCK_HASH" + }, { "parse_path": ".params.[0]", "parse_type": "BLOCK_HASH" @@ -141,6 +145,10 @@ }, "extra_compute_units": 0, "parsers": [ + { + "parse_path": ".params.chunk_id", + "parse_type": "BLOCK_HASH" + }, { "parse_path": ".params.[0]", "parse_type": "BLOCK_HASH" @@ -339,7 +347,17 @@ "hanging_api": true }, "extra_compute_units": 0, - "timeout_ms": 10000 + "timeout_ms": 10000, + "parsers": [ + { + "parse_path": ".params.tx_hash", + "parse_type": "BLOCK_HASH" + }, + { + "parse_path": ".params.[0]", + "parse_type": "BLOCK_HASH" + } + ] }, { "name": "EXPERIMENTAL_tx_status", @@ -357,7 +375,17 @@ "subscription": false, "stateful": 0 }, - "extra_compute_units": 0 + "extra_compute_units": 0, + "parsers": [ + { + "parse_path": ".params.tx_hash", + "parse_type": "BLOCK_HASH" + }, + { + "parse_path": ".params.[0]", + "parse_type": "BLOCK_HASH" + } + ] }, { "name": "EXPERIMENTAL_receipt", @@ -430,6 +458,32 @@ 
} ], "verifications": [ + { + "name": "tracking-shards", + "parse_directive": { + "function_template": "{\"jsonrpc\":\"2.0\",\"id\":\"dontcare\",\"method\":\"query\",\"params\":{\"request_type\":\"view_account\",\"finality\":\"final\",\"account_id\":\"floor.is.lava\"}}", + "function_tag": "VERIFICATION", + "parsers": [ + { + "parse_path": ".error.cause.name", + "value": "UNKNOWN_ACCOUNT", + "parse_type": "RESULT" + }, + { + "parse_path": ".result.amount", + "value": "*", + "parse_type": "RESULT" + } + ], + "api_name": "query" + }, + "values": [ + { + "expected_value": "*", + "severity": "Warning" + } + ] + }, { "name": "chain-id", "parse_directive": { diff --git a/cookbook/specs/solana.json b/cookbook/specs/solana.json old mode 100644 new mode 100755 index e14203dd39..07d746c62f --- a/cookbook/specs/solana.json +++ b/cookbook/specs/solana.json @@ -479,7 +479,7 @@ "extra_compute_units": 0 }, { - "name": "getMinimumBalanceForRentExemption", + "name": "getMaxShredInsertSlot", "block_parsing": { "parser_arg": [ "latest" @@ -497,7 +497,7 @@ "extra_compute_units": 0 }, { - "name": "getMultipleAccounts", + "name": "getMinimumBalanceForRentExemption", "block_parsing": { "parser_arg": [ "latest" @@ -515,7 +515,7 @@ "extra_compute_units": 0 }, { - "name": "getProgramAccounts", + "name": "getMultipleAccounts", "block_parsing": { "parser_arg": [ "latest" @@ -533,7 +533,7 @@ "extra_compute_units": 0 }, { - "name": "getRecentPerformanceSamples", + "name": "getProgramAccounts", "block_parsing": { "parser_arg": [ "latest" @@ -551,7 +551,7 @@ "extra_compute_units": 0 }, { - "name": "getRecentPrioritizationFees", + "name": "getRecentPerformanceSamples", "block_parsing": { "parser_arg": [ "latest" @@ -569,7 +569,7 @@ "extra_compute_units": 0 }, { - "name": "getSignaturesForAddress", + "name": "getRecentPrioritizationFees", "block_parsing": { "parser_arg": [ "latest" @@ -587,7 +587,7 @@ "extra_compute_units": 0 }, { - "name": "getSignatureStatuses", + "name": 
"getSignaturesForAddress", "block_parsing": { "parser_arg": [ "latest" @@ -605,7 +605,7 @@ "extra_compute_units": 0 }, { - "name": "getSlot", + "name": "getSignatureStatuses", "block_parsing": { "parser_arg": [ "latest" @@ -623,7 +623,7 @@ "extra_compute_units": 0 }, { - "name": "getSlotLeader", + "name": "getSlot", "block_parsing": { "parser_arg": [ "latest" @@ -641,7 +641,7 @@ "extra_compute_units": 0 }, { - "name": "getSlotLeaders", + "name": "getSlotLeader", "block_parsing": { "parser_arg": [ "latest" @@ -659,7 +659,7 @@ "extra_compute_units": 0 }, { - "name": "getStakeActivation", + "name": "getSlotLeaders", "block_parsing": { "parser_arg": [ "latest" @@ -964,183 +964,411 @@ "stateful": 0 }, "extra_compute_units": 0 + } + ], + "headers": [], + "inheritance_apis": [], + "parse_directives": [ + { + "function_template": "{\"jsonrpc\":\"2.0\",\"method\":\"getLatestBlockhash\",\"params\":[{\"commitment\":\"finalized\"}],\"id\":1}", + "function_tag": "GET_BLOCKNUM", + "result_parsing": { + "parser_arg": [ + "0", + "context", + "slot" + ], + "parser_func": "PARSE_CANONICAL" + }, + "api_name": "getLatestBlockhash" + }, + { + "function_tag": "GET_BLOCK_BY_NUM", + "function_template": "{\"jsonrpc\":\"2.0\",\"method\":\"getBlock\",\"params\":[%d,{\"transactionDetails\":\"none\",\"rewards\":false}],\"id\":1}", + "result_parsing": { + "parser_arg": [ + "0", + "blockhash" + ], + "parser_func": "PARSE_CANONICAL", + "encoding": "base64" + }, + "api_name": "getBlock" + } + ], + "verifications": [ + { + "name": "version", + "parse_directive": { + "function_template": "{\"jsonrpc\":\"2.0\",\"method\":\"getVersion\",\"params\":[],\"id\":1}", + "function_tag": "VERIFICATION", + "result_parsing": { + "parser_arg": [ + "0", + "solana-core" + ], + "parser_func": "PARSE_CANONICAL" + }, + "api_name": "getVersion" + }, + "values": [ + { + "expected_value": "*" + } + ] + }, + { + "name": "tokens-owner-indexed", + "parse_directive": { + "function_template": 
"{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"getTokenAccountsByOwner\",\"params\":[\"4Qkev8aNZcqFNSRhQzwyLMFSsi94jHqE8WNVTJzTP99F\",{\"programId\":\"TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA\"},{\"encoding\":\"jsonParsed\"}]}", + "function_tag": "VERIFICATION", + "result_parsing": { + "parser_arg": [ + "0", + "value" + ], + "parser_func": "PARSE_CANONICAL" + }, + "api_name": "getTokenAccountsByOwner" + }, + "values": [ + { + "expected_value": "*", + "severity": "Warning" + } + ] + } + ] + }, + { + "enabled": true, + "collection_data": { + "api_interface": "jsonrpc", + "internal_path": "/ws", + "type": "POST", + "add_on": "" + }, + "apis": [ + { + "name": "accountSubscribe", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 1000, + "enabled": true, + "category": { + "deterministic": false, + "local": true, + "subscription": true, + "stateful": 0 + }, + "extra_compute_units": 0 }, { - "name": "getConfirmedBlock", + "name": "accountUnsubscribe", "block_parsing": { "parser_arg": [ - "0" + "latest" ], - "parser_func": "PARSE_BY_ARG" + "parser_func": "DEFAULT" }, - "compute_units": 10, + "compute_units": 1000, "enabled": true, "category": { - "deterministic": true, - "local": false, - "subscription": false, + "deterministic": false, + "local": true, + "subscription": true, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "getConfirmedBlocks", + "name": "blockSubscribe", "block_parsing": { "parser_arg": [ "latest" ], "parser_func": "DEFAULT" }, - "compute_units": 10, + "compute_units": 1000, "enabled": true, "category": { - "deterministic": true, - "local": false, - "subscription": false, + "deterministic": false, + "local": true, + "subscription": true, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "getConfirmedBlocksWithLimit", + "name": "blockUnsubscribe", "block_parsing": { "parser_arg": [ "latest" ], "parser_func": "DEFAULT" }, - "compute_units": 10, + "compute_units": 1000, "enabled": true, 
"category": { - "deterministic": true, - "local": false, - "subscription": false, + "deterministic": false, + "local": true, + "subscription": true, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "getConfirmedSignaturesForAddress2", + "name": "logsSubscribe", "block_parsing": { "parser_arg": [ "latest" ], "parser_func": "DEFAULT" }, - "compute_units": 10, + "compute_units": 1000, "enabled": true, "category": { - "deterministic": true, - "local": false, - "subscription": false, + "deterministic": false, + "local": true, + "subscription": true, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "getConfirmedTransaction", + "name": "logsUnsubscribe", "block_parsing": { "parser_arg": [ "latest" ], "parser_func": "DEFAULT" }, - "compute_units": 10, + "compute_units": 1000, "enabled": true, "category": { - "deterministic": true, - "local": false, - "subscription": false, + "deterministic": false, + "local": true, + "subscription": true, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "getFeeCalculatorForBlockhash", + "name": "programSubscribe", "block_parsing": { "parser_arg": [ "latest" ], "parser_func": "DEFAULT" }, - "compute_units": 10, + "compute_units": 1000, "enabled": true, "category": { - "deterministic": true, - "local": false, - "subscription": false, + "deterministic": false, + "local": true, + "subscription": true, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "getFeeRateGovernor", + "name": "programUnsubscribe", "block_parsing": { "parser_arg": [ "latest" ], "parser_func": "DEFAULT" }, - "compute_units": 10, + "compute_units": 1000, "enabled": true, "category": { - "deterministic": true, - "local": false, - "subscription": false, + "deterministic": false, + "local": true, + "subscription": true, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "getFees", + "name": "rootSubscribe", "block_parsing": { "parser_arg": [ "latest" ], "parser_func": "DEFAULT" }, - "compute_units": 10, + "compute_units": 1000, "enabled": 
true, "category": { - "deterministic": true, - "local": false, - "subscription": false, + "deterministic": false, + "local": true, + "subscription": true, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "getRecentBlockhash", + "name": "rootUnsubscribe", "block_parsing": { "parser_arg": [ "latest" ], "parser_func": "DEFAULT" }, - "compute_units": 10, + "compute_units": 1000, "enabled": true, "category": { - "deterministic": true, - "local": false, - "subscription": false, + "deterministic": false, + "local": true, + "subscription": true, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "getSnapshotSlot", + "name": "signatureSubscribe", "block_parsing": { "parser_arg": [ "latest" ], "parser_func": "DEFAULT" }, - "compute_units": 10, + "compute_units": 1000, "enabled": true, "category": { - "deterministic": true, - "local": false, - "subscription": false, + "deterministic": false, + "local": true, + "subscription": true, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "signatureUnsubscribe", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 1000, + "enabled": true, + "category": { + "deterministic": false, + "local": true, + "subscription": true, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "slotSubscribe", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 1000, + "enabled": true, + "category": { + "deterministic": false, + "local": true, + "subscription": true, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "slotUnsubscribe", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 1000, + "enabled": true, + "category": { + "deterministic": false, + "local": true, + "subscription": true, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "slotsUpdatesSubscribe", + "block_parsing": { + "parser_arg": [ + "latest" + ], 
+ "parser_func": "DEFAULT" + }, + "compute_units": 1000, + "enabled": true, + "category": { + "deterministic": false, + "local": true, + "subscription": true, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "slotsUpdatesUnsubscribe", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 1000, + "enabled": true, + "category": { + "deterministic": false, + "local": true, + "subscription": true, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "voteSubscribe", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 1000, + "enabled": true, + "category": { + "deterministic": false, + "local": true, + "subscription": true, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "voteUnsubscribe", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 1000, + "enabled": true, + "category": { + "deterministic": false, + "local": true, + "subscription": true, "stateful": 0 }, "extra_compute_units": 0 @@ -1150,75 +1378,101 @@ "inheritance_apis": [], "parse_directives": [ { - "function_template": "{\"jsonrpc\":\"2.0\",\"method\":\"getLatestBlockhash\",\"params\":[{\"commitment\":\"finalized\"}],\"id\":1}", "function_tag": "GET_BLOCKNUM", "result_parsing": { - "parser_arg": [ - "0", - "context", - "slot" - ], - "parser_func": "PARSE_CANONICAL" - }, - "api_name": "getLatestBlockhash" + "parser_func": "DEFAULT" + } }, { + "function_template": "%d", "function_tag": "GET_BLOCK_BY_NUM", - "function_template": "{\"jsonrpc\":\"2.0\",\"method\":\"getBlock\",\"params\":[%d,{\"transactionDetails\":\"none\",\"rewards\":false}],\"id\":1}", "result_parsing": { - "parser_arg": [ - "0", - "blockhash" - ], - "parser_func": "PARSE_CANONICAL", - "encoding": "base64" - }, - "api_name": "getBlock" - } - ], - "verifications": [ + "parser_func": "DEFAULT" + } + }, { - "name": "version", - 
"parse_directive": { - "function_template": "{\"jsonrpc\":\"2.0\",\"method\":\"getVersion\",\"params\":[],\"id\":1}", - "function_tag": "VERIFICATION", - "result_parsing": { - "parser_arg": [ - "0", - "solana-core" - ], - "parser_func": "PARSE_CANONICAL" - }, - "api_name": "getVersion" - }, - "values": [ - { - "expected_value": "*" - } - ] + "function_tag": "SUBSCRIBE", + "api_name": "accountSubscribe" }, { - "name": "tokens-owner-indexed", - "parse_directive": { - "function_template": "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"getTokenAccountsByOwner\",\"params\":[\"4Qkev8aNZcqFNSRhQzwyLMFSsi94jHqE8WNVTJzTP99F\",{\"programId\":\"TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA\"},{\"encoding\":\"jsonParsed\"}]}", - "function_tag": "VERIFICATION", - "result_parsing": { - "parser_arg": [ - "0", - "value" - ], - "parser_func": "PARSE_CANONICAL" - }, - "api_name": "getTokenAccountsByOwner" - }, - "values": [ - { - "expected_value": "*", - "severity": "Warning" - } - ] + "function_template": "{\"jsonrpc\":\"2.0\",\"method\":\"accountUnsubscribe\",\"params\":[%d],\"id\":1}", + "function_tag": "UNSUBSCRIBE", + "api_name": "accountUnsubscribe" + }, + { + "function_tag": "SUBSCRIBE", + "api_name": "blockSubscribe" + }, + { + "function_template": "{\"jsonrpc\":\"2.0\",\"method\":\"blockUnsubscribe\",\"params\":[%d],\"id\":1}", + "function_tag": "UNSUBSCRIBE", + "api_name": "blockUnsubscribe" + }, + { + "function_tag": "SUBSCRIBE", + "api_name": "logsSubscribe" + }, + { + "function_template": "{\"jsonrpc\":\"2.0\",\"method\":\"logsUnsubscribe\",\"params\":[%d],\"id\":1}", + "function_tag": "UNSUBSCRIBE", + "api_name": "logsUnsubscribe" + }, + { + "function_tag": "SUBSCRIBE", + "api_name": "programSubscribe" + }, + { + "function_template": "{\"jsonrpc\":\"2.0\",\"method\":\"programUnsubscribe\",\"params\":[%d],\"id\":1}", + "function_tag": "UNSUBSCRIBE", + "api_name": "programUnsubscribe" + }, + { + "function_tag": "SUBSCRIBE", + "api_name": "rootSubscribe" + }, + { + 
"function_template": "{\"jsonrpc\":\"2.0\",\"method\":\"rootUnsubscribe\",\"params\":[%d],\"id\":1}", + "function_tag": "UNSUBSCRIBE", + "api_name": "rootUnsubscribe" + }, + { + "function_tag": "SUBSCRIBE", + "api_name": "signatureSubscribe" + }, + { + "function_template": "{\"jsonrpc\":\"2.0\",\"method\":\"signatureUnsubscribe\",\"params\":[%d],\"id\":1}", + "function_tag": "UNSUBSCRIBE", + "api_name": "signatureUnsubscribe" + }, + { + "function_tag": "SUBSCRIBE", + "api_name": "slotSubscribe" + }, + { + "function_template": "{\"jsonrpc\":\"2.0\",\"method\":\"slotUnsubscribe\",\"params\":[%d],\"id\":1}", + "function_tag": "UNSUBSCRIBE", + "api_name": "slotUnsubscribe" + }, + { + "function_tag": "SUBSCRIBE", + "api_name": "slotsUpdatesSubscribe" + }, + { + "function_template": "{\"jsonrpc\":\"2.0\",\"method\":\"slotsUpdatesUnsubscribe\",\"params\":[%d],\"id\":1}", + "function_tag": "UNSUBSCRIBE", + "api_name": "slotsUpdatesUnsubscribe" + }, + { + "function_tag": "SUBSCRIBE", + "api_name": "voteSubscribe" + }, + { + "function_template": "{\"jsonrpc\":\"2.0\",\"method\":\"voteUnsubscribe\",\"params\":[%d],\"id\":1}", + "function_tag": "UNSUBSCRIBE", + "api_name": "voteUnsubscribe" } - ] + ], + "verifications": [] } ] }, diff --git a/cookbook/specs/starknet.json b/cookbook/specs/starknet.json index 567b740a84..9228e7b7ff 100644 --- a/cookbook/specs/starknet.json +++ b/cookbook/specs/starknet.json @@ -11,12 +11,12 @@ "data_reliability_enabled": true, "block_distance_for_finalized_data": 6, "blocks_in_finalization_proof": 3, - "average_block_time": 12000, + "average_block_time": 30000, "allowed_block_lag_for_qos_sync": 2, "shares": 1, "min_stake_provider": { "denom": "ulava", - "amount": "47500000000" + "amount": "5000000000" }, "api_collections": [ { @@ -36,7 +36,7 @@ ], "parser_func": "DEFAULT" }, - "compute_units": 10, + "compute_units": 20, "enabled": true, "category": { "deterministic": true, @@ -93,7 +93,7 @@ ], "parser_func": "DEFAULT" }, - "compute_units": 10, + 
"compute_units": 20, "enabled": true, "category": { "deterministic": true, @@ -156,7 +156,7 @@ "parser_func": "PARSE_DICTIONARY_OR_ORDERED", "default_value": "latest" }, - "compute_units": 10, + "compute_units": 20, "enabled": true, "category": { "deterministic": true, @@ -445,7 +445,7 @@ ], "parser_func": "DEFAULT" }, - "compute_units": 10, + "compute_units": 20, "enabled": true, "category": { "deterministic": false, @@ -598,6 +598,98 @@ "expected_value": "*" } ] + }, + { + "name": "pruning", + "parse_directive": { + "function_tag": "GET_BLOCK_BY_NUM" + }, + "values": [ + { + "expected_value": "1" + } + ] + } + ] + }, + { + "enabled": false, + "collection_data": { + "api_interface": "jsonrpc", + "internal_path": "WS-ONLY", + "type": "POST", + "add_on": "" + }, + "apis": [ + { + "name": "pathfinder_subscribe", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 1000, + "enabled": true, + "category": { + "deterministic": false, + "local": true, + "subscription": true, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "pathfinder_unsubscribe", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": false, + "local": true, + "subscription": true, + "stateful": 0 + }, + "extra_compute_units": 0 + } + ], + "parse_directives": [ + { + "function_tag": "SUBSCRIBE", + "api_name": "pathfinder_subscribe" + }, + { + "function_template": "{\"jsonrpc\":\"2.0\",\"method\":\"pathfinder_unsubscribe\",\"params\":[%s],\"id\":1}", + "function_tag": "UNSUBSCRIBE", + "api_name": "pathfinder_unsubscribe" + } + ] + }, + { + "enabled": true, + "collection_data": { + "api_interface": "jsonrpc", + "internal_path": "/ws", + "type": "POST", + "add_on": "" + }, + "inheritance_apis": [ + { + "api_interface": "jsonrpc", + "internal_path": "", + "type": "POST", + "add_on": "" + }, + { + "api_interface": "jsonrpc", + 
"internal_path": "WS-ONLY", + "type": "POST", + "add_on": "" } ] }, @@ -635,6 +727,69 @@ } ] }, + { + "enabled": true, + "collection_data": { + "api_interface": "jsonrpc", + "internal_path": "/ws/rpc/v0_6", + "type": "POST", + "add_on": "" + }, + "inheritance_apis": [ + { + "api_interface": "jsonrpc", + "internal_path": "/rpc/v0_6", + "type": "POST", + "add_on": "" + }, + { + "api_interface": "jsonrpc", + "internal_path": "WS-ONLY", + "type": "POST", + "add_on": "" + } + ] + }, + { + "enabled": true, + "collection_data": { + "api_interface": "jsonrpc", + "internal_path": "/rpc/v0_7", + "type": "POST", + "add_on": "" + }, + "inheritance_apis": [ + { + "api_interface": "jsonrpc", + "internal_path": "", + "type": "POST", + "add_on": "" + } + ] + }, + { + "enabled": true, + "collection_data": { + "api_interface": "jsonrpc", + "internal_path": "/ws/rpc/v0_7", + "type": "POST", + "add_on": "" + }, + "inheritance_apis": [ + { + "api_interface": "jsonrpc", + "internal_path": "/rpc/v0_7", + "type": "POST", + "add_on": "" + }, + { + "api_interface": "jsonrpc", + "internal_path": "WS-ONLY", + "type": "POST", + "add_on": "" + } + ] + }, { "enabled": true, "collection_data": { @@ -724,6 +879,93 @@ ] } ] + }, + { + "enabled": true, + "collection_data": { + "api_interface": "jsonrpc", + "internal_path": "/rpc/pathfinder/v0.1", + "type": "POST", + "add_on": "" + }, + "apis": [ + { + "name": "pathfinder_version", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "pathfinder_getProof", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + 
"name": "pathfinder_getTransactionStatus", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + } + ], + "verifications": [ + { + "name": "enabled", + "parse_directive": { + "function_template": "{\"jsonrpc\":\"2.0\",\"method\":\"pathfinder_version\",\"params\":[],\"id\":1}", + "function_tag": "VERIFICATION", + "result_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "api_name": "pathfinder_version" + }, + "values": [ + { + "expected_value": "*", + "severity": "Warning" + } + ] + } + ] } ] }, @@ -738,11 +980,11 @@ "data_reliability_enabled": true, "block_distance_for_finalized_data": 1, "blocks_in_finalization_proof": 3, - "average_block_time": 1800000, + "average_block_time": 32000, "allowed_block_lag_for_qos_sync": 1, "min_stake_provider": { "denom": "ulava", - "amount": "47500000000" + "amount": "5000000000" }, "api_collections": [ { @@ -768,6 +1010,29 @@ } ] }, + { + "enabled": true, + "collection_data": { + "api_interface": "jsonrpc", + "internal_path": "/ws", + "type": "POST", + "add_on": "" + }, + "apis": [], + "headers": [], + "inheritance_apis": [], + "parse_directives": [], + "verifications": [ + { + "name": "chain-id", + "values": [ + { + "expected_value": "0x534e5f5345504f4c4941" + } + ] + } + ] + }, { "enabled": true, "collection_data": { @@ -791,6 +1056,29 @@ } ] }, + { + "enabled": true, + "collection_data": { + "api_interface": "jsonrpc", + "internal_path": "/ws/rpc/v0_6", + "type": "POST", + "add_on": "" + }, + "apis": [], + "headers": [], + "inheritance_apis": [], + "parse_directives": [], + "verifications": [ + { + "name": "chain-id", + "values": [ + { + "expected_value": "0x534e5f5345504f4c4941" + } + ] + } + ] + }, { "enabled": true, "collection_data": { @@ -813,6 +1101,52 @@ ] } ] + }, + { + "enabled": 
true, + "collection_data": { + "api_interface": "jsonrpc", + "internal_path": "/rpc/v0_7", + "type": "POST", + "add_on": "" + }, + "apis": [], + "headers": [], + "inheritance_apis": [], + "parse_directives": [], + "verifications": [ + { + "name": "chain-id", + "values": [ + { + "expected_value": "0x534e5f5345504f4c4941" + } + ] + } + ] + }, + { + "enabled": true, + "collection_data": { + "api_interface": "jsonrpc", + "internal_path": "/ws/rpc/v0_7", + "type": "POST", + "add_on": "" + }, + "apis": [], + "headers": [], + "inheritance_apis": [], + "parse_directives": [], + "verifications": [ + { + "name": "chain-id", + "values": [ + { + "expected_value": "0x534e5f5345504f4c4941" + } + ] + } + ] } ] } diff --git a/cookbook/specs/tendermint.json b/cookbook/specs/tendermint.json index 3472a379f8..e818b27c4b 100644 --- a/cookbook/specs/tendermint.json +++ b/cookbook/specs/tendermint.json @@ -139,7 +139,7 @@ "compute_units": 10, "enabled": true, "category": { - "deterministic": true, + "deterministic": false, "local": false, "subscription": false, "stateful": 0 @@ -474,7 +474,7 @@ "compute_units": 10, "enabled": true, "category": { - "deterministic": true, + "deterministic": false, "local": false, "subscription": false, "stateful": 0 @@ -573,6 +573,45 @@ "stateful": 0 }, "extra_compute_units": 0 + }, + { + "name": "header", + "block_parsing": { + "parser_arg": [ + "height", + "=", + "0" + ], + "parser_func": "PARSE_DICTIONARY_OR_ORDERED", + "default_value": "latest" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "header_by_hash", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 10, + "enabled": true, + "category": { + "deterministic": true, + "local": false, + "subscription": false, + "stateful": 0 + }, + "extra_compute_units": 0 } ], "headers": [], diff --git 
a/ecosystem/lavajs/package.json b/ecosystem/lavajs/package.json index 8a78279efa..82036236a9 100644 --- a/ecosystem/lavajs/package.json +++ b/ecosystem/lavajs/package.json @@ -1,6 +1,6 @@ { "name": "@lavanet/lavajs", - "version": "2.1.3", + "version": "3.2.0", "description": "lavajs", "author": "Lava Network", "homepage": "https://github.com/lavanet/lava/tree/main/ecosystem/lavajs#readme", diff --git a/ecosystem/lavavisor/pkg/state/lavavisor_state_tracker.go b/ecosystem/lavavisor/pkg/state/lavavisor_state_tracker.go index b460d47b5e..fb1ca9d511 100644 --- a/ecosystem/lavavisor/pkg/state/lavavisor_state_tracker.go +++ b/ecosystem/lavavisor/pkg/state/lavavisor_state_tracker.go @@ -24,7 +24,8 @@ type LavaVisorStateTracker struct { func NewLavaVisorStateTracker(ctx context.Context, txFactory tx.Factory, clientCtx client.Context, chainFetcher chaintracker.ChainFetcher) (lvst *LavaVisorStateTracker, err error) { // validate chainId - status, err := clientCtx.Client.Status(ctx) + stateQuery := updaters.NewStateQuery(ctx, updaters.NewStateQueryAccessInst(clientCtx)) + status, err := stateQuery.Status(ctx) if err != nil { return nil, utils.LavaFormatError("[Lavavisor] failed getting status", err) } @@ -36,7 +37,7 @@ func NewLavaVisorStateTracker(ctx context.Context, txFactory tx.Factory, clientC if err != nil { utils.LavaFormatFatal("chain is missing Lava spec, cant initialize lavavisor", err) } - lst := &LavaVisorStateTracker{stateQuery: updaters.NewStateQuery(ctx, clientCtx), averageBlockTime: time.Duration(specResponse.Spec.AverageBlockTime) * time.Millisecond} + lst := &LavaVisorStateTracker{stateQuery: stateQuery, averageBlockTime: time.Duration(specResponse.Spec.AverageBlockTime) * time.Millisecond} return lst, nil } diff --git a/go.mod b/go.mod index 0bf47fc345..7015c35da6 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/lavanet/lava/v4 -go 1.20 +go 1.23 require ( github.com/99designs/keyring v1.2.1 // indirect @@ -225,7 +225,7 @@ require ( 
golang.org/x/crypto v0.21.0 // indirect golang.org/x/exp v0.0.0-20230905200255-921286631fa9 golang.org/x/net v0.23.0 - golang.org/x/sync v0.6.0 // indirect + golang.org/x/sync v0.6.0 golang.org/x/sys v0.20.0 // indirect golang.org/x/term v0.18.0 golang.org/x/text v0.14.0 // indirect diff --git a/go.sum b/go.sum index 2b7631814c..f4eb9b3ada 100644 --- a/go.sum +++ b/go.sum @@ -217,6 +217,7 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1/go.mod h1:fBF9PQNqB8scdgpZ3 github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3/go.mod h1:KLF4gFr6DcKFZwSuH8w8yEK6DpFl3LP5rhdvAb7Yz5I= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0/go.mod h1:tPaiy8S5bQ+S5sOiDlINkp7+Ef339+Nz5L5XO+cnOHo= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/ChainSafe/go-schnorrkel v1.0.0 h1:3aDA67lAykLaG1y3AOjs88dMxC88PgUuHRrLeDnvGIM= github.com/ChainSafe/go-schnorrkel v1.0.0/go.mod h1:dpzHYVxLZcp8pjlV+O+UR8K0Hp/z7vcchBSbMBEhCw4= @@ -227,7 +228,9 @@ github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/OneOfOne/xxhash v1.2.2 
h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= @@ -241,6 +244,7 @@ github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrd github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/Zilliqa/gozilliqa-sdk v1.2.1-0.20201201074141-dd0ecada1be6/go.mod h1:eSYp2T6f0apnuW8TzhV3f6Aff2SE8Dwio++U4ha4yEM= github.com/adlio/schema v1.3.3 h1:oBJn8I02PyTB466pZO1UZEn1TV5XLlifBSyMrmHl/1I= +github.com/adlio/schema v1.3.3/go.mod h1:1EsRssiv9/Ce2CMzq5DoL7RiMshhuigQxrR4DMV9fHg= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= @@ -301,6 +305,7 @@ github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFA github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= github.com/btcsuite/btcd/btcutil v1.1.2 h1:XLMbX8JQEiwMcYft2EGi8zPUkoa0abKIU6/BJSRsjzQ= +github.com/btcsuite/btcd/btcutil v1.1.2/go.mod h1:UR7dsSJzJUfMmFiiLlIrMq1lS9jh9EdCV7FStZSnpi0= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= @@ -358,10 +363,12 @@ github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod 
h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= +github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= github.com/cockroachdb/apd/v2 v2.0.2 h1:weh8u7Cneje73dDh+2tEVLUvyBc89iwepWCD8b8034E= github.com/cockroachdb/apd/v2 v2.0.2/go.mod h1:DDxRlzC2lo3/vSlmSoS7JkqbbrARPuFOGr0B9pvN3Gw= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= github.com/cockroachdb/errors v1.10.0 h1:lfxS8zZz1+OjtV4MtNWgboi/W5tyLEB6VQZBXN+0VUU= github.com/cockroachdb/errors v1.10.0/go.mod h1:lknhIsEVQ9Ss/qKDBQS/UqFSvPQjOwNq2qyKAxtHRqE= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= @@ -385,6 +392,7 @@ github.com/consensys/bavard v0.1.8-0.20210915155054-088da2f7f54a/go.mod h1:9ItSM github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f/go.mod h1:815PAHg3wvysy0SyIqanF8gZ0Y1wjk/hrDHD/iT88+Q= github.com/consensys/gnark-crypto v0.5.3/go.mod h1:hOdPlWQV1gDLp7faZVeg8Y0iEPFaOUnCc4XeCCk96p0= github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= +github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod 
h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -465,7 +473,9 @@ github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5O github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v1.6.2/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= @@ -493,6 +503,7 @@ github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go. 
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= +github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= github.com/ethereum/go-ethereum v1.10.17/go.mod h1:Lt5WzjM07XlXc95YzrhosmR4J9Ahd6X2wyEV2SvGhk0= github.com/ethereum/go-ethereum v1.10.18 h1:hLEd5M+UD0GJWPaROiYMRgZXl6bi5YwoTJSthsx5CZw= github.com/ethereum/go-ethereum v1.10.18/go.mod h1:RD3NhcSBjZpj3k+SnQq24wBrmnmie78P5R/P62iNBD8= @@ -508,9 +519,11 @@ github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlK github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= @@ -529,10 +542,12 @@ github.com/gin-contrib/sse v0.1.0 
h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= github.com/gin-gonic/gin v1.8.1 h1:4+fr/el88TOO3ewCmQr8cx/CtZ/umlIRIs5M4NTNjf8= +github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk= github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= @@ -559,10 +574,13 @@ github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU= +github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho= +github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= github.com/go-playground/validator/v10 
v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= github.com/go-playground/validator/v10 v10.11.1 h1:prmOlTVv+YjZjmRmNSF3VmspqJIxJWXmqUsHwfTRRkQ= +github.com/go-playground/validator/v10 v10.11.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU= github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= @@ -668,12 +686,14 @@ github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSN github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= +github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us= github.com/google/orderedcode v0.0.1/go.mod h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -853,6 +873,7 @@ 
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= @@ -897,6 +918,7 @@ github.com/lavanet/cosmos-sdk v0.47.13-lava-cosmos/go.mod h1:pYMzhTfKFn9AJB5X64E github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= +github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= @@ -973,6 +995,7 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/modocache/gover 
v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= @@ -997,6 +1020,7 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLA github.com/nishanths/predeclared v0.0.0-20200524104333-86fad755b4d3/go.mod h1:nt3d53pc1VYcphSCIaYAJtnPYnr3Zyn8fMq2wvPGPso= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= @@ -1009,15 +1033,20 @@ github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.20.0 h1:8W0cWlwFkflGPLltQvLRB7ZVD5HuP6ng320w2IS245Q= +github.com/onsi/gomega v1.20.0/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= github.com/op/go-logging 
v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0-rc2 h1:2zx/Stx4Wc5pIPDvIxHXvXtQFW/7XWJGmnM7r3wg034= +github.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w= +github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= @@ -1028,6 +1057,7 @@ github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJ github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA= +github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= @@ -1047,6 +1077,7 @@ github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= 
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -1147,6 +1178,7 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= @@ -1184,6 +1216,7 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod 
h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -1234,6 +1267,7 @@ github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVM github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0= +github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= @@ -1292,6 +1326,7 @@ go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znn go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= +go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= @@ -1365,6 +1400,7 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91 golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1675,6 +1711,7 @@ golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlz golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1974,6 +2011,7 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= +gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/proto/lavanet/lava/spec/api_collection.proto b/proto/lavanet/lava/spec/api_collection.proto index 1dc6b74b92..182cd085f6 100644 --- a/proto/lavanet/lava/spec/api_collection.proto +++ 
b/proto/lavanet/lava/spec/api_collection.proto @@ -119,14 +119,14 @@ enum FUNCTION_TAG { } enum PARSER_TYPE { - NO_PARSER = 0; - BLOCK_LATEST = 1; - BLOCK_EARLIEST = 2; - RESULT = 3; - EXTENSION_ARG = 4; - IDENTIFIER = 5; - DEFAULT_VALUE = 6; - BLOCK_HASH = 7; + NO_PARSER = 0; // parsing is disabled + BLOCK_LATEST = 1; // parse the latest block + BLOCK_EARLIEST = 2; // parse the earliest block + RESULT = 3; // parse the result of the rpc call + EXTENSION_ARG = 4; // parse the extension argument (e.g. should we turn on an extension or not, based on the parsed value) + IDENTIFIER = 5; // parse the id of the rpc message + DEFAULT_VALUE = 6; // set the default value for the parsed result (currently used for block, after all other generic parsers failed) + BLOCK_HASH = 7; // parse the block hash } enum PARSER_FUNC{ diff --git a/protocol/badgegenerator/tracker.go b/protocol/badgegenerator/tracker.go index aa82c5f90c..c7822bae83 100644 --- a/protocol/badgegenerator/tracker.go +++ b/protocol/badgegenerator/tracker.go @@ -28,11 +28,11 @@ func NewBadgeStateTracker(ctx context.Context, clientCtx cosmosclient.Context, c emergencyTracker, blockNotFoundCallback := statetracker.NewEmergencyTracker(nil) txFactory := tx.Factory{} txFactory = txFactory.WithChainID(chainId) - stateTrackerBase, err := statetracker.NewStateTracker(ctx, txFactory, clientCtx, chainFetcher, blockNotFoundCallback) + sq := updaters.NewStateQuery(ctx, updaters.NewStateQueryAccessInst(clientCtx)) + stateTrackerBase, err := statetracker.NewStateTracker(ctx, txFactory, sq, chainFetcher, blockNotFoundCallback) if err != nil { return nil, err } - sq := updaters.NewStateQuery(ctx, clientCtx) esq := updaters.NewEpochStateQuery(sq) pst := &BadgeStateTracker{StateTracker: stateTrackerBase, stateQuery: esq, ConsumerEmergencyTrackerInf: emergencyTracker} diff --git a/protocol/badgeserver/tracker.go b/protocol/badgeserver/tracker.go index 9b13ddee42..c54326a488 100644 --- a/protocol/badgeserver/tracker.go +++ 
b/protocol/badgeserver/tracker.go @@ -28,12 +28,12 @@ func NewBadgeStateTracker(ctx context.Context, clientCtx cosmosclient.Context, c emergencyTracker, blockNotFoundCallback := statetracker.NewEmergencyTracker(nil) txFactory := tx.Factory{} txFactory = txFactory.WithChainID(chainId) - stateTrackerBase, err := statetracker.NewStateTracker(ctx, txFactory, clientCtx, chainFetcher, blockNotFoundCallback) + stateQuery := updaters.NewStateQuery(ctx, updaters.NewStateQueryAccessInst(clientCtx)) + stateTrackerBase, err := statetracker.NewStateTracker(ctx, txFactory, stateQuery, chainFetcher, blockNotFoundCallback) if err != nil { return nil, err } - stateTracker := updaters.NewStateQuery(ctx, clientCtx) - epochStateTracker := updaters.NewEpochStateQuery(stateTracker) + epochStateTracker := updaters.NewEpochStateQuery(stateQuery) badgeStateTracker := &BadgeStateTracker{ StateTracker: stateTrackerBase, diff --git a/protocol/chainlib/base_chain_parser.go b/protocol/chainlib/base_chain_parser.go index 13da350da2..00b03ea90d 100644 --- a/protocol/chainlib/base_chain_parser.go +++ b/protocol/chainlib/base_chain_parser.go @@ -3,6 +3,8 @@ package chainlib import ( "errors" "fmt" + "io" + "net/http" "regexp" "strings" "sync" @@ -11,25 +13,37 @@ import ( "github.com/lavanet/lava/v4/protocol/chainlib/extensionslib" "github.com/lavanet/lava/v4/protocol/common" "github.com/lavanet/lava/v4/utils" + "github.com/lavanet/lava/v4/utils/lavaslices" + "github.com/lavanet/lava/v4/utils/maps" epochstorage "github.com/lavanet/lava/v4/x/epochstorage/types" pairingtypes "github.com/lavanet/lava/v4/x/pairing/types" spectypes "github.com/lavanet/lava/v4/x/spec/types" ) +var AllowMissingApisByDefault = true + type PolicyInf interface { GetSupportedAddons(specID string) (addons []string, err error) GetSupportedExtensions(specID string) (extensions []epochstorage.EndpointService, err error) } +type InternalPath struct { + Path string + Enabled bool + ApiInterface string + ConnectionType string + Addon 
string +} + type BaseChainParser struct { - internalPaths map[string]struct{} + internalPaths map[string]InternalPath taggedApis map[spectypes.FUNCTION_TAG]TaggedContainer spec spectypes.Spec rwLock sync.RWMutex serverApis map[ApiKey]ApiContainer apiCollections map[CollectionKey]*spectypes.ApiCollection headers map[ApiKey]*spectypes.Header - verifications map[VerificationKey][]VerificationContainer + verifications map[VerificationKey]map[string][]VerificationContainer // map[VerificationKey]map[InternalPath][]VerificationContainer allowedAddons map[string]bool extensionParser extensionslib.ExtensionParser active bool @@ -201,7 +215,7 @@ func (bcp *BaseChainParser) SeparateAddonsExtensions(supported []string) (addons } // gets all verifications for an endpoint supporting multiple addons and extensions -func (bcp *BaseChainParser) GetVerifications(supported []string) (retVerifications []VerificationContainer, err error) { +func (bcp *BaseChainParser) GetVerifications(supported []string, internalPath string, apiInterface string) (retVerifications []VerificationContainer, err error) { // addons will contains extensions and addons, // extensions must exist in all verifications, addons must be split because they are separated addons, extensions, err := bcp.SeparateAddonsExtensions(supported) @@ -212,24 +226,27 @@ func (bcp *BaseChainParser) GetVerifications(supported []string) (retVerificatio extensions = []string{""} } addons = append(addons, "") // always add the empty addon + for _, addon := range addons { for _, extension := range extensions { verificationKey := VerificationKey{ Extension: extension, Addon: addon, } - verifications, ok := bcp.verifications[verificationKey] + collectionVerifications, ok := bcp.verifications[verificationKey] if ok { - retVerifications = append(retVerifications, verifications...) + if verifications, ok := collectionVerifications[internalPath]; ok { + retVerifications = append(retVerifications, verifications...) 
+ } } } } - return + return retVerifications, nil } -func (bcp *BaseChainParser) Construct(spec spectypes.Spec, internalPaths map[string]struct{}, taggedApis map[spectypes.FUNCTION_TAG]TaggedContainer, +func (bcp *BaseChainParser) Construct(spec spectypes.Spec, internalPaths map[string]InternalPath, taggedApis map[spectypes.FUNCTION_TAG]TaggedContainer, serverApis map[ApiKey]ApiContainer, apiCollections map[CollectionKey]*spectypes.ApiCollection, headers map[ApiKey]*spectypes.Header, - verifications map[VerificationKey][]VerificationContainer, + verifications map[VerificationKey]map[string][]VerificationContainer, ) { bcp.spec = spec bcp.internalPaths = internalPaths @@ -263,6 +280,31 @@ func (bcp *BaseChainParser) GetParsingByTag(tag spectypes.FUNCTION_TAG) (parsing return val.Parsing, val.ApiCollection, ok } +func (bcp *BaseChainParser) IsTagInCollection(tag spectypes.FUNCTION_TAG, collectionKey CollectionKey) bool { + bcp.rwLock.RLock() + defer bcp.rwLock.RUnlock() + + apiCollection, ok := bcp.apiCollections[collectionKey] + return ok && lavaslices.ContainsPredicate(apiCollection.ParseDirectives, func(elem *spectypes.ParseDirective) bool { + return elem.FunctionTag == tag + }) +} + +func (bcp *BaseChainParser) GetAllInternalPaths() []string { + bcp.rwLock.RLock() + defer bcp.rwLock.RUnlock() + return lavaslices.Map(maps.ValuesSlice(bcp.internalPaths), func(internalPath InternalPath) string { + return internalPath.Path + }) +} + +func (bcp *BaseChainParser) IsInternalPathEnabled(internalPath string, apiInterface string, addon string) bool { + bcp.rwLock.RLock() + defer bcp.rwLock.RUnlock() + internalPathObj, ok := bcp.internalPaths[internalPath] + return ok && internalPathObj.Enabled && internalPathObj.ApiInterface == apiInterface && internalPathObj.Addon == addon +} + func (bcp *BaseChainParser) ExtensionParsing(addon string, parsedMessageArg *baseChainMessageContainer, extensionInfo extensionslib.ExtensionInfo) { if extensionInfo.ExtensionOverride == nil { // 
consumer side extension parsing. to set the extension based on the latest block and the request @@ -283,6 +325,35 @@ func (bcp *BaseChainParser) extensionParsingInner(addon string, parsedMessageArg bcp.extensionParser.ExtensionParsing(addon, parsedMessageArg, latestBlock) } +func (apip *BaseChainParser) defaultApiContainer(apiKey ApiKey) (*ApiContainer, error) { + // Guard that the GrpcChainParser instance exists + if apip == nil { + return nil, errors.New("ChainParser not defined") + } + utils.LavaFormatDebug("api not supported", utils.Attribute{Key: "apiKey", Value: apiKey}) + apiCont := &ApiContainer{ + api: &spectypes.Api{ + Enabled: true, + Name: "Default-" + apiKey.Name, + ComputeUnits: 20, // set 20 compute units by default + ExtraComputeUnits: 0, + Category: spectypes.SpecCategory{}, + BlockParsing: spectypes.BlockParser{ + ParserFunc: spectypes.PARSER_FUNC_EMPTY, + }, + TimeoutMs: 0, + Parsers: []spectypes.GenericParser{}, + }, + collectionKey: CollectionKey{ + ConnectionType: apiKey.ConnectionType, + InternalPath: apiKey.InternalPath, + Addon: "", + }, + } + + return apiCont, nil +} + // getSupportedApi fetches service api from spec by name func (apip *BaseChainParser) getSupportedApi(apiKey ApiKey) (*ApiContainer, error) { // Guard that the GrpcChainParser instance exists @@ -299,6 +370,9 @@ func (apip *BaseChainParser) getSupportedApi(apiKey ApiKey) (*ApiContainer, erro // Return an error if spec does not exist if !ok { + if AllowMissingApisByDefault { + return apip.defaultApiContainer(apiKey) + } return nil, common.APINotSupportedError } @@ -318,6 +392,55 @@ func (apip *BaseChainParser) isValidInternalPath(path string) bool { return ok } +// take an http request and direct it through the consumer +func (apip *BaseChainParser) ExtractDataFromRequest(request *http.Request) (url string, data string, connectionType string, metadata []pairingtypes.Metadata, err error) { + // Extract relative URL path + url = request.URL.Path + // Extract connection type + 
connectionType = request.Method + + // Extract metadata + for key, values := range request.Header { + for _, value := range values { + metadata = append(metadata, pairingtypes.Metadata{ + Name: key, + Value: value, + }) + } + } + + // Extract data + if request.Body != nil { + bodyBytes, err := io.ReadAll(request.Body) + if err != nil { + return "", "", "", nil, err + } + data = string(bodyBytes) + } + + return url, data, connectionType, metadata, nil +} + +func (apip *BaseChainParser) SetResponseFromRelayResult(relayResult *common.RelayResult) (*http.Response, error) { + if relayResult == nil { + return nil, errors.New("relayResult is nil") + } + response := &http.Response{ + StatusCode: relayResult.StatusCode, + Header: make(http.Header), + } + + for _, values := range relayResult.Reply.Metadata { + response.Header.Add(values.Name, values.Value) + } + + if relayResult.Reply != nil && relayResult.Reply.Data != nil { + response.Body = io.NopCloser(strings.NewReader(string(relayResult.Reply.Data))) + } + + return response, nil +} + // getSupportedApi fetches service api from spec by name func (apip *BaseChainParser) getApiCollection(connectionType, internalPath, addon string) (*spectypes.ApiCollection, error) { // Guard that the GrpcChainParser instance exists @@ -350,13 +473,23 @@ func (apip *BaseChainParser) getApiCollection(connectionType, internalPath, addo return api, nil } -func getServiceApis(spec spectypes.Spec, rpcInterface string) (retInternalPaths map[string]struct{}, retServerApis map[ApiKey]ApiContainer, retTaggedApis map[spectypes.FUNCTION_TAG]TaggedContainer, retApiCollections map[CollectionKey]*spectypes.ApiCollection, retHeaders map[ApiKey]*spectypes.Header, retVerifications map[VerificationKey][]VerificationContainer) { - retInternalPaths = map[string]struct{}{} +func getServiceApis( + spec spectypes.Spec, + rpcInterface string, +) ( + retInternalPaths map[string]InternalPath, + retServerApis map[ApiKey]ApiContainer, + retTaggedApis 
map[spectypes.FUNCTION_TAG]TaggedContainer, + retApiCollections map[CollectionKey]*spectypes.ApiCollection, + retHeaders map[ApiKey]*spectypes.Header, + retVerifications map[VerificationKey]map[string][]VerificationContainer, +) { + retInternalPaths = map[string]InternalPath{} serverApis := map[ApiKey]ApiContainer{} taggedApis := map[spectypes.FUNCTION_TAG]TaggedContainer{} headers := map[ApiKey]*spectypes.Header{} apiCollections := map[CollectionKey]*spectypes.ApiCollection{} - verifications := map[VerificationKey][]VerificationContainer{} + verifications := map[VerificationKey]map[string][]VerificationContainer{} if spec.Enabled { for _, apiCollection := range spec.ApiCollections { if !apiCollection.Enabled { @@ -372,12 +505,30 @@ func getServiceApis(spec spectypes.Spec, rpcInterface string) (retInternalPaths } // add as a valid internal path - retInternalPaths[apiCollection.CollectionData.InternalPath] = struct{}{} + retInternalPaths[apiCollection.CollectionData.InternalPath] = InternalPath{ + Path: apiCollection.CollectionData.InternalPath, + Enabled: apiCollection.Enabled, + ApiInterface: apiCollection.CollectionData.ApiInterface, + ConnectionType: apiCollection.CollectionData.Type, + Addon: apiCollection.CollectionData.AddOn, + } for _, parsing := range apiCollection.ParseDirectives { - taggedApis[parsing.FunctionTag] = TaggedContainer{ - Parsing: parsing, - ApiCollection: apiCollection, + // We do this because some specs may have multiple parse directives + // with the same tag - SUBSCRIBE (like in Solana). + // + // Since the function tag is not used for handling the subscription flow, + // we can ignore the extra parse directives and take only the first one. The + // subscription flow is handled by the consumer websocket manager and the chain router + // that uses the api collection to fetch the correct parse directive. + // + // The only place the SUBSCRIBE tag is checked against the taggedApis map is in the chain parser with GetParsingByTag. 
+ // But there, we're not interested in the parse directive, only if the tag is present. + if _, ok := taggedApis[parsing.FunctionTag]; !ok { + taggedApis[parsing.FunctionTag] = TaggedContainer{ + Parsing: parsing, + ApiCollection: apiCollection, + } } } @@ -385,7 +536,7 @@ func getServiceApis(spec spectypes.Spec, rpcInterface string) (retInternalPaths if !api.Enabled { continue } - // + // TODO: find a better spot for this (more optimized, precompile regex, etc) if rpcInterface == spectypes.APIInterfaceRest { re := regexp.MustCompile(`{[^}]+}`) @@ -455,6 +606,7 @@ func getServiceApis(spec spectypes.Spec, rpcInterface string) (retInternalPaths } verCont := VerificationContainer{ + InternalPath: apiCollection.CollectionData.InternalPath, ConnectionType: apiCollection.CollectionData.Type, Name: verification.Name, ParseDirective: *verification.ParseDirective, @@ -464,10 +616,13 @@ func getServiceApis(spec spectypes.Spec, rpcInterface string) (retInternalPaths Severity: parseValue.Severity, } + internalPath := apiCollection.CollectionData.InternalPath if extensionVerifications, ok := verifications[verificationKey]; !ok { - verifications[verificationKey] = []VerificationContainer{verCont} + verifications[verificationKey] = map[string][]VerificationContainer{internalPath: {verCont}} + } else if collectionVerifications, ok := extensionVerifications[internalPath]; !ok { + verifications[verificationKey][internalPath] = []VerificationContainer{verCont} } else { - verifications[verificationKey] = append(extensionVerifications, verCont) + verifications[verificationKey][internalPath] = append(collectionVerifications, verCont) } } } diff --git a/protocol/chainlib/base_chain_parser_test.go b/protocol/chainlib/base_chain_parser_test.go new file mode 100644 index 0000000000..8339cd2688 --- /dev/null +++ b/protocol/chainlib/base_chain_parser_test.go @@ -0,0 +1,142 @@ +package chainlib + +import ( + reflect "reflect" + "strconv" + "testing" + + 
"github.com/lavanet/lava/v4/protocol/chainlib/extensionslib" + spectypes "github.com/lavanet/lava/v4/x/spec/types" + "github.com/stretchr/testify/require" +) + +func TestGetVerifications(t *testing.T) { + verifications := map[VerificationKey]map[string][]VerificationContainer{ + { + Extension: "", + Addon: "", + }: { + "/x": { + {InternalPath: "/x"}, + }, + "": { + {InternalPath: ""}, + }, + }, + { + Extension: "", + Addon: "addon1", + }: { + "/x": { + {InternalPath: "/x"}, + }, + "": { + {InternalPath: ""}, + }, + }, + { + Extension: "ext1", + Addon: "addon1", + }: { + "/x": { + {InternalPath: "/x"}, + }, + "": { + {InternalPath: ""}, + }, + }, + { + Extension: "ext1", + Addon: "", + }: { + "/x": { + {InternalPath: "/x"}, + }, + "": { + {InternalPath: ""}, + }, + }, + } + + playBook := []struct { + Extension string + Addon string + InternalPath string + }{ + { + Extension: "", + Addon: "", + InternalPath: "", + }, + { + Extension: "", + Addon: "", + InternalPath: "/x", + }, + { + Extension: "ext1", + Addon: "addon1", + InternalPath: "", + }, + { + Extension: "ext1", + Addon: "addon1", + InternalPath: "/x", + }, + { + Extension: "", + Addon: "addon1", + InternalPath: "", + }, + { + Extension: "", + Addon: "addon1", + InternalPath: "/x", + }, + { + Extension: "ext1", + Addon: "", + InternalPath: "", + }, + { + Extension: "ext1", + Addon: "", + InternalPath: "/x", + }, + } + + baseChainParser := BaseChainParser{ + verifications: verifications, + allowedAddons: map[string]bool{"addon1": true}, + } + baseChainParser.extensionParser = extensionslib.NewExtensionParser(map[string]struct{}{"ext1": {}}, nil) + + for idx, play := range playBook { + for _, apiInterface := range []string{spectypes.APIInterfaceJsonRPC, spectypes.APIInterfaceTendermintRPC, spectypes.APIInterfaceRest, spectypes.APIInterfaceGrpc} { + t.Run("GetVerifications "+strconv.Itoa(idx), func(t *testing.T) { + var supported []string + if play.Extension == "" && play.Addon == "" { + supported = []string{""} 
+ } else if play.Extension == "" { + supported = []string{play.Addon} + } else if play.Addon == "" { + supported = []string{play.Extension} + } else { + supported = []string{play.Extension, play.Addon} + } + + actualVerifications, err := baseChainParser.GetVerifications(supported, play.InternalPath, apiInterface) + require.NoError(t, err) + + expectedVerificationKey := VerificationKey{Extension: play.Extension, Addon: play.Addon} + expectedVerifications := verifications[expectedVerificationKey][play.InternalPath] + // add the empty addon to the expected verifications + if play.Addon != "" { + expectedVerificationKey.Addon = "" + expectedVerifications = append(expectedVerifications, verifications[expectedVerificationKey][play.InternalPath]...) + } + require.True(t, reflect.DeepEqual(expectedVerifications, actualVerifications), "expected: %v, actual: %v", expectedVerifications, actualVerifications) + }) + } + } +} diff --git a/protocol/chainlib/chain_fetcher.go b/protocol/chainlib/chain_fetcher.go index a0a8fde139..920cab724b 100644 --- a/protocol/chainlib/chain_fetcher.go +++ b/protocol/chainlib/chain_fetcher.go @@ -49,7 +49,7 @@ func (cf *ChainFetcher) FetchEndpoint() lavasession.RPCProviderEndpoint { func (cf *ChainFetcher) Validate(ctx context.Context) error { for _, url := range cf.endpoint.NodeUrls { addons := url.Addons - verifications, err := cf.chainParser.GetVerifications(addons) + verifications, err := cf.chainParser.GetVerifications(addons, url.InternalPath, cf.endpoint.ApiInterface) if err != nil { return err } @@ -121,6 +121,26 @@ func (cf *ChainFetcher) populateCache(relayData *pairingtypes.RelayPrivateData, } } +func getExtensionsForVerification(verification VerificationContainer, chainParser ChainParser) []string { + extensions := []string{verification.Extension} + + collectionKey := CollectionKey{ + InternalPath: verification.InternalPath, + Addon: verification.Addon, + ConnectionType: verification.ConnectionType, + } + + if 
chainParser.IsTagInCollection(spectypes.FUNCTION_TAG_SUBSCRIBE, collectionKey) { + if verification.Extension == "" { + extensions = []string{WebSocketExtension} + } else { + extensions = append(extensions, WebSocketExtension) + } + } + + return extensions +} + func (cf *ChainFetcher) Verify(ctx context.Context, verification VerificationContainer, latestBlock uint64) error { parsing := &verification.ParseDirective @@ -167,17 +187,27 @@ func (cf *ChainFetcher) Verify(ctx context.Context, verification VerificationCon } } - chainMessage, err := CraftChainMessage(parsing, collectionType, cf.chainParser, &CraftData{Path: path, Data: data, ConnectionType: collectionType}, cf.ChainFetcherMetadata()) + craftData := &CraftData{Path: path, Data: data, ConnectionType: collectionType, InternalPath: verification.InternalPath} + chainMessage, err := CraftChainMessage(parsing, collectionType, cf.chainParser, craftData, cf.ChainFetcherMetadata()) if err != nil { return utils.LavaFormatError("[-] verify failed creating chainMessage", err, []utils.Attribute{{Key: "chainID", Value: cf.endpoint.ChainID}, {Key: "APIInterface", Value: cf.endpoint.ApiInterface}}...) } - reply, _, _, proxyUrl, chainId, err := cf.chainRouter.SendNodeMsg(ctx, nil, chainMessage, []string{verification.Extension}) + extensions := getExtensionsForVerification(verification, cf.chainParser) + + reply, _, _, proxyUrl, chainId, err := cf.chainRouter.SendNodeMsg(ctx, nil, chainMessage, extensions) if err != nil { - return utils.LavaFormatWarning("[-] verify failed sending chainMessage", err, []utils.Attribute{{Key: "chainID", Value: cf.endpoint.ChainID}, {Key: "APIInterface", Value: cf.endpoint.ApiInterface}}...) 
+ return utils.LavaFormatWarning("[-] verify failed sending chainMessage", err, + utils.LogAttr("chainID", cf.endpoint.ChainID), + utils.LogAttr("APIInterface", cf.endpoint.ApiInterface), + utils.LogAttr("extensions", extensions), + ) } if reply == nil || reply.RelayReply == nil { - return utils.LavaFormatWarning("[-] verify failed sending chainMessage, reply or reply.RelayReply are nil", nil, []utils.Attribute{{Key: "chainID", Value: cf.endpoint.ChainID}, {Key: "APIInterface", Value: cf.endpoint.ApiInterface}}...) + return utils.LavaFormatWarning("[-] verify failed sending chainMessage, reply or reply.RelayReply are nil", nil, + utils.LogAttr("chainID", cf.endpoint.ChainID), + utils.LogAttr("APIInterface", cf.endpoint.ApiInterface), + ) } parserInput, err := FormatResponseForParsing(reply.RelayReply, chainMessage) @@ -190,7 +220,18 @@ func (cf *ChainFetcher) Verify(ctx context.Context, verification VerificationCon parsedInput := parser.ParseBlockFromReply(parserInput, parsing.ResultParsing, parsing.Parsers) if parsedInput.GetRawParsedData() == "" { - return utils.LavaFormatWarning("[-] verify failed to parse result", err, + return utils.LavaFormatWarning("[-] verify failed to parse result", nil, + utils.LogAttr("chainId", chainId), + utils.LogAttr("nodeUrl", proxyUrl.Url), + utils.LogAttr("Method", parsing.GetApiName()), + utils.LogAttr("Response", string(reply.RelayReply.Data)), + ) + } + + parserError := parsedInput.GetParserError() + if parserError != "" { + return utils.LavaFormatWarning("[-] parser returned an error", nil, + utils.LogAttr("error", parserError), utils.LogAttr("chainId", chainId), utils.LogAttr("nodeUrl", proxyUrl.Url), utils.LogAttr("Method", parsing.GetApiName()), @@ -200,7 +241,7 @@ func (cf *ChainFetcher) Verify(ctx context.Context, verification VerificationCon if verification.LatestDistance != 0 && latestBlock != 0 && verification.ParseDirective.FunctionTag != spectypes.FUNCTION_TAG_GET_BLOCK_BY_NUM { parsedResultAsNumber := 
parsedInput.GetBlock() if parsedResultAsNumber == spectypes.NOT_APPLICABLE { - return utils.LavaFormatWarning("[-] verify failed to parse result as number", err, + return utils.LavaFormatWarning("[-] verify failed to parse result as number", nil, utils.LogAttr("chainId", chainId), utils.LogAttr("nodeUrl", proxyUrl.Url), utils.LogAttr("Method", parsing.GetApiName()), @@ -210,7 +251,7 @@ func (cf *ChainFetcher) Verify(ctx context.Context, verification VerificationCon } uint64ParsedResultAsNumber := uint64(parsedResultAsNumber) if uint64ParsedResultAsNumber > latestBlock { - return utils.LavaFormatWarning("[-] verify failed parsed result is greater than latestBlock", err, + return utils.LavaFormatWarning("[-] verify failed parsed result is greater than latestBlock", nil, utils.LogAttr("chainId", chainId), utils.LogAttr("nodeUrl", proxyUrl.Url), utils.LogAttr("Method", parsing.GetApiName()), @@ -219,7 +260,7 @@ func (cf *ChainFetcher) Verify(ctx context.Context, verification VerificationCon ) } if latestBlock-uint64ParsedResultAsNumber < verification.LatestDistance { - return utils.LavaFormatWarning("[-] verify failed expected block distance is not sufficient", err, + return utils.LavaFormatWarning("[-] verify failed expected block distance is not sufficient", nil, utils.LogAttr("chainId", chainId), utils.LogAttr("nodeUrl", proxyUrl.Url), utils.LogAttr("Method", parsing.GetApiName()), @@ -233,7 +274,7 @@ func (cf *ChainFetcher) Verify(ctx context.Context, verification VerificationCon if verification.Value != "*" && verification.Value != "" && verification.ParseDirective.FunctionTag != spectypes.FUNCTION_TAG_GET_BLOCK_BY_NUM { rawData := parsedInput.GetRawParsedData() if rawData != verification.Value { - return utils.LavaFormatWarning("[-] verify failed expected and received are different", err, + return utils.LavaFormatWarning("[-] verify failed expected and received are different", nil, utils.LogAttr("chainId", chainId), utils.LogAttr("nodeUrl", proxyUrl.Url), 
utils.LogAttr("rawParsedBlock", rawData), @@ -245,6 +286,7 @@ func (cf *ChainFetcher) Verify(ctx context.Context, verification VerificationCon ) } } + utils.LavaFormatInfo("[+] verified successfully", utils.LogAttr("chainId", chainId), utils.LogAttr("nodeUrl", proxyUrl.Url), @@ -253,6 +295,7 @@ func (cf *ChainFetcher) Verify(ctx context.Context, verification VerificationCon utils.LogAttr("rawData", parsedInput.GetRawParsedData()), utils.LogAttr("verificationKey", verification.VerificationKey), utils.LogAttr("apiInterface", cf.endpoint.ApiInterface), + utils.LogAttr("internalPath", proxyUrl.InternalPath), ) return nil } @@ -455,7 +498,7 @@ type DummyChainFetcher struct { func (cf *DummyChainFetcher) Validate(ctx context.Context) error { for _, url := range cf.endpoint.NodeUrls { addons := url.Addons - verifications, err := cf.chainParser.GetVerifications(addons) + verifications, err := cf.chainParser.GetVerifications(addons, url.InternalPath, cf.endpoint.ApiInterface) if err != nil { return err } diff --git a/protocol/chainlib/chain_message.go b/protocol/chainlib/chain_message.go index fdd33ea80c..39c0110c95 100644 --- a/protocol/chainlib/chain_message.go +++ b/protocol/chainlib/chain_message.go @@ -31,6 +31,7 @@ type baseChainMessageContainer struct { timeoutOverride time.Duration forceCacheRefresh bool parseDirective *spectypes.ParseDirective // setting the parse directive related to the api, can be nil + usedDefaultValue bool inputHashCache []byte // resultErrorParsingMethod passed by each api interface message to parse the result of the message @@ -38,6 +39,18 @@ type baseChainMessageContainer struct { resultErrorParsingMethod func(data []byte, httpStatusCode int) (hasError bool, errorMessage string) } +func (bcmc *baseChainMessageContainer) UpdateEarliestInMessage(incomingEarliest int64) bool { + updatedSuccessfully := false + if bcmc.earliestRequestedBlock != spectypes.EARLIEST_BLOCK { + // check earliest is not unset (0) or incoming is lower than current 
value + if bcmc.earliestRequestedBlock == 0 || bcmc.earliestRequestedBlock > incomingEarliest { + bcmc.earliestRequestedBlock = incomingEarliest + updatedSuccessfully = true + } + } + return updatedSuccessfully +} + func (bcnc *baseChainMessageContainer) GetRequestedBlocksHashes() []string { return bcnc.requestedBlockHashes } @@ -158,6 +171,10 @@ func (bcnc *baseChainMessageContainer) OverrideExtensions(extensionNames []strin } } +func (bcnc *baseChainMessageContainer) GetUsedDefaultValue() bool { + return bcnc.usedDefaultValue +} + func (bcnc *baseChainMessageContainer) SetExtension(extension *spectypes.Extension) { if len(bcnc.extensions) > 0 { for _, ext := range bcnc.extensions { @@ -183,6 +200,7 @@ type CraftData struct { Path string Data []byte ConnectionType string + InternalPath string } func CraftChainMessage(parsing *spectypes.ParseDirective, connectionType string, chainParser ChainParser, craftData *CraftData, metadata []pairingtypes.Metadata) (ChainMessageForSend, error) { diff --git a/protocol/chainlib/chain_message_test.go b/protocol/chainlib/chain_message_test.go new file mode 100644 index 0000000000..5e988623b7 --- /dev/null +++ b/protocol/chainlib/chain_message_test.go @@ -0,0 +1,106 @@ +package chainlib + +import ( + "fmt" + "net/http" + "testing" + + testcommon "github.com/lavanet/lava/v4/testutil/common" + "github.com/lavanet/lava/v4/x/spec/types" + "github.com/stretchr/testify/require" +) + +func TestCraftChainMessage(t *testing.T) { + type play struct { + apiInterface string + craftData *CraftData + } + + expectedInternalPath := "/x" + method := "banana" + + playBook := []play{ + { + apiInterface: types.APIInterfaceJsonRPC, + craftData: &CraftData{ + Path: method, + Data: []byte(fmt.Sprintf(`{"jsonrpc":"2.0","method":"%s","params":[],"id":1}`, method)), + ConnectionType: http.MethodPost, + InternalPath: expectedInternalPath, + }, + }, + { + apiInterface: types.APIInterfaceTendermintRPC, + craftData: &CraftData{ + Path: method, + Data: 
[]byte(fmt.Sprintf(`{"jsonrpc":"2.0","method":"%s","params":[],"id":1}`, method)), + ConnectionType: "", + InternalPath: expectedInternalPath, + }, + }, + { + apiInterface: types.APIInterfaceRest, + craftData: &CraftData{ + Data: []byte(method), + ConnectionType: http.MethodGet, + InternalPath: expectedInternalPath, + }, + }, + { + apiInterface: types.APIInterfaceRest, + craftData: &CraftData{ + Path: method, + Data: []byte(`{"data":"banana"}`), + ConnectionType: http.MethodPost, + InternalPath: expectedInternalPath, + }, + }, + { + apiInterface: types.APIInterfaceGrpc, + craftData: &CraftData{ + Path: method, + ConnectionType: "", + InternalPath: expectedInternalPath, + }, + }, + } + + for _, play := range playBook { + runName := play.apiInterface + if play.craftData.ConnectionType != "" { + runName += "_" + play.craftData.ConnectionType + } + + t.Run(runName, func(t *testing.T) { + chainParser, err := NewChainParser(play.apiInterface) + require.NoError(t, err) + + spec := testcommon.CreateMockSpec() + spec.ApiCollections = []*types.ApiCollection{ + { + Enabled: true, + CollectionData: types.CollectionData{ + ApiInterface: play.apiInterface, + Type: play.craftData.ConnectionType, + InternalPath: expectedInternalPath, + }, + Apis: []*types.Api{ + { + Name: method, + ComputeUnits: 100, + Enabled: true, + }, + }, + }, + } + chainParser.SetSpec(spec) + + chainMsg, err := CraftChainMessage(&types.ParseDirective{ApiName: method}, play.craftData.ConnectionType, chainParser, play.craftData, nil) + require.NoError(t, err) + require.NotNil(t, chainMsg) + + internalPath := chainMsg.GetApiCollection().CollectionData.InternalPath + require.Equal(t, expectedInternalPath, internalPath) + }) + } +} diff --git a/protocol/chainlib/chain_router.go b/protocol/chainlib/chain_router.go index 4237965d3c..9c07a6bdbb 100644 --- a/protocol/chainlib/chain_router.go +++ b/protocol/chainlib/chain_router.go @@ -2,7 +2,6 @@ package chainlib import ( "context" - "net/url" "strings" "sync" @@ 
-16,11 +15,6 @@ import ( "google.golang.org/grpc/metadata" ) -type MethodRoute struct { - lavasession.RouterKey - method string -} - type chainRouterEntry struct { ChainProxy addonsSupported map[string]struct{} @@ -39,16 +33,18 @@ func (cre *chainRouterEntry) isSupporting(addon string) bool { type chainRouterImpl struct { lock *sync.RWMutex - chainProxyRouter map[lavasession.RouterKey][]chainRouterEntry + chainProxyRouter map[string][]chainRouterEntry // key is routing key } -func (cri *chainRouterImpl) GetChainProxySupporting(ctx context.Context, addon string, extensions []string, method string) (ChainProxy, error) { +func (cri *chainRouterImpl) GetChainProxySupporting(ctx context.Context, addon string, extensions []string, method string, internalPath string) (ChainProxy, error) { cri.lock.RLock() defer cri.lock.RUnlock() // check if that specific method has a special route, if it does apply it to the router key wantedRouterKey := lavasession.NewRouterKey(extensions) - if chainProxyEntries, ok := cri.chainProxyRouter[wantedRouterKey]; ok { + wantedRouterKey.ApplyInternalPath(internalPath) + wantedRouterKeyStr := wantedRouterKey.String() + if chainProxyEntries, ok := cri.chainProxyRouter[wantedRouterKeyStr]; ok { for _, chainRouterEntry := range chainProxyEntries { if chainRouterEntry.isSupporting(addon) { // check if the method is supported @@ -58,38 +54,43 @@ func (cri *chainRouterImpl) GetChainProxySupporting(ctx context.Context, addon s } utils.LavaFormatTrace("chainProxy supporting method routing selected", utils.LogAttr("addon", addon), - utils.LogAttr("wantedRouterKey", wantedRouterKey), + utils.LogAttr("wantedRouterKey", wantedRouterKeyStr), utils.LogAttr("method", method), ) } - if wantedRouterKey != lavasession.GetEmptyRouterKey() { // add trailer only when router key is not default (||) - grpc.SetTrailer(ctx, metadata.Pairs(RPCProviderNodeExtension, string(wantedRouterKey))) + if wantedRouterKeyStr != lavasession.GetEmptyRouterKey().String() { // add 
trailer only when router key is not default (||) + grpc.SetTrailer(ctx, metadata.Pairs(RPCProviderNodeExtension, wantedRouterKeyStr)) } return chainRouterEntry.ChainProxy, nil } utils.LavaFormatTrace("chainProxy supporting extensions but not supporting addon", utils.LogAttr("addon", addon), - utils.LogAttr("wantedRouterKey", wantedRouterKey), + utils.LogAttr("wantedRouterKey", wantedRouterKeyStr), ) } // no support for this addon return nil, utils.LavaFormatError("no chain proxy supporting requested addon", nil, utils.Attribute{Key: "addon", Value: addon}) } // no support for these extensions - return nil, utils.LavaFormatError("no chain proxy supporting requested extensions", nil, utils.Attribute{Key: "extensions", Value: extensions}) + return nil, utils.LavaFormatError("no chain proxy supporting requested extensions and internal path", nil, + utils.LogAttr("extensions", extensions), + utils.LogAttr("internalPath", internalPath), + utils.LogAttr("supported", cri.chainProxyRouter), + ) } -func (cri chainRouterImpl) ExtensionsSupported(extensions []string) bool { +func (cri chainRouterImpl) ExtensionsSupported(internalPath string, extensions []string) bool { routerKey := lavasession.NewRouterKey(extensions) - _, ok := cri.chainProxyRouter[routerKey] + routerKey.ApplyInternalPath(internalPath) + _, ok := cri.chainProxyRouter[routerKey.String()] return ok } func (cri chainRouterImpl) SendNodeMsg(ctx context.Context, ch chan interface{}, chainMessage ChainMessageForSend, extensions []string) (relayReply *RelayReplyWrapper, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, proxyUrl common.NodeUrl, chainId string, err error) { // add the parsed addon from the apiCollection addon := chainMessage.GetApiCollection().CollectionData.AddOn - selectedChainProxy, err := cri.GetChainProxySupporting(ctx, addon, extensions, chainMessage.GetApi().Name) + selectedChainProxy, err := cri.GetChainProxySupporting(ctx, addon, extensions, chainMessage.GetApi().Name, 
chainMessage.GetApiCollection().CollectionData.InternalPath) if err != nil { return nil, "", nil, common.NodeUrl{}, "", err } @@ -98,11 +99,74 @@ func (cri chainRouterImpl) SendNodeMsg(ctx context.Context, ch chan interface{}, return relayReply, subscriptionID, relayReplyServer, proxyUrl, chainId, err } +func (cri *chainRouterImpl) autoGenerateMissingInternalPaths(isWs bool, nodeUrl common.NodeUrl, routerKey lavasession.RouterKey, autoGeneratedInternalPaths map[string]struct{}, rpcProviderEndpoint lavasession.RPCProviderEndpoint, chainParser ChainParser, returnedBatch map[string]lavasession.RPCProviderEndpoint) error { + baseUrl := nodeUrl.Url + for _, internalPath := range chainParser.GetAllInternalPaths() { + if internalPath == "" { + // skip the root since we've already added it + continue + } + + autoGeneratedInternalPaths[internalPath] = struct{}{} + + nodeUrl.InternalPath = internalPath // add internal path to the nodeUrl + nodeUrl.Url = baseUrl + internalPath + routerKey.ApplyInternalPath(internalPath) + + addons, _, err := chainParser.SeparateAddonsExtensions(nodeUrl.Addons) + if err != nil { + return err + } + + subscriptionTagFound := func() bool { + for _, connectionType := range []string{"POST", ""} { + if len(addons) == 0 { + addons = append(addons, "") + } + + for _, addon := range addons { + // check subscription exists, we only care for subscription API's because otherwise we use http anyway. 
+ collectionKey := CollectionKey{ + InternalPath: internalPath, + Addon: addon, + ConnectionType: connectionType, + } + + if chainParser.IsTagInCollection(spectypes.FUNCTION_TAG_SUBSCRIBE, collectionKey) { + return true + } + } + } + return false + }() + + if isWs && !subscriptionTagFound { + // this is ws, don't auto generate http paths + continue + } else if !isWs && subscriptionTagFound { + // this is http, don't auto generate ws paths + continue + } + + utils.LavaFormatDebug("auto generated internal path", + utils.LogAttr("nodeUrl", nodeUrl.Url), + utils.LogAttr("internalPath", internalPath), + utils.LogAttr("routerKey", routerKey.String()), + ) + cri.setRouterKeyInBatch(nodeUrl, returnedBatch, routerKey, rpcProviderEndpoint, false) // will not override existing entries + } + + return nil +} + // batch nodeUrls with the same addons together in a copy -func (cri *chainRouterImpl) BatchNodeUrlsByServices(rpcProviderEndpoint lavasession.RPCProviderEndpoint) (map[lavasession.RouterKey]lavasession.RPCProviderEndpoint, error) { - returnedBatch := map[lavasession.RouterKey]lavasession.RPCProviderEndpoint{} - routesToCheck := map[lavasession.RouterKey]bool{} +func (cri *chainRouterImpl) BatchNodeUrlsByServices(rpcProviderEndpoint lavasession.RPCProviderEndpoint, chainParser ChainParser) (map[string]lavasession.RPCProviderEndpoint, error) { + returnedBatch := map[string]lavasession.RPCProviderEndpoint{} + routesToCheck := map[string]bool{} methodRoutes := map[string]int{} + httpRootRouteSet := false + autoGeneratedInternalPaths := map[string]struct{}{} + for _, nodeUrl := range rpcProviderEndpoint.NodeUrls { routerKey := lavasession.NewRouterKey(nodeUrl.Addons) if len(nodeUrl.Methods) > 0 { @@ -114,10 +178,36 @@ func (cri *chainRouterImpl) BatchNodeUrlsByServices(rpcProviderEndpoint lavasess methodRoutes[methodRoutesUnique] = len(methodRoutes) existing = len(methodRoutes) } - routerKey = routerKey.ApplyMethodsRoute(existing) + routerKey.ApplyMethodsRoute(existing) + } + 
routerKey.ApplyInternalPath(nodeUrl.InternalPath) + isWs, err := IsUrlWebSocket(nodeUrl.Url) + // Some parsing may fail because of gRPC + if err == nil && isWs { + // now change the router key to fit the websocket extension key. + nodeUrl.Addons = append(nodeUrl.Addons, WebSocketExtension) + routerKey.SetExtensions(nodeUrl.Addons) + } + + _, isAlreadyAutoGenerated := autoGeneratedInternalPaths[nodeUrl.InternalPath] + cri.setRouterKeyInBatch(nodeUrl, returnedBatch, routerKey, rpcProviderEndpoint, isAlreadyAutoGenerated) // will override existing entries + + if nodeUrl.InternalPath == "" { // root path + if !isWs { + httpRootRouteSet = true + } + + err = cri.autoGenerateMissingInternalPaths(isWs, nodeUrl, routerKey, autoGeneratedInternalPaths, rpcProviderEndpoint, chainParser, returnedBatch) + if err != nil { + return nil, err + } } - cri.parseNodeUrl(nodeUrl, returnedBatch, routerKey, rpcProviderEndpoint) } + + if !httpRootRouteSet && chainParser.IsInternalPathEnabled("", rpcProviderEndpoint.ApiInterface, "") { + return nil, utils.LavaFormatError("HTTP/HTTPS is mandatory. 
It is recommended to configure both HTTP/HTTP and WS/WSS.", nil, utils.LogAttr("nodeUrls", rpcProviderEndpoint.NodeUrls)) + } + if len(returnedBatch) == 0 { return nil, utils.LavaFormatError("invalid batch, routes are empty", nil, utils.LogAttr("endpoint", rpcProviderEndpoint)) } @@ -127,52 +217,45 @@ func (cri *chainRouterImpl) BatchNodeUrlsByServices(rpcProviderEndpoint lavasess return nil, utils.LavaFormatError("invalid batch, missing regular route for method route", nil, utils.LogAttr("routerKey", routerKey)) } } + utils.LavaFormatDebug("batched nodeUrls by services", utils.LogAttr("batch", returnedBatch)) return returnedBatch, nil } -func (*chainRouterImpl) parseNodeUrl(nodeUrl common.NodeUrl, returnedBatch map[lavasession.RouterKey]lavasession.RPCProviderEndpoint, routerKey lavasession.RouterKey, rpcProviderEndpoint lavasession.RPCProviderEndpoint) { - u, err := url.Parse(nodeUrl.Url) - // Some parsing may fail because of gRPC - if err == nil && (u.Scheme == "ws" || u.Scheme == "wss") { - // if websocket, check if we have a router key for http already. if not add a websocket router key - // so in case we didn't get an http endpoint, we can use the ws one. - if _, ok := returnedBatch[routerKey]; !ok { - returnedBatch[routerKey] = lavasession.RPCProviderEndpoint{ - NetworkAddress: rpcProviderEndpoint.NetworkAddress, - ChainID: rpcProviderEndpoint.ChainID, - ApiInterface: rpcProviderEndpoint.ApiInterface, - Geolocation: rpcProviderEndpoint.Geolocation, - NodeUrls: []common.NodeUrl{nodeUrl}, - } - } - // now change the router key to fit the websocket extension key. 
- nodeUrl.Addons = append(nodeUrl.Addons, WebSocketExtension) - routerKey = lavasession.NewRouterKey(nodeUrl.Addons) - } - - if existingEndpoint, ok := returnedBatch[routerKey]; !ok { - returnedBatch[routerKey] = lavasession.RPCProviderEndpoint{ +func (*chainRouterImpl) setRouterKeyInBatch(nodeUrl common.NodeUrl, returnedBatch map[string]lavasession.RPCProviderEndpoint, routerKey lavasession.RouterKey, rpcProviderEndpoint lavasession.RPCProviderEndpoint, overrideExistingEntry bool) { + // if the router key does not exit, create it anyway + // if we need to override, override + // if it exists and we should not override, add the node url to the existing list + routerKeyString := routerKey.String() + if existingEndpoint, ok := returnedBatch[routerKeyString]; ok && !overrideExistingEntry { + // setting the incoming url first as it might be http while existing is websocket. (we prioritize http over ws when possible) + returnedBatch[routerKeyString] = lavasession.RPCProviderEndpoint{ NetworkAddress: rpcProviderEndpoint.NetworkAddress, ChainID: rpcProviderEndpoint.ChainID, ApiInterface: rpcProviderEndpoint.ApiInterface, Geolocation: rpcProviderEndpoint.Geolocation, - NodeUrls: []common.NodeUrl{nodeUrl}, + NodeUrls: append([]common.NodeUrl{nodeUrl}, existingEndpoint.NodeUrls...), } - } else { - // setting the incoming url first as it might be http while existing is websocket. (we prioritize http over ws when possible) - existingEndpoint.NodeUrls = append([]common.NodeUrl{nodeUrl}, existingEndpoint.NodeUrls...) 
- returnedBatch[routerKey] = existingEndpoint + + return + } + + returnedBatch[routerKeyString] = lavasession.RPCProviderEndpoint{ + NetworkAddress: rpcProviderEndpoint.NetworkAddress, + ChainID: rpcProviderEndpoint.ChainID, + ApiInterface: rpcProviderEndpoint.ApiInterface, + Geolocation: rpcProviderEndpoint.Geolocation, + NodeUrls: []common.NodeUrl{nodeUrl}, } } func newChainRouter(ctx context.Context, nConns uint, rpcProviderEndpoint lavasession.RPCProviderEndpoint, chainParser ChainParser, proxyConstructor func(context.Context, uint, lavasession.RPCProviderEndpoint, ChainParser) (ChainProxy, error)) (*chainRouterImpl, error) { - chainProxyRouter := map[lavasession.RouterKey][]chainRouterEntry{} + chainProxyRouter := map[string][]chainRouterEntry{} cri := chainRouterImpl{ lock: &sync.RWMutex{}, } - requiredMap := map[requirementSt]struct{}{} - supportedMap := map[requirementSt]struct{}{} - rpcProviderEndpointBatch, err := cri.BatchNodeUrlsByServices(rpcProviderEndpoint) + requiredMap := map[string]struct{}{} // key is requirement + supportedMap := map[string]requirement{} // key is requirement + rpcProviderEndpointBatch, err := cri.BatchNodeUrlsByServices(rpcProviderEndpoint, chainParser) if err != nil { return nil, err } @@ -185,19 +268,21 @@ func newChainRouter(ctx context.Context, nConns uint, rpcProviderEndpoint lavase // this function calculated all routing combinations and populates them for verification at the end of the function updateRouteCombinations := func(extensions, addons []string) (fullySupportedRouterKey lavasession.RouterKey) { allExtensionsRouterKey := lavasession.NewRouterKey(extensions) - requirement := requirementSt{ - extensions: allExtensionsRouterKey, - addon: "", + requirement := requirement{ + RouterKey: allExtensionsRouterKey, + addon: "", } for _, addon := range addons { populateRequiredForAddon(addon, extensions, requiredMap) requirement.addon = addon - supportedMap[requirement] = struct{}{} + supportedMap[requirement.String()] = 
requirement addonsSupportedMap[addon] = struct{}{} } return allExtensionsRouterKey } routerKey := updateRouteCombinations(extensions, addons) + routerKey.ApplyInternalPath(rpcProviderEndpointEntry.NodeUrls[0].InternalPath) + routerKeyStr := routerKey.String() methodsRouted := map[string]struct{}{} methods := rpcProviderEndpointEntry.NodeUrls[0].Methods if len(methods) > 0 { @@ -216,14 +301,14 @@ func newChainRouter(ctx context.Context, nConns uint, rpcProviderEndpoint lavase addonsSupported: addonsSupportedMap, methodsRouted: methodsRouted, } - if chainRouterEntries, ok := chainProxyRouter[routerKey]; !ok { - chainProxyRouter[routerKey] = []chainRouterEntry{chainRouterEntryInst} + if chainRouterEntries, ok := chainProxyRouter[routerKeyStr]; !ok { + chainProxyRouter[routerKeyStr] = []chainRouterEntry{chainRouterEntryInst} } else { if len(methodsRouted) > 0 { // if there are routed methods we want this in the beginning to intercept them - chainProxyRouter[routerKey] = append([]chainRouterEntry{chainRouterEntryInst}, chainRouterEntries...) + chainProxyRouter[routerKeyStr] = append([]chainRouterEntry{chainRouterEntryInst}, chainRouterEntries...) } else { - chainProxyRouter[routerKey] = append(chainRouterEntries, chainRouterEntryInst) + chainProxyRouter[routerKeyStr] = append(chainRouterEntries, chainRouterEntryInst) } } } @@ -234,22 +319,22 @@ func newChainRouter(ctx context.Context, nConns uint, rpcProviderEndpoint lavase _, apiCollection, hasSubscriptionInSpec := chainParser.GetParsingByTag(spectypes.FUNCTION_TAG_SUBSCRIBE) // validating we have websocket support for subscription supported specs. 
webSocketSupported := false - for key := range supportedMap { - if key.IsRequirementMet(WebSocketExtension) { + for _, requirement := range supportedMap { + if requirement.IsRequirementMet(WebSocketExtension) { webSocketSupported = true + break } } if hasSubscriptionInSpec && apiCollection.Enabled && !webSocketSupported { - err := utils.LavaFormatError("subscriptions are applicable for this chain, but websocket is not provided in 'supported' map. By not setting ws/wss your provider wont be able to accept ws subscriptions, therefore might receive less rewards and lower QOS score.", nil, + return nil, utils.LavaFormatError("subscriptions are applicable for this chain, but websocket is not provided in 'supported' map. By not setting ws/wss your provider wont be able to accept ws subscriptions, therefore might receive less rewards and lower QOS score.", nil, utils.LogAttr("apiInterface", apiCollection.CollectionData.ApiInterface), utils.LogAttr("supportedMap", supportedMap), utils.LogAttr("required", WebSocketExtension), ) - if !IgnoreSubscriptionNotConfiguredError { - return nil, err - } } + utils.LavaFormatDebug("router keys", utils.LogAttr("chainProxyRouter", chainProxyRouter)) + // make sure all chainProxyRouter entries have one without a method routing for routerKey, chainRouterEntries := range chainProxyRouter { // get the last entry, if it has methods routed, we need to error out @@ -260,40 +345,37 @@ func newChainRouter(ctx context.Context, nConns uint, rpcProviderEndpoint lavase } cri.chainProxyRouter = chainProxyRouter + utils.LavaFormatDebug("chainRouter created", utils.LogAttr("chainProxyRouter", chainProxyRouter)) return &cri, nil } -type requirementSt struct { - extensions lavasession.RouterKey - addon string +type requirement struct { + lavasession.RouterKey + addon string } -func (rs *requirementSt) String() string { - return string(rs.extensions) + rs.addon +func (rs *requirement) String() string { + return rs.RouterKey.String() + "addon:" + rs.addon + 
lavasession.RouterKeySeparator } -func (rs *requirementSt) IsRequirementMet(requirement string) bool { - return strings.Contains(string(rs.extensions), requirement) || strings.Contains(rs.addon, requirement) +func (rs *requirement) IsRequirementMet(requirement string) bool { + return rs.RouterKey.HasExtension(requirement) || strings.Contains(rs.addon, requirement) } -func populateRequiredForAddon(addon string, extensions []string, required map[requirementSt]struct{}) { - if len(extensions) == 0 { - required[requirementSt{ - extensions: lavasession.NewRouterKey([]string{}), - addon: addon, - }] = struct{}{} - return +func populateRequiredForAddon(addon string, extensions []string, required map[string]struct{}) { + requirement := requirement{ + RouterKey: lavasession.NewRouterKey(extensions), + addon: addon, } - requirement := requirementSt{ - extensions: lavasession.NewRouterKey(extensions), - addon: addon, - } - if _, ok := required[requirement]; ok { + + requirementKey := requirement.String() + if _, ok := required[requirementKey]; ok { // already handled return } - required[requirement] = struct{}{} + + required[requirementKey] = struct{}{} for i := 0; i < len(extensions); i++ { extensionsWithoutI := make([]string, len(extensions)-1) copy(extensionsWithoutI[:i], extensions[:i]) diff --git a/protocol/chainlib/chain_router_test.go b/protocol/chainlib/chain_router_test.go index 4705f08927..7d80f343a1 100644 --- a/protocol/chainlib/chain_router_test.go +++ b/protocol/chainlib/chain_router_test.go @@ -2,6 +2,7 @@ package chainlib import ( "context" + "fmt" "log" "net" "os" @@ -39,8 +40,6 @@ func TestChainRouterWithDisabledWebSocketInSpec(t *testing.T) { chainParser, err := NewChainParser(apiInterface) require.NoError(t, err) - IgnoreSubscriptionNotConfiguredError = false - addonsOptions := []string{"-addon-", "-addon2-"} extensionsOptions := []string{"-test-", "-test2-", "-test3-"} @@ -399,8 +398,6 @@ func TestChainRouterWithEnabledWebSocketInSpec(t *testing.T) { 
chainParser, err := NewChainParser(apiInterface) require.NoError(t, err) - IgnoreSubscriptionNotConfiguredError = false - addonsOptions := []string{"-addon-", "-addon2-"} extensionsOptions := []string{"-test-", "-test2-", "-test3-"} @@ -794,8 +791,6 @@ func TestChainRouterWithMethodRoutes(t *testing.T) { chainParser, err := NewChainParser(apiInterface) require.NoError(t, err) - IgnoreSubscriptionNotConfiguredError = false - addonsOptions := []string{"-addon-", "-addon2-"} extensionsOptions := []string{"-test-", "-test2-", "-test3-"} @@ -1110,7 +1105,7 @@ func TestChainRouterWithMethodRoutes(t *testing.T) { } chainMsg, err := chainParser.ParseMsg(api, nil, "", nil, extension) require.NoError(t, err) - chainProxy, err := chainRouter.GetChainProxySupporting(ctx, chainMsg.GetApiCollection().CollectionData.AddOn, common.GetExtensionNames(chainMsg.GetExtensions()), api) + chainProxy, err := chainRouter.GetChainProxySupporting(ctx, chainMsg.GetApiCollection().CollectionData.AddOn, common.GetExtensionNames(chainMsg.GetExtensions()), api, "") require.NoError(t, err) _, urlFromProxy := chainProxy.GetChainProxyInformation() require.Equal(t, url, urlFromProxy, "chainMsg: %+v, ---chainRouter: %+v", chainMsg, chainRouter) @@ -1202,3 +1197,1028 @@ func TestMain(m *testing.M) { listener.Close() os.Exit(code) } + +func TestChainRouterWithInternalPaths(t *testing.T) { + type play struct { + name string + specApiCollections []*spectypes.ApiCollection + apiInterface string + nodeUrls []common.NodeUrl + expectedServicesToNodeUrls map[string][]common.NodeUrl + expectedError bool + } + + playBook := []play{} + + apiInterfaces := []string{spectypes.APIInterfaceJsonRPC, spectypes.APIInterfaceTendermintRPC} + for _, apiInterface := range apiInterfaces { + playBook = append(playBook, []play{ + { + name: "no_internal_paths_in_spec__single_http_node_url_configured", + apiInterface: apiInterface, + specApiCollections: []*spectypes.ApiCollection{ + { + Enabled: true, + CollectionData: 
spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + }, + nodeUrls: []common.NodeUrl{ + { + Url: "https://localhost:1234", + InternalPath: "", + }, + }, + expectedServicesToNodeUrls: map[string][]common.NodeUrl{ + "||": {{Url: "https://localhost:1234", InternalPath: ""}}, + }, + }, + { + name: "no_internal_paths_in_spec__multiple_http_node_urls_configured", + apiInterface: apiInterface, + specApiCollections: []*spectypes.ApiCollection{ + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + }, + nodeUrls: []common.NodeUrl{ + { + Url: "https://localhost:1234", + InternalPath: "", + }, + { + Url: "https://localhost:5678", + InternalPath: "", + }, + }, + expectedServicesToNodeUrls: map[string][]common.NodeUrl{ + "||": { + {Url: "https://localhost:1234", InternalPath: ""}, + {Url: "https://localhost:5678", InternalPath: ""}, + }, + }, + }, + { + name: "no_internal_paths_in_spec__single_ws_node_url__should_error", + apiInterface: apiInterface, + specApiCollections: []*spectypes.ApiCollection{ + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + }, + nodeUrls: []common.NodeUrl{ + { + Url: "wss://localhost:1234/ws", + InternalPath: "", + }, + }, + expectedError: true, + }, + { + name: "no_internal_paths_in_spec__both_ws_and_http_node_urls", + apiInterface: apiInterface, + specApiCollections: []*spectypes.ApiCollection{ + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + }, + nodeUrls: []common.NodeUrl{ + { + Url: "https://localhost:1234", + InternalPath: "", + }, + { + Url: "wss://localhost:1234/ws", + InternalPath: "", + }, + }, + expectedServicesToNodeUrls: map[string][]common.NodeUrl{ + "||": {{Url: 
"https://localhost:1234", InternalPath: ""}}, + "|websocket|": {{Url: "wss://localhost:1234/ws", InternalPath: ""}}, + }, + }, + { + name: "with_internal_paths_in_spec__single_http_node_url_configured__not_covering_all_internal_paths", + apiInterface: apiInterface, + specApiCollections: []*spectypes.ApiCollection{ + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "/X", + Type: "POST", + AddOn: "", + }, + InheritanceApis: []*spectypes.CollectionData{ + { + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + }, + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "/Y", + Type: "POST", + AddOn: "", + }, + InheritanceApis: []*spectypes.CollectionData{ + { + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + }, + }, + nodeUrls: []common.NodeUrl{ + { + Url: "https://localhost:1234", + InternalPath: "", + }, + }, + expectedServicesToNodeUrls: map[string][]common.NodeUrl{ + "||": {{Url: "https://localhost:1234", InternalPath: ""}}, + "||internal-path:/X|": {{Url: "https://localhost:1234/X", InternalPath: "/X"}}, + "||internal-path:/Y|": {{Url: "https://localhost:1234/Y", InternalPath: "/Y"}}, + }, + }, + { + name: "with_internal_paths_in_spec__multiple_http_node_urls_configured__covering_some_internal_paths", + apiInterface: apiInterface, + specApiCollections: []*spectypes.ApiCollection{ + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "/X", + Type: "POST", + AddOn: "", + }, + InheritanceApis: 
[]*spectypes.CollectionData{ + { + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + }, + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "/Y", + Type: "POST", + AddOn: "", + }, + InheritanceApis: []*spectypes.CollectionData{ + { + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + }, + }, + nodeUrls: []common.NodeUrl{ + { + Url: "https://localhost:1234", + InternalPath: "", + }, + { + Url: "https://localhost:1234/X", + InternalPath: "/X", + }, + }, + expectedServicesToNodeUrls: map[string][]common.NodeUrl{ + "||": {{Url: "https://localhost:1234", InternalPath: ""}}, + "||internal-path:/X|": {{Url: "https://localhost:1234/X", InternalPath: "/X"}}, + "||internal-path:/Y|": {{Url: "https://localhost:1234/Y", InternalPath: "/Y"}}, + }, + }, + { + name: "with_internal_paths_in_spec__multiple_http_node_urls_configured__covering_all_internal_paths", + apiInterface: apiInterface, + specApiCollections: []*spectypes.ApiCollection{ + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "/X", + Type: "POST", + AddOn: "", + }, + InheritanceApis: []*spectypes.CollectionData{ + { + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + }, + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "/Y", + Type: "POST", + AddOn: "", + }, + InheritanceApis: []*spectypes.CollectionData{ + { + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + }, + }, + nodeUrls: []common.NodeUrl{ + { + Url: "https://localhost:1234", + InternalPath: "", + }, + { + Url: "https://localhost:1234/X", + InternalPath: "/X", + }, + { + Url: 
"https://localhost:1234/Y", + InternalPath: "/Y", + }, + }, + expectedServicesToNodeUrls: map[string][]common.NodeUrl{ + "||": {{Url: "https://localhost:1234", InternalPath: ""}}, + "||internal-path:/X|": {{Url: "https://localhost:1234/X", InternalPath: "/X"}}, + "||internal-path:/Y|": {{Url: "https://localhost:1234/Y", InternalPath: "/Y"}}, + }, + }, + { + name: "with_internal_paths_in_spec__multiple_http_node_urls_configured__no_root_internal_path__should_error", + apiInterface: apiInterface, + specApiCollections: []*spectypes.ApiCollection{ + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "/X", + Type: "POST", + AddOn: "", + }, + InheritanceApis: []*spectypes.CollectionData{ + { + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + }, + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "/Y", + Type: "POST", + AddOn: "", + }, + InheritanceApis: []*spectypes.CollectionData{ + { + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + }, + }, + nodeUrls: []common.NodeUrl{ + { + Url: "https://localhost:1234/X", + InternalPath: "/X", + }, + { + Url: "https://localhost:1234/Y", + InternalPath: "/Y", + }, + }, + expectedError: true, + }, + { + name: "with_internal_paths_in_spec__multiple_http_node_urls_and_ws_configured__covering_all_internal_paths", + apiInterface: apiInterface, + specApiCollections: []*spectypes.ApiCollection{ + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "/X", + Type: "POST", + AddOn: "", + }, + 
InheritanceApis: []*spectypes.CollectionData{ + { + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + }, + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "/Y", + Type: "POST", + AddOn: "", + }, + InheritanceApis: []*spectypes.CollectionData{ + { + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + }, + }, + nodeUrls: []common.NodeUrl{ + { + Url: "wss://localhost:1234/ws", + InternalPath: "", + }, + { + Url: "https://localhost:5678", + InternalPath: "", + }, + { + Url: "https://localhost:5678/X", + InternalPath: "/X", + }, + { + Url: "https://localhost:9012/Y", + InternalPath: "/Y", + }, + }, + expectedServicesToNodeUrls: map[string][]common.NodeUrl{ + "||": {{Url: "https://localhost:5678", InternalPath: ""}}, + "||internal-path:/X|": {{Url: "https://localhost:5678/X", InternalPath: "/X"}}, + "||internal-path:/Y|": {{Url: "https://localhost:9012/Y", InternalPath: "/Y"}}, + "|websocket|": {{Url: "wss://localhost:1234/ws", InternalPath: ""}}, + }, + }, + { + name: "with_internal_paths_in_spec__only_root_http_and_ws_configured", + apiInterface: apiInterface, + specApiCollections: []*spectypes.ApiCollection{ + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "/X", + Type: "POST", + AddOn: "", + }, + InheritanceApis: []*spectypes.CollectionData{ + { + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + }, + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "/Y", + Type: "POST", + AddOn: "", + }, + InheritanceApis: []*spectypes.CollectionData{ + { + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + 
}, + }, + }, + nodeUrls: []common.NodeUrl{ + { + Url: "https://localhost:1234", + InternalPath: "", + }, + { + Url: "wss://localhost:1234", + InternalPath: "", + }, + }, + expectedServicesToNodeUrls: map[string][]common.NodeUrl{ + "||": {{Url: "https://localhost:1234", InternalPath: ""}}, + "||internal-path:/X|": {{Url: "https://localhost:1234/X", InternalPath: "/X"}}, + "||internal-path:/Y|": {{Url: "https://localhost:1234/Y", InternalPath: "/Y"}}, + "|websocket|": {{Url: "wss://localhost:1234", InternalPath: ""}}, + }, + }, + { + name: "with_internal_paths_in_spec__only_root_http_and_ws_and_one_out_of_two_internal_paths_are_configured", + apiInterface: apiInterface, + specApiCollections: []*spectypes.ApiCollection{ + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "/X", + Type: "POST", + AddOn: "", + }, + InheritanceApis: []*spectypes.CollectionData{ + { + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + }, + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "/Y", + Type: "POST", + AddOn: "", + }, + InheritanceApis: []*spectypes.CollectionData{ + { + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + }, + }, + nodeUrls: []common.NodeUrl{ + { + Url: "https://localhost:1234", + InternalPath: "", + }, + { + Url: "https://localhost:5678/X", + InternalPath: "/X", + }, + { + Url: "wss://localhost:1234", + InternalPath: "", + }, + }, + expectedServicesToNodeUrls: map[string][]common.NodeUrl{ + "||": {{Url: "https://localhost:1234", InternalPath: ""}}, + "||internal-path:/X|": {{Url: "https://localhost:5678/X", InternalPath: "/X"}}, + "||internal-path:/Y|": {{Url: "https://localhost:1234/Y", InternalPath: "/Y"}}, + 
"|websocket|": {{Url: "wss://localhost:1234", InternalPath: ""}}, + }, + }, + { + name: "with_internal_paths_and_ws_internal_paths_in_spec__only_http_is_configured", + apiInterface: apiInterface, + specApiCollections: []*spectypes.ApiCollection{ + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "/X", + Type: "POST", + AddOn: "", + }, + InheritanceApis: []*spectypes.CollectionData{ + { + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + }, + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "/WS", + Type: "POST", + AddOn: "", + }, + InheritanceApis: []*spectypes.CollectionData{ + { + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + ParseDirectives: []*spectypes.ParseDirective{{ + FunctionTag: spectypes.FUNCTION_TAG_SUBSCRIBE, + }}, + }, + }, + nodeUrls: []common.NodeUrl{ + { + Url: "https://localhost:1234", + InternalPath: "", + }, + }, + expectedServicesToNodeUrls: map[string][]common.NodeUrl{ + "||": {{Url: "https://localhost:1234", InternalPath: ""}}, + "||internal-path:/X|": {{Url: "https://localhost:1234/X", InternalPath: "/X"}}, + }, + }, + { + name: "with_internal_paths_and_ws_internal_paths_in_spec__http_and_ws_is_configured", + apiInterface: apiInterface, + specApiCollections: []*spectypes.ApiCollection{ + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "/X", + Type: "POST", + AddOn: "", + }, + InheritanceApis: []*spectypes.CollectionData{ + { + ApiInterface: apiInterface, + InternalPath: "", + Type: 
"POST", + AddOn: "", + }, + }, + }, + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "/WS", + Type: "POST", + AddOn: "", + }, + InheritanceApis: []*spectypes.CollectionData{ + { + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + ParseDirectives: []*spectypes.ParseDirective{{ + FunctionTag: spectypes.FUNCTION_TAG_SUBSCRIBE, + }}, + }, + }, + nodeUrls: []common.NodeUrl{ + { + Url: "https://localhost:1234", + InternalPath: "", + }, + { + Url: "wss://localhost:5678", + InternalPath: "", + }, + }, + expectedServicesToNodeUrls: map[string][]common.NodeUrl{ + "||": {{Url: "https://localhost:1234", InternalPath: ""}}, + "||internal-path:/X|": {{Url: "https://localhost:1234/X", InternalPath: "/X"}}, + "|websocket|": {{Url: "wss://localhost:5678", InternalPath: ""}}, + "|websocket|internal-path:/WS|": {{Url: "wss://localhost:5678/WS", InternalPath: "/WS"}}, + }, + }, + { + name: "with_internal_paths_and_multiple_ws_internal_paths_in_spec__http_and_ws_is_configured", + apiInterface: apiInterface, + specApiCollections: []*spectypes.ApiCollection{ + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "/X", + Type: "POST", + AddOn: "", + }, + InheritanceApis: []*spectypes.CollectionData{ + { + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + ParseDirectives: []*spectypes.ParseDirective{{ + FunctionTag: spectypes.FUNCTION_TAG_SUBSCRIBE, + }}, + }, + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "/WS", + Type: "POST", + AddOn: "", + }, + InheritanceApis: []*spectypes.CollectionData{ + { + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + 
}, + ParseDirectives: []*spectypes.ParseDirective{{ + FunctionTag: spectypes.FUNCTION_TAG_SUBSCRIBE, + }}, + }, + }, + nodeUrls: []common.NodeUrl{ + { + Url: "https://localhost:1234", + InternalPath: "", + }, + { + Url: "wss://localhost:1234", + InternalPath: "", + }, + }, + expectedServicesToNodeUrls: map[string][]common.NodeUrl{ + "||": {{Url: "https://localhost:1234", InternalPath: ""}}, + "|websocket|": {{Url: "wss://localhost:1234", InternalPath: ""}}, + "|websocket|internal-path:/WS|": {{Url: "wss://localhost:1234/WS", InternalPath: "/WS"}}, + "|websocket|internal-path:/X|": {{Url: "wss://localhost:1234/X", InternalPath: "/X"}}, + }, + }, + { + name: "with_internal_paths_and_mixed_internal_paths_in_spec_and_root_is_disabled_http_only_is_configured", + apiInterface: apiInterface, + specApiCollections: []*spectypes.ApiCollection{ + { + Enabled: false, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "/X", + Type: "POST", + AddOn: "", + }, + InheritanceApis: []*spectypes.CollectionData{ + { + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + }, + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "/WS", + Type: "POST", + AddOn: "", + }, + InheritanceApis: []*spectypes.CollectionData{ + { + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + ParseDirectives: []*spectypes.ParseDirective{{ + FunctionTag: spectypes.FUNCTION_TAG_SUBSCRIBE, + }}, + }, + }, + nodeUrls: []common.NodeUrl{ + { + Url: "wss://localhost:1234", + InternalPath: "/WS", + }, + }, + expectedServicesToNodeUrls: map[string][]common.NodeUrl{ + "|websocket|internal-path:/WS|": {{Url: "wss://localhost:1234", InternalPath: "/WS"}}, + }, + }, + { + name: 
"with_internal_paths_and_mixed_internal_paths_in_spec_and_root_is_disabled_ws_only_is_configured", + apiInterface: apiInterface, + specApiCollections: []*spectypes.ApiCollection{ + { + Enabled: false, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "/X", + Type: "POST", + AddOn: "", + }, + InheritanceApis: []*spectypes.CollectionData{ + { + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + }, + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "/WS", + Type: "POST", + AddOn: "", + }, + InheritanceApis: []*spectypes.CollectionData{ + { + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + ParseDirectives: []*spectypes.ParseDirective{{ + FunctionTag: spectypes.FUNCTION_TAG_SUBSCRIBE, + }}, + }, + }, + nodeUrls: []common.NodeUrl{ + { + Url: "wss://localhost:1234", + InternalPath: "/WS", + }, + }, + expectedServicesToNodeUrls: map[string][]common.NodeUrl{ + "|websocket|internal-path:/WS|": {{Url: "wss://localhost:1234", InternalPath: "/WS"}}, + }, + }, + { + name: "with_internal_paths_and_mixed_internal_paths_in_spec_and_root_is_disabled_http_and_ws_is_configured", + apiInterface: apiInterface, + specApiCollections: []*spectypes.ApiCollection{ + { + Enabled: false, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "/X", + Type: "POST", + AddOn: "", + }, + InheritanceApis: []*spectypes.CollectionData{ + { + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + }, + { + Enabled: true, + CollectionData: 
spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "/WS", + Type: "POST", + AddOn: "", + }, + InheritanceApis: []*spectypes.CollectionData{ + { + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + ParseDirectives: []*spectypes.ParseDirective{{ + FunctionTag: spectypes.FUNCTION_TAG_SUBSCRIBE, + }}, + }, + }, + nodeUrls: []common.NodeUrl{ + { + Url: "https://localhost:1234", + InternalPath: "/X", + }, + { + Url: "wss://localhost:1234", + InternalPath: "/WS", + }, + }, + expectedServicesToNodeUrls: map[string][]common.NodeUrl{ + "||internal-path:/X|": {{Url: "https://localhost:1234", InternalPath: "/X"}}, + "|websocket|internal-path:/WS|": {{Url: "wss://localhost:1234", InternalPath: "/WS"}}, + }, + }, + }...) + } + + for _, play := range playBook { + t.Run(play.apiInterface+"__"+play.name, func(t *testing.T) { + chainParser, err := NewChainParser(play.apiInterface) + require.NoError(t, err) + + spec := testcommon.CreateMockSpec() + spec.ApiCollections = play.specApiCollections + chainParser.SetSpec(spec) + + endpoint := lavasession.RPCProviderEndpoint{ + NetworkAddress: lavasession.NetworkAddressData{}, + ChainID: spec.Index, + ApiInterface: play.apiInterface, + Geolocation: 1, + NodeUrls: play.nodeUrls, + } + + chainRouter := &chainRouterImpl{} + + nodeUrlsByService, err := chainRouter.BatchNodeUrlsByServices(endpoint, chainParser) + if play.expectedError { + require.Error(t, err) + return + } + + require.NoError(t, err) + + require.Equal(t, len(play.expectedServicesToNodeUrls), len(nodeUrlsByService), nodeUrlsByService) + actualNodeUrlsCount := 0 + for routerKey, actualEndpoint := range nodeUrlsByService { + // Check that the router key is in the expected services + require.Contains(t, play.expectedServicesToNodeUrls, routerKey, routerKey) + actualNodeUrlsCount += len(actualEndpoint.NodeUrls) + + expectedNodeUrls := play.expectedServicesToNodeUrls[routerKey] + require.Len(t, actualEndpoint.NodeUrls, 
len(expectedNodeUrls), + fmt.Sprintf("RouterKey: %v, NodeUrls: %v", routerKey, actualEndpoint.NodeUrls)) + + for _, actualNodeUrl := range actualEndpoint.NodeUrls { + found := false + for _, expectedNodeUrls := range expectedNodeUrls { + if expectedNodeUrls.Url == actualNodeUrl.Url && expectedNodeUrls.InternalPath == actualNodeUrl.InternalPath { + found = true + break + } + } + require.True(t, found, actualNodeUrl) + } + } + }) + } +} diff --git a/protocol/chainlib/chainlib.go b/protocol/chainlib/chainlib.go index 6fbf4ba536..41c024ae04 100644 --- a/protocol/chainlib/chainlib.go +++ b/protocol/chainlib/chainlib.go @@ -3,6 +3,7 @@ package chainlib import ( "context" "fmt" + "net/http" "time" "github.com/lavanet/lava/v4/protocol/chainlib/chainproxy/rpcInterfaceMessages" @@ -11,13 +12,13 @@ import ( "github.com/lavanet/lava/v4/protocol/common" "github.com/lavanet/lava/v4/protocol/lavasession" "github.com/lavanet/lava/v4/protocol/metrics" + "github.com/lavanet/lava/v4/utils" pairingtypes "github.com/lavanet/lava/v4/x/pairing/types" spectypes "github.com/lavanet/lava/v4/x/spec/types" ) -var ( - IgnoreSubscriptionNotConfiguredError = true - IgnoreSubscriptionNotConfiguredErrorFlag = "ignore-subscription-not-configured-error" +const ( + INTERNAL_ADDRESS = "internal-addr" ) func NewChainParser(apiInterface string) (chainParser ChainParser, err error) { @@ -44,6 +45,10 @@ func NewChainListener( refererData *RefererData, consumerWsSubscriptionManager *ConsumerWSSubscriptionManager, ) (ChainListener, error) { + if listenEndpoint.NetworkAddress == INTERNAL_ADDRESS { + utils.LavaFormatDebug("skipping chain listener for internal address") + return NewEmptyChainListener(), nil + } switch listenEndpoint.ApiInterface { case spectypes.APIInterfaceJsonRPC: return NewJrpcChainListener(ctx, listenEndpoint, relaySender, healthReporter, rpcConsumerLogs, refererData, consumerWsSubscriptionManager), nil @@ -63,9 +68,12 @@ type ChainParser interface { DataReliabilityParams() (enabled bool, 
dataReliabilityThreshold uint32) ChainBlockStats() (allowedBlockLagForQosSync int64, averageBlockTime time.Duration, blockDistanceForFinalizedData, blocksInFinalizationProof uint32) GetParsingByTag(tag spectypes.FUNCTION_TAG) (parsing *spectypes.ParseDirective, apiCollection *spectypes.ApiCollection, existed bool) + IsTagInCollection(tag spectypes.FUNCTION_TAG, collectionKey CollectionKey) bool + GetAllInternalPaths() []string + IsInternalPathEnabled(internalPath string, apiInterface string, addon string) bool CraftMessage(parser *spectypes.ParseDirective, connectionType string, craftData *CraftData, metadata []pairingtypes.Metadata) (ChainMessageForSend, error) HandleHeaders(metadata []pairingtypes.Metadata, apiCollection *spectypes.ApiCollection, headersDirection spectypes.Header_HeaderType) (filtered []pairingtypes.Metadata, overwriteReqBlock string, ignoredMetadata []pairingtypes.Metadata) - GetVerifications(supported []string) ([]VerificationContainer, error) + GetVerifications(supported []string, internalPath string, apiInterface string) ([]VerificationContainer, error) SeparateAddonsExtensions(supported []string) (addons, extensions []string, err error) SetPolicy(policy PolicyInf, chainId string, apiInterface string) error Active() bool @@ -73,6 +81,8 @@ type ChainParser interface { UpdateBlockTime(newBlockTime time.Duration) GetUniqueName() string ExtensionsParser() *extensionslib.ExtensionParser + ExtractDataFromRequest(*http.Request) (url string, data string, connectionType string, metadata []pairingtypes.Metadata, err error) + SetResponseFromRelayResult(*common.RelayResult) (*http.Response, error) } type ChainMessage interface { @@ -89,6 +99,9 @@ type ChainMessage interface { CheckResponseError(data []byte, httpStatusCode int) (hasError bool, errorMessage string) GetRawRequestHash() ([]byte, error) GetRequestedBlocksHashes() []string + UpdateEarliestInMessage(incomingEarliest int64) bool + SetExtension(extension *spectypes.Extension) + 
GetUsedDefaultValue() bool ChainMessageForSend } @@ -124,7 +137,6 @@ type RelaySender interface { connectionType string, dappID string, consumerIp string, - analytics *metrics.RelayMetrics, metadata []pairingtypes.Metadata, ) (ProtocolMessage, error) SendParsedRelay( @@ -144,7 +156,7 @@ type ChainListener interface { type ChainRouter interface { SendNodeMsg(ctx context.Context, ch chan interface{}, chainMessage ChainMessageForSend, extensions []string) (relayReply *RelayReplyWrapper, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, proxyUrl common.NodeUrl, chainId string, err error) // has to be thread safe, reuse code within ParseMsg as common functionality - ExtensionsSupported([]string) bool + ExtensionsSupported(internalPath string, extensions []string) bool } type ChainProxy interface { @@ -168,3 +180,17 @@ func GetChainRouter(ctx context.Context, nConns uint, rpcProviderEndpoint *lavas } return newChainRouter(ctx, nConns, *rpcProviderEndpoint, chainParser, proxyConstructor) } + +type EmptyChainListener struct{} + +func NewEmptyChainListener() ChainListener { + return &EmptyChainListener{} +} + +func (*EmptyChainListener) Serve(ctx context.Context, cmdFlags common.ConsumerCmdFlags) { + // do nothing +} + +func (*EmptyChainListener) GetListeningAddress() string { + return "" +} diff --git a/protocol/chainlib/chainlib_mock.go b/protocol/chainlib/chainlib_mock.go index fec033fe68..3978d88b83 100644 --- a/protocol/chainlib/chainlib_mock.go +++ b/protocol/chainlib/chainlib_mock.go @@ -282,6 +282,15 @@ func (m *MockChainMessage) EXPECT() *MockChainMessageMockRecorder { return m.recorder } + +// GetUsedDefaultValue mocks base method. +func (m *MockChainMessage) GetUsedDefaultValue() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetUsedDefaultValue") + ret0, _ := ret[0].(bool) + return ret0 +} + // AppendHeader mocks base method. 
func (m *MockChainMessage) GetRequestedBlocksHashes() []string { m.ctrl.T.Helper() @@ -434,6 +443,20 @@ func (m *MockChainMessage) OverrideExtensions(extensionNames []string, extension m.ctrl.Call(m, "OverrideExtensions", extensionNames, extensionParser) } +// OverrideExtensions mocks base method. +func (m *MockChainMessage) SetExtension(extension *types0.Extension) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetExtension", extension) +} + +// OverrideExtensions mocks base method. +func (m *MockChainMessage) UpdateEarliestInMessage(incomingEarliest int64) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateEarliestInMessage", incomingEarliest) + ret0, _ := ret[0].(bool) + return ret0 +} + // OverrideExtensions indicates an expected call of OverrideExtensions. func (mr *MockChainMessageMockRecorder) OverrideExtensions(extensionNames, extensionParser interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() @@ -714,18 +737,18 @@ func (mr *MockRelaySenderMockRecorder) CreateDappKey(userData interface{}) *gomo } // ParseRelay mocks base method. -func (m *MockRelaySender) ParseRelay(ctx context.Context, url, req, connectionType, dappID, consumerIp string, analytics *metrics.RelayMetrics, metadata []types.Metadata) (ProtocolMessage, error) { +func (m *MockRelaySender) ParseRelay(ctx context.Context, url, req, connectionType, dappID, consumerIp string, metadata []types.Metadata) (ProtocolMessage, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ParseRelay", ctx, url, req, connectionType, dappID, consumerIp, analytics, metadata) + ret := m.ctrl.Call(m, "ParseRelay", ctx, url, req, connectionType, dappID, consumerIp, metadata) ret0, _ := ret[0].(ProtocolMessage) ret1, _ := ret[1].(error) return ret0, ret1 } // ParseRelay indicates an expected call of ParseRelay. 
-func (mr *MockRelaySenderMockRecorder) ParseRelay(ctx, url, req, connectionType, dappID, consumerIp, analytics, metadata interface{}) *gomock.Call { +func (mr *MockRelaySenderMockRecorder) ParseRelay(ctx, url, req, connectionType, dappID, consumerIp, metadata interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParseRelay", reflect.TypeOf((*MockRelaySender)(nil).ParseRelay), ctx, url, req, connectionType, dappID, consumerIp, analytics, metadata) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParseRelay", reflect.TypeOf((*MockRelaySender)(nil).ParseRelay), ctx, url, req, connectionType, dappID, consumerIp, metadata) } // SendParsedRelay mocks base method. diff --git a/protocol/chainlib/chainproxy/common.go b/protocol/chainlib/chainproxy/common.go index d0ddf9928e..ba02bc8b1c 100644 --- a/protocol/chainlib/chainproxy/common.go +++ b/protocol/chainlib/chainproxy/common.go @@ -5,6 +5,7 @@ import ( "github.com/goccy/go-json" + "github.com/lavanet/lava/v4/protocol/chainlib/chainproxy/rpcclient" "github.com/lavanet/lava/v4/protocol/parser" pairingtypes "github.com/lavanet/lava/v4/x/pairing/types" spectypes "github.com/lavanet/lava/v4/x/spec/types" @@ -93,6 +94,10 @@ func (dri DefaultRPCInput) GetID() json.RawMessage { return nil } +func (dri DefaultRPCInput) GetError() *rpcclient.JsonError { + return nil +} + func (dri DefaultRPCInput) ParseBlock(inp string) (int64, error) { return parser.ParseDefaultBlockParameter(inp) } diff --git a/protocol/chainlib/chainproxy/rpcInterfaceMessages/common.go b/protocol/chainlib/chainproxy/rpcInterfaceMessages/common.go index fb288637a2..0fc5974272 100644 --- a/protocol/chainlib/chainproxy/rpcInterfaceMessages/common.go +++ b/protocol/chainlib/chainproxy/rpcInterfaceMessages/common.go @@ -5,6 +5,7 @@ import ( "github.com/goccy/go-json" "github.com/lavanet/lava/v4/protocol/chainlib/chainproxy" + "github.com/lavanet/lava/v4/protocol/chainlib/chainproxy/rpcclient" 
"github.com/lavanet/lava/v4/protocol/parser" pairingtypes "github.com/lavanet/lava/v4/x/pairing/types" ) @@ -13,6 +14,7 @@ var WontCalculateBatchHash = sdkerrors.New("Wont calculate batch hash", 892, "wo type ParsableRPCInput struct { Result json.RawMessage + Error *rpcclient.JsonError chainproxy.BaseMessage } @@ -36,6 +38,10 @@ func (pri ParsableRPCInput) GetID() json.RawMessage { return nil } +func (pri ParsableRPCInput) GetError() *rpcclient.JsonError { + return pri.Error +} + type GenericMessage interface { GetHeaders() []pairingtypes.Metadata DisableErrorHandling() diff --git a/protocol/chainlib/chainproxy/rpcInterfaceMessages/grpcMessage.go b/protocol/chainlib/chainproxy/rpcInterfaceMessages/grpcMessage.go index 9734f73327..0bdaa8173a 100644 --- a/protocol/chainlib/chainproxy/rpcInterfaceMessages/grpcMessage.go +++ b/protocol/chainlib/chainproxy/rpcInterfaceMessages/grpcMessage.go @@ -126,6 +126,10 @@ func (gm GrpcMessage) GetID() json.RawMessage { return nil } +func (gm GrpcMessage) GetError() *rpcclient.JsonError { + return nil +} + func (gm GrpcMessage) NewParsableRPCInput(input json.RawMessage) (parser.RPCInput, error) { msgFactory := dynamic.NewMessageFactoryWithDefaults() if gm.methodDesc == nil { diff --git a/protocol/chainlib/chainproxy/rpcInterfaceMessages/grpcMessage_test.go b/protocol/chainlib/chainproxy/rpcInterfaceMessages/grpcMessage_test.go index f049e1c793..2104585c0a 100644 --- a/protocol/chainlib/chainproxy/rpcInterfaceMessages/grpcMessage_test.go +++ b/protocol/chainlib/chainproxy/rpcInterfaceMessages/grpcMessage_test.go @@ -36,8 +36,6 @@ func TestGRPCParseBlock(t *testing.T) { } for _, testCase := range testTable { - testCase := testCase - t.Run(testCase.name, func(t *testing.T) { t.Parallel() @@ -81,9 +79,8 @@ func TestReflectionSupport(t *testing.T) { } for _, testCase := range testTable { - testCase := testCase - t.Run(testCase.name, func(t *testing.T) { + t.Parallel() result := ReflectionSupport(testCase.err) if testCase.err == nil { 
@@ -132,9 +129,8 @@ func TestParseSymbol(t *testing.T) { } for _, testCase := range testTable { - testCase := testCase - t.Run(testCase.name, func(t *testing.T) { + t.Parallel() s, m := ParseSymbol(testCase.input) if s != testCase.expectedS { t.Errorf("expected %q, but got %q", testCase.expectedS, s) diff --git a/protocol/chainlib/chainproxy/rpcInterfaceMessages/jsonRPCMessage.go b/protocol/chainlib/chainproxy/rpcInterfaceMessages/jsonRPCMessage.go index cc31752d3c..59148df36d 100644 --- a/protocol/chainlib/chainproxy/rpcInterfaceMessages/jsonRPCMessage.go +++ b/protocol/chainlib/chainproxy/rpcInterfaceMessages/jsonRPCMessage.go @@ -121,11 +121,7 @@ func (jm JsonrpcMessage) NewParsableRPCInput(input json.RawMessage) (parser.RPCI return nil, utils.LavaFormatError("failed unmarshaling JsonrpcMessage", err, utils.Attribute{Key: "input", Value: input}) } - // Make sure the response does not have an error - if msg.Error != nil && msg.Result == nil { - return nil, utils.LavaFormatError("response is an error message", msg.Error) - } - return ParsableRPCInput{Result: msg.Result}, nil + return ParsableRPCInput{Result: msg.Result, Error: msg.Error}, nil } func (jm JsonrpcMessage) GetParams() interface{} { @@ -147,6 +143,10 @@ func (jm JsonrpcMessage) GetID() json.RawMessage { return jm.ID } +func (jm JsonrpcMessage) GetError() *rpcclient.JsonError { + return jm.Error +} + func (jm JsonrpcMessage) ParseBlock(inp string) (int64, error) { return parser.ParseDefaultBlockParameter(inp) } diff --git a/protocol/chainlib/chainproxy/rpcInterfaceMessages/jsonRPCMessage_test.go b/protocol/chainlib/chainproxy/rpcInterfaceMessages/jsonRPCMessage_test.go index f4113932fc..837969aa5e 100644 --- a/protocol/chainlib/chainproxy/rpcInterfaceMessages/jsonRPCMessage_test.go +++ b/protocol/chainlib/chainproxy/rpcInterfaceMessages/jsonRPCMessage_test.go @@ -77,8 +77,6 @@ func TestJsonrpcMessage_ParseBlock(t *testing.T) { } for _, testCase := range testTable { - testCase := testCase - 
t.Run(testCase.name, func(t *testing.T) { t.Parallel() diff --git a/protocol/chainlib/chainproxy/rpcInterfaceMessages/restMessage.go b/protocol/chainlib/chainproxy/rpcInterfaceMessages/restMessage.go index 1d5c18329a..8d3da122fd 100644 --- a/protocol/chainlib/chainproxy/rpcInterfaceMessages/restMessage.go +++ b/protocol/chainlib/chainproxy/rpcInterfaceMessages/restMessage.go @@ -105,6 +105,10 @@ func (rm RestMessage) GetID() json.RawMessage { return nil } +func (rm RestMessage) GetError() *rpcclient.JsonError { + return nil +} + // ParseBlock parses default block number from string to int func (rm RestMessage) ParseBlock(inp string) (int64, error) { return parser.ParseDefaultBlockParameter(inp) diff --git a/protocol/chainlib/chainproxy/rpcInterfaceMessages/restMessage_test.go b/protocol/chainlib/chainproxy/rpcInterfaceMessages/restMessage_test.go index 3cef8aba0c..1c13605453 100644 --- a/protocol/chainlib/chainproxy/rpcInterfaceMessages/restMessage_test.go +++ b/protocol/chainlib/chainproxy/rpcInterfaceMessages/restMessage_test.go @@ -50,8 +50,6 @@ func TestRestParseBlock(t *testing.T) { } for _, testCase := range testTable { - testCase := testCase - t.Run(testCase.name, func(t *testing.T) { t.Parallel() diff --git a/protocol/chainlib/chainproxy/rpcInterfaceMessages/tendermintRPCMessage_test.go b/protocol/chainlib/chainproxy/rpcInterfaceMessages/tendermintRPCMessage_test.go index 59c355e506..6c8f4b88d6 100644 --- a/protocol/chainlib/chainproxy/rpcInterfaceMessages/tendermintRPCMessage_test.go +++ b/protocol/chainlib/chainproxy/rpcInterfaceMessages/tendermintRPCMessage_test.go @@ -59,8 +59,6 @@ func TestTendermintrpcMessage_ParseBlock(t *testing.T) { } for _, testCase := range testTable { - testCase := testCase - t.Run(testCase.name, func(t *testing.T) { t.Parallel() @@ -111,8 +109,6 @@ func TestGetTendermintRPCError(t *testing.T) { } for _, testCase := range testTable { - testCase := testCase - t.Run(testCase.name, func(t *testing.T) { t.Parallel() @@ -153,8 +149,8 
@@ func TestConvertErrorToRPCError(t *testing.T) { } for _, testCase := range testTable { - testCase := testCase t.Run(testCase.name, func(t *testing.T) { + t.Parallel() errMsg := "" if testCase.err != nil { @@ -205,9 +201,8 @@ func TestIdFromRawMessage(t *testing.T) { } for _, testCase := range testTable { - testCase := testCase - t.Run(testCase.name, func(t *testing.T) { + t.Parallel() result, err := IdFromRawMessage(testCase.rawID) if testCase.expectedErr == false { assert.Equal(t, testCase.expectedResult, result) @@ -308,8 +303,6 @@ func TestConvertTendermintMsg(t *testing.T) { }, } for _, testCase := range testTable { - testCase := testCase - t.Run(testCase.name, func(t *testing.T) { t.Parallel() res, err := ConvertTendermintMsg(testCase.rpcMsg) diff --git a/protocol/chainlib/chainproxy/rpcclient/handler.go b/protocol/chainlib/chainproxy/rpcclient/handler.go index b2775326fc..acedfd97d9 100755 --- a/protocol/chainlib/chainproxy/rpcclient/handler.go +++ b/protocol/chainlib/chainproxy/rpcclient/handler.go @@ -237,9 +237,18 @@ func (h *handler) handleImmediate(msg *JsonrpcMessage) bool { h.handleSubscriptionResultTendermint(msg) return true case msg.isEthereumNotification(): - if strings.HasSuffix(msg.Method, notificationMethodSuffix) { + if strings.HasSuffix(msg.Method, ethereumNotificationMethodSuffix) { h.handleSubscriptionResultEthereum(msg) return true + } else if strings.HasSuffix(msg.Method, solanaNotificationMethodSuffix) { + h.handleSubscriptionResultSolana(msg) + return true + } + return false + case msg.isStarkNetPathfinderNotification(): + if strings.HasSuffix(msg.Method, ethereumNotificationMethodSuffix) { + h.handleSubscriptionResultStarkNetPathfinder(msg) + return true } return false case msg.isResponse(): @@ -251,10 +260,31 @@ func (h *handler) handleImmediate(msg *JsonrpcMessage) bool { } } +func (h *handler) handleSubscriptionResultStarkNetPathfinder(msg *JsonrpcMessage) { + var result integerIdSubscriptionResult + if err := 
json.Unmarshal(msg.Result, &result); err != nil { + utils.LavaFormatTrace("Dropping invalid starknet pathfinder subscription message", + utils.LogAttr("err", err), + utils.LogAttr("result", string(msg.Result)), + ) + h.log.Debug("Dropping invalid subscription message") + return + } + + id := strconv.Itoa(result.ID) + if h.clientSubs[id] != nil { + h.clientSubs[id].deliver(msg) + } +} + // handleSubscriptionResult processes subscription notifications. func (h *handler) handleSubscriptionResultEthereum(msg *JsonrpcMessage) { var result ethereumSubscriptionResult if err := json.Unmarshal(msg.Params, &result); err != nil { + utils.LavaFormatTrace("Dropping invalid ethereum subscription message", + utils.LogAttr("err", err), + utils.LogAttr("params", string(msg.Params)), + ) h.log.Debug("Dropping invalid subscription message") return } @@ -263,9 +293,28 @@ func (h *handler) handleSubscriptionResultEthereum(msg *JsonrpcMessage) { } } +func (h *handler) handleSubscriptionResultSolana(msg *JsonrpcMessage) { + var result integerIdSubscriptionResult + if err := json.Unmarshal(msg.Params, &result); err != nil { + utils.LavaFormatTrace("Dropping invalid solana subscription message", + utils.LogAttr("err", err), + utils.LogAttr("params", string(msg.Params)), + ) + h.log.Debug("Dropping invalid subscription message") + return + } + if h.clientSubs[strconv.Itoa(result.ID)] != nil { + h.clientSubs[strconv.Itoa(result.ID)].deliver(msg) + } +} + func (h *handler) handleSubscriptionResultTendermint(msg *JsonrpcMessage) { var result tendermintSubscriptionResult if err := json.Unmarshal(msg.Result, &result); err != nil { + utils.LavaFormatTrace("Dropping invalid tendermint subscription message", + utils.LogAttr("err", err), + utils.LogAttr("result", string(msg.Result)), + ) h.log.Debug("Dropping invalid subscription message") return } @@ -302,6 +351,15 @@ func (h *handler) handleResponse(msg *JsonrpcMessage) { } else if op.err = json.Unmarshal(msg.Result, &op.sub.subid); op.err == nil 
{ go op.sub.run() h.clientSubs[op.sub.subid] = op.sub + } else { + // This is because StarkNet Pathfinder is returning an integer instead of a string in the result + var integerSubId int + if json.Unmarshal(msg.Result, &integerSubId) == nil { + op.err = nil + op.sub.subid = strconv.Itoa(integerSubId) + go op.sub.run() + h.clientSubs[op.sub.subid] = op.sub + } } } diff --git a/protocol/chainlib/chainproxy/rpcclient/json.go b/protocol/chainlib/chainproxy/rpcclient/json.go index 6ee84fd4e9..84ab2e0a6e 100755 --- a/protocol/chainlib/chainproxy/rpcclient/json.go +++ b/protocol/chainlib/chainproxy/rpcclient/json.go @@ -33,11 +33,12 @@ import ( ) const ( - Vsn = "2.0" - serviceMethodSeparator = "_" - subscribeMethodSuffix = "_subscribe" - unsubscribeMethodSuffix = "_unsubscribe" - notificationMethodSuffix = "_subscription" + Vsn = "2.0" + serviceMethodSeparator = "_" + subscribeMethodSuffix = "_subscribe" + unsubscribeMethodSuffix = "_unsubscribe" + ethereumNotificationMethodSuffix = "_subscription" + solanaNotificationMethodSuffix = "Notification" defaultWriteTimeout = 10 * time.Second // used if context has no deadline ) @@ -49,6 +50,11 @@ type ethereumSubscriptionResult struct { Result json.RawMessage `json:"result,omitempty"` } +type integerIdSubscriptionResult struct { + ID int `json:"subscription"` + Result json.RawMessage `json:"result,omitempty"` +} + type tendermintSubscriptionResult struct { Query string `json:"query"` } @@ -68,8 +74,12 @@ type tendermintSubscribeReply struct { Query string `json:"query"` } +func (msg *JsonrpcMessage) isStarkNetPathfinderNotification() bool { + return msg.ID == nil && msg.Method != "" && msg.Result != nil +} + func (msg *JsonrpcMessage) isEthereumNotification() bool { - return msg.ID == nil && msg.Method != "" + return msg.ID == nil && msg.Method != "" && msg.Params != nil } func (msg *JsonrpcMessage) isTendermintNotification() bool { @@ -165,6 +175,20 @@ func (err *JsonError) ErrorData() interface{} { return err.Data } +func 
(err *JsonError) ToMap() map[string]interface{} { + if err == nil { + return nil + } + + return map[string]interface{}{ + "code": err.Code, + "message": err.Message, + "data": err.Data, + "name": err.Name, + "cause": err.Cause, + } +} + // Conn is a subset of the methods of net.Conn which are sufficient for ServerCodec. type Conn interface { io.ReadWriteCloser diff --git a/protocol/chainlib/chainproxy/rpcclient/subscription.go b/protocol/chainlib/chainproxy/rpcclient/subscription.go index 803ecc171c..cc882364ac 100755 --- a/protocol/chainlib/chainproxy/rpcclient/subscription.go +++ b/protocol/chainlib/chainproxy/rpcclient/subscription.go @@ -181,7 +181,7 @@ func (n *Notifier) send(sub *Subscription, data json.RawMessage) error { ctx := context.Background() return n.h.conn.writeJSON(ctx, &JsonrpcMessage{ Version: Vsn, - Method: n.namespace + notificationMethodSuffix, + Method: n.namespace + ethereumNotificationMethodSuffix, Params: params, }) } diff --git a/protocol/chainlib/chainproxy/rpcclient/types_test.go b/protocol/chainlib/chainproxy/rpcclient/types_test.go index 9ccf87853c..e7f8a57bb2 100755 --- a/protocol/chainlib/chainproxy/rpcclient/types_test.go +++ b/protocol/chainlib/chainproxy/rpcclient/types_test.go @@ -137,7 +137,6 @@ func TestBlockNumberOrHash_WithNumber_MarshalAndUnmarshal(t *testing.T) { {"earliest", int64(EarliestBlockNumber)}, } for _, test := range tests { - test := test t.Run(test.name, func(t *testing.T) { bnh := BlockNumberOrHashWithNumber(BlockNumber(test.number)) marshalled, err := json.Marshal(bnh) diff --git a/protocol/chainlib/common.go b/protocol/chainlib/common.go index bbb4fe11d0..08a7374b4a 100644 --- a/protocol/chainlib/common.go +++ b/protocol/chainlib/common.go @@ -5,6 +5,7 @@ import ( "fmt" "net" "net/http" + "net/url" "strings" "time" @@ -53,6 +54,7 @@ type VerificationKey struct { } type VerificationContainer struct { + InternalPath string ConnectionType string Name string ParseDirective spectypes.ParseDirective @@ -435,3 
+437,12 @@ func GetTimeoutInfo(chainMessage ChainMessageForSend) common.TimeoutInfo { Stateful: GetStateful(chainMessage), } } + +func IsUrlWebSocket(urlToParse string) (bool, error) { + u, err := url.Parse(urlToParse) + if err != nil { + return false, err + } + + return u.Scheme == "ws" || u.Scheme == "wss", nil +} diff --git a/protocol/chainlib/common_test.go b/protocol/chainlib/common_test.go index 7ca79f5d41..762f05f74d 100644 --- a/protocol/chainlib/common_test.go +++ b/protocol/chainlib/common_test.go @@ -93,8 +93,6 @@ func TestMatchSpecApiByName(t *testing.T) { }, } for _, testCase := range testTable { - testCase := testCase - t.Run(testCase.name, func(t *testing.T) { t.Parallel() @@ -125,8 +123,6 @@ func TestConvertToJsonError(t *testing.T) { } for _, testCase := range testTable { - testCase := testCase - t.Run(testCase.name, func(t *testing.T) { t.Parallel() @@ -158,8 +154,6 @@ func TestAddAttributeToError(t *testing.T) { } for _, testCase := range testTable { - testCase := testCase - t.Run(testCase.name, func(t *testing.T) { t.Parallel() result := addAttributeToError(testCase.key, testCase.value, testCase.errorMessage) @@ -210,10 +204,8 @@ func TestExtractDappIDFromWebsocketConnection(t *testing.T) { }() time.Sleep(time.Millisecond * 20) // let the server go up for _, testCase := range testCases { - testCase := testCase - t.Run(testCase.name, func(t *testing.T) { - url := "ws://localhost:3000" + testCase.route + url := "ws://127.0.0.1:3000" + testCase.route dialer := &websocket2.Dialer{} conn, _, err := dialer.Dial(url, testCase.headers) if err != nil { diff --git a/protocol/chainlib/common_test_utils.go b/protocol/chainlib/common_test_utils.go index 88b545a721..9b05bdd6c8 100644 --- a/protocol/chainlib/common_test_utils.go +++ b/protocol/chainlib/common_test_utils.go @@ -198,6 +198,10 @@ func CreateChainLibMocks( mockWebSocketServer.Close() } endpoint.NodeUrls = append(endpoint.NodeUrls, common.NodeUrl{Url: mockHttpServer.URL, Addons: addons}) + if 
len(extensions) > 0 { + endpoint.NodeUrls = append(endpoint.NodeUrls, common.NodeUrl{Url: mockHttpServer.URL, Addons: extensions}) + endpoint.NodeUrls = append(endpoint.NodeUrls, common.NodeUrl{Url: wsUrl, Addons: extensions}) + } endpoint.NodeUrls = append(endpoint.NodeUrls, common.NodeUrl{Url: wsUrl, Addons: nil}) chainRouter, err = GetChainRouter(ctx, 1, endpoint, chainParser) if err != nil { diff --git a/protocol/chainlib/consumer_websocket_manager.go b/protocol/chainlib/consumer_websocket_manager.go index 6bf645cf4a..83ff9f08f3 100644 --- a/protocol/chainlib/consumer_websocket_manager.go +++ b/protocol/chainlib/consumer_websocket_manager.go @@ -2,6 +2,7 @@ package chainlib import ( "context" + "fmt" "strconv" "sync/atomic" "time" @@ -20,6 +21,12 @@ import ( var ( WebSocketRateLimit = -1 // rate limit requests per second on websocket connection WebSocketBanDuration = time.Duration(0) // once rate limit is reached, will not allow new incoming message for a duration + MaxIdleTimeInSeconds = int64(20 * 60) // 20 minutes of idle time will disconnect the websocket connection +) + +const ( + WebSocketRateLimitHeader = "x-lava-websocket-rate-limit" + WebSocketOpenConnectionsLimitHeader = "x-lava-websocket-open-connections-limit" ) type ConsumerWebsocketManager struct { @@ -35,6 +42,7 @@ type ConsumerWebsocketManager struct { relaySender RelaySender consumerWsSubscriptionManager *ConsumerWSSubscriptionManager WebsocketConnectionUID string + headerRateLimit uint64 } type ConsumerWebsocketManagerOptions struct { @@ -50,6 +58,7 @@ type ConsumerWebsocketManagerOptions struct { RelaySender RelaySender ConsumerWsSubscriptionManager *ConsumerWSSubscriptionManager WebsocketConnectionUID string + headerRateLimit uint64 } func NewConsumerWebsocketManager(options ConsumerWebsocketManagerOptions) *ConsumerWebsocketManager { @@ -66,6 +75,7 @@ func NewConsumerWebsocketManager(options ConsumerWebsocketManagerOptions) *Consu refererData: options.RefererData, 
consumerWsSubscriptionManager: options.ConsumerWsSubscriptionManager, WebsocketConnectionUID: options.WebsocketConnectionUID, + headerRateLimit: options.headerRateLimit, } return cwm } @@ -142,10 +152,12 @@ func (cwm *ConsumerWebsocketManager) ListenToMessages() { } }() - // rate limit routine + // set up a routine to check for rate limits or idle time + idleFor := atomic.Int64{} + idleFor.Store(time.Now().Unix()) requestsPerSecond := &atomic.Uint64{} go func() { - if WebSocketRateLimit <= 0 { + if WebSocketRateLimit <= 0 && cwm.headerRateLimit <= 0 && MaxIdleTimeInSeconds <= 0 { return } ticker := time.NewTicker(time.Second) // rate limit per second. @@ -153,23 +165,36 @@ func (cwm *ConsumerWebsocketManager) ListenToMessages() { for { select { case <-webSocketCtx.Done(): + utils.LavaFormatDebug("ctx done in time checker") return case <-ticker.C: - // check if rate limit reached, and ban is required - if WebSocketBanDuration > 0 && requestsPerSecond.Load() > uint64(WebSocketRateLimit) { - // wait the ban duration before resetting the store. - select { - case <-webSocketCtx.Done(): + if MaxIdleTimeInSeconds > 0 { + utils.LavaFormatDebug("checking idle time", utils.LogAttr("idleFor", idleFor.Load()), utils.LogAttr("maxIdleTime", MaxIdleTimeInSeconds), utils.LogAttr("now", time.Now().Unix())) + idleDuration := idleFor.Load() + MaxIdleTimeInSeconds + if time.Now().Unix() > idleDuration { + websocketConn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, fmt.Sprintf("Connection idle for too long, closing connection. 
Idle time: %d", idleDuration))) return - case <-time.After(WebSocketBanDuration): // just continue } } - requestsPerSecond.Store(0) + if cwm.headerRateLimit > 0 || WebSocketRateLimit > 0 { + // check if rate limit reached, and ban is required + currentRequestsPerSecondLoad := requestsPerSecond.Load() + if WebSocketBanDuration > 0 && (currentRequestsPerSecondLoad > cwm.headerRateLimit || currentRequestsPerSecondLoad > uint64(WebSocketRateLimit)) { + // wait the ban duration before resetting the store. + select { + case <-webSocketCtx.Done(): + return + case <-time.After(WebSocketBanDuration): // just continue + } + } + requestsPerSecond.Store(0) + } } } }() for { + idleFor.Store(time.Now().Unix()) startTime := time.Now() msgSeed := guidString + "_" + strconv.Itoa(rand.Intn(10000000000)) // use message seed with original guid and new int @@ -185,7 +210,9 @@ func (cwm *ConsumerWebsocketManager) ListenToMessages() { } // Check rate limit is met - if WebSocketRateLimit > 0 && requestsPerSecond.Add(1) > uint64(WebSocketRateLimit) { + currentRequestsPerSecond := requestsPerSecond.Add(1) + if (cwm.headerRateLimit > 0 && currentRequestsPerSecond > cwm.headerRateLimit) || + (WebSocketRateLimit > 0 && currentRequestsPerSecond > uint64(WebSocketRateLimit)) { rateLimitResponse, err := cwm.handleRateLimitReached(msg) if err == nil { websocketConnWriteChan <- webSocketMsgWithType{messageType: messageType, msg: rateLimitResponse} @@ -218,7 +245,7 @@ func (cwm *ConsumerWebsocketManager) ListenToMessages() { metricsData := metrics.NewRelayAnalytics(dappID, cwm.chainId, cwm.apiInterface) - protocolMessage, err := cwm.relaySender.ParseRelay(webSocketCtx, "", string(msg), cwm.connectionType, dappID, userIp, metricsData, nil) + protocolMessage, err := cwm.relaySender.ParseRelay(webSocketCtx, "", string(msg), cwm.connectionType, dappID, userIp, nil) if err != nil { utils.LavaFormatDebug("ws manager could not parse message", utils.LogAttr("message", msg), utils.LogAttr("err", err)) 
formatterMsg := logger.AnalyzeWebSocketErrorAndGetFormattedMessage(websocketConn.LocalAddr().String(), err, msgSeed, msg, cwm.apiInterface, time.Since(startTime)) @@ -313,6 +340,7 @@ func (cwm *ConsumerWebsocketManager) ListenToMessages() { ) for subscriptionMsgReply := range subscriptionMsgsChan { + idleFor.Store(time.Now().Unix()) websocketConnWriteChan <- webSocketMsgWithType{messageType: messageType, msg: outputFormatter(subscriptionMsgReply.Data)} } diff --git a/protocol/chainlib/consumer_websocket_manager_test.go b/protocol/chainlib/consumer_websocket_manager_test.go new file mode 100644 index 0000000000..c501a663c9 --- /dev/null +++ b/protocol/chainlib/consumer_websocket_manager_test.go @@ -0,0 +1,117 @@ +package chainlib + +import ( + "net" + "testing" + + "github.com/golang/mock/gomock" + "github.com/lavanet/lava/v4/protocol/common" + "github.com/stretchr/testify/assert" +) + +func TestWebsocketConnectionLimiter(t *testing.T) { + tests := []struct { + name string + connectionLimit int64 + headerLimit int64 + ipAddress string + forwardedIP string + userAgent string + expectSuccess []bool + }{ + { + name: "Single connection allowed", + connectionLimit: 1, + headerLimit: 0, + ipAddress: "127.0.0.1", + forwardedIP: "", + userAgent: "test-agent", + expectSuccess: []bool{true}, + }, + { + name: "Single connection allowed", + connectionLimit: 1, + headerLimit: 0, + ipAddress: "127.0.0.1", + forwardedIP: "", + userAgent: "test-agent", + expectSuccess: []bool{true, false}, + }, + { + name: "Multiple connections allowed", + connectionLimit: 2, + headerLimit: 0, + ipAddress: "127.0.0.1", + forwardedIP: "", + userAgent: "test-agent", + expectSuccess: []bool{true, true}, + }, + { + name: "Multiple connections allowed", + connectionLimit: 2, + headerLimit: 0, + ipAddress: "127.0.0.1", + forwardedIP: "", + userAgent: "test-agent", + expectSuccess: []bool{true, true, false}, + }, + { + name: "Header limit overrides global limit succeed", + connectionLimit: 3, + 
headerLimit: 2, + ipAddress: "127.0.0.1", + forwardedIP: "", + userAgent: "test-agent", + expectSuccess: []bool{true, true}, + }, + { + name: "Header limit overrides global limit fail", + connectionLimit: 0, + headerLimit: 2, + ipAddress: "127.0.0.1", + forwardedIP: "", + userAgent: "test-agent", + expectSuccess: []bool{true, true, false}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + // Create a new connection limiter + wcl := &WebsocketConnectionLimiter{ + ipToNumberOfActiveConnections: make(map[string]int64), + } + + // Set global connection limit for testing + MaximumNumberOfParallelWebsocketConnectionsPerIp = tt.connectionLimit + + // Create mock websocket connection + mockWsConn := NewMockWebsocketConnection(ctrl) + + // Set up expectations + mockWsConn.EXPECT().Locals(WebSocketOpenConnectionsLimitHeader).Return(tt.headerLimit).AnyTimes() + mockWsConn.EXPECT().Locals(common.IP_FORWARDING_HEADER_NAME).Return(tt.forwardedIP).AnyTimes() + mockWsConn.EXPECT().Locals("User-Agent").Return(tt.userAgent).AnyTimes() + mockWsConn.EXPECT().RemoteAddr().Return(&net.TCPAddr{ + IP: net.ParseIP(tt.ipAddress), + Port: 8080, + }).AnyTimes() + mockWsConn.EXPECT().WriteMessage(gomock.Any(), gomock.Any()).Do(func(messageType int, data []byte) { + t.Logf("WriteMessage called with messageType: %d, data: %s", messageType, string(data)) + }).AnyTimes() + + // Test the connection + for _, expectSuccess := range tt.expectSuccess { + canOpen, _ := wcl.CanOpenConnection(mockWsConn) + if expectSuccess { + assert.True(t, canOpen, "Expected connection to be allowed") + } else { + assert.False(t, canOpen, "Expected connection to be denied") + } + } + }) + } +} diff --git a/protocol/chainlib/consumer_ws_subscription_manager.go b/protocol/chainlib/consumer_ws_subscription_manager.go index 72ed94e3ee..88c2fc3aac 100644 --- a/protocol/chainlib/consumer_ws_subscription_manager.go +++ 
b/protocol/chainlib/consumer_ws_subscription_manager.go @@ -697,7 +697,7 @@ func (cwsm *ConsumerWSSubscriptionManager) craftUnsubscribeMessage(hashedParams, // Craft the unsubscribe chain message ctx := context.Background() - protocolMessage, err := cwsm.relaySender.ParseRelay(ctx, "", unsubscribeRequestData, cwsm.connectionType, dappID, consumerIp, metricsData, nil) + protocolMessage, err := cwsm.relaySender.ParseRelay(ctx, "", unsubscribeRequestData, cwsm.connectionType, dappID, consumerIp, nil) if err != nil { return nil, utils.LavaFormatError("could not craft unsubscribe chain message", err, utils.LogAttr("hashedParams", utils.ToHexString(hashedParams)), diff --git a/protocol/chainlib/consumer_ws_subscription_manager_test.go b/protocol/chainlib/consumer_ws_subscription_manager_test.go index 08015c239f..4683eac50e 100644 --- a/protocol/chainlib/consumer_ws_subscription_manager_test.go +++ b/protocol/chainlib/consumer_ws_subscription_manager_test.go @@ -88,7 +88,7 @@ func TestConsumerWSSubscriptionManagerParallelSubscriptionsOnSameDappIdIp(t *tes relaySender. EXPECT(). - ParseRelay(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). + ParseRelay(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). Return(protocolMessage1, nil). AnyTimes() @@ -244,7 +244,7 @@ func TestConsumerWSSubscriptionManagerParallelSubscriptions(t *testing.T) { relaySender. EXPECT(). - ParseRelay(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). + ParseRelay(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). Return(protocolMessage1, nil). 
AnyTimes() @@ -484,7 +484,7 @@ func TestConsumerWSSubscriptionManager(t *testing.T) { require.True(t, ok) areEqual := reqData == string(play.unsubscribeMessage1) return areEqual - }), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). + }), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). Return(unsubscribeProtocolMessage1, nil). AnyTimes() @@ -495,7 +495,7 @@ func TestConsumerWSSubscriptionManager(t *testing.T) { require.True(t, ok) areEqual := reqData == string(play.subscriptionRequestData1) return areEqual - }), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). + }), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). Return(subscribeProtocolMessage1, nil). AnyTimes() @@ -600,7 +600,7 @@ func TestConsumerWSSubscriptionManager(t *testing.T) { require.True(t, ok) areEqual := reqData == string(play.unsubscribeMessage2) return areEqual - }), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). + }), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). Return(unsubscribeProtocolMessage2, nil). AnyTimes() @@ -611,7 +611,7 @@ func TestConsumerWSSubscriptionManager(t *testing.T) { require.True(t, ok) areEqual := reqData == string(play.subscriptionRequestData2) return areEqual - }), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). + }), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). Return(subscribeProtocolMessage2, nil). 
AnyTimes() diff --git a/protocol/chainlib/extensionslib/extension_parser.go b/protocol/chainlib/extensionslib/extension_parser.go index c8fc38db90..3b2ac1eebc 100644 --- a/protocol/chainlib/extensionslib/extension_parser.go +++ b/protocol/chainlib/extensionslib/extension_parser.go @@ -4,6 +4,8 @@ import ( spectypes "github.com/lavanet/lava/v4/x/spec/types" ) +const ArchiveExtension = "archive" + type ExtensionInfo struct { ExtensionOverride []string LatestBlock uint64 @@ -69,7 +71,6 @@ func (ep *ExtensionParser) ExtensionParsing(addon string, extensionsChainMessage if len(ep.configuredExtensions) == 0 { return } - for extensionKey, extension := range ep.configuredExtensions { if extensionKey.Addon != addon { // this extension is not relevant for this api @@ -84,7 +85,7 @@ func (ep *ExtensionParser) ExtensionParsing(addon string, extensionsChainMessage func NewExtensionParserRule(extension *spectypes.Extension) ExtensionParserRule { switch extension.Name { - case "archive": + case ArchiveExtension: return ArchiveParserRule{extension: extension} default: // unsupported rule diff --git a/protocol/chainlib/grpc.go b/protocol/chainlib/grpc.go index 926be7bcf3..081941df2b 100644 --- a/protocol/chainlib/grpc.go +++ b/protocol/chainlib/grpc.go @@ -115,7 +115,9 @@ func (apip *GrpcChainParser) CraftMessage(parsing *spectypes.ParseDirective, con if err != nil { return nil, err } - return apip.newChainMessage(apiCont.api, spectypes.NOT_APPLICABLE, nil, grpcMessage, apiCollection), nil + parsedInput := &parser.ParsedInput{} + parsedInput.SetBlock(spectypes.NOT_APPLICABLE) + return apip.newChainMessage(apiCont.api, parsedInput, grpcMessage, apiCollection), nil } // ParseMsg parses message data into chain message object @@ -165,18 +167,19 @@ func (apip *GrpcChainParser) ParseMsg(url string, data []byte, connectionType st utils.LogAttr("overwriteRequestedBlock", overwriteReqBlock), ) parsedInput.SetBlock(spectypes.NOT_APPLICABLE) + } else { + parsedInput.UsedDefaultValue = false } 
} - parsedBlock := parsedInput.GetBlock() - blockHashes, _ := parsedInput.GetBlockHashes() - - nodeMsg := apip.newChainMessage(apiCont.api, parsedBlock, blockHashes, &grpcMessage, apiCollection) + nodeMsg := apip.newChainMessage(apiCont.api, parsedInput, &grpcMessage, apiCollection) apip.BaseChainParser.ExtensionParsing(apiCollection.CollectionData.AddOn, nodeMsg, extensionInfo) return nodeMsg, apip.BaseChainParser.Validate(nodeMsg) } -func (*GrpcChainParser) newChainMessage(api *spectypes.Api, requestedBlock int64, requestedHashes []string, grpcMessage *rpcInterfaceMessages.GrpcMessage, apiCollection *spectypes.ApiCollection) *baseChainMessageContainer { +func (*GrpcChainParser) newChainMessage(api *spectypes.Api, parsedInput *parser.ParsedInput, grpcMessage *rpcInterfaceMessages.GrpcMessage, apiCollection *spectypes.ApiCollection) *baseChainMessageContainer { + requestedBlock := parsedInput.GetBlock() + requestedHashes, _ := parsedInput.GetBlockHashes() nodeMsg := &baseChainMessageContainer{ api: api, msg: grpcMessage, // setting the grpc message as a pointer so we can set descriptors for parsing @@ -185,6 +188,7 @@ func (*GrpcChainParser) newChainMessage(api *spectypes.Api, requestedBlock int64 apiCollection: apiCollection, resultErrorParsingMethod: grpcMessage.CheckResponseError, parseDirective: GetParseDirective(api, apiCollection), + usedDefaultValue: parsedInput.UsedDefaultValue, } return nodeMsg } diff --git a/protocol/chainlib/grpc_test.go b/protocol/chainlib/grpc_test.go index 119ba64e84..cd3404b2b6 100644 --- a/protocol/chainlib/grpc_test.go +++ b/protocol/chainlib/grpc_test.go @@ -5,7 +5,6 @@ import ( "fmt" "net/http" "strconv" - "strings" "testing" "time" @@ -92,10 +91,12 @@ func TestGRPCGetSupportedApi(t *testing.T) { serverApis: map[ApiKey]ApiContainer{{Name: "API1", ConnectionType: connectionType_test}: {api: &spectypes.Api{Name: "API1", Enabled: true}, collectionKey: CollectionKey{ConnectionType: connectionType_test}}}, }, } - _, err = 
apip.getSupportedApi("API2", connectionType_test) - assert.Error(t, err) - found := strings.Contains(err.Error(), "api not supported") - require.True(t, found) + apiCont, err = apip.getSupportedApi("API2", connectionType_test) + if err == nil { + require.Equal(t, "Default-API2", apiCont.api.Name) + } else { + require.Contains(t, err.Error(), "api not supported") + } // Test case 3: Returns error if the API is disabled apip = &GrpcChainParser{ @@ -105,8 +106,7 @@ func TestGRPCGetSupportedApi(t *testing.T) { } _, err = apip.getSupportedApi("API1", connectionType_test) assert.Error(t, err) - found = strings.Contains(err.Error(), "api is disabled") - require.True(t, found) + require.Contains(t, err.Error(), "api is disabled") } func TestGRPCParseMessage(t *testing.T) { diff --git a/protocol/chainlib/jsonRPC.go b/protocol/chainlib/jsonRPC.go index 99dbe2a116..de48263375 100644 --- a/protocol/chainlib/jsonRPC.go +++ b/protocol/chainlib/jsonRPC.go @@ -28,7 +28,11 @@ import ( spectypes "github.com/lavanet/lava/v4/x/spec/types" ) -const SEP = "&" +const ( + SEP = "&" +) + +var MaximumNumberOfParallelWebsocketConnectionsPerIp int64 = 0 type JsonRPCChainParser struct { BaseChainParser @@ -61,7 +65,12 @@ func (apip *JsonRPCChainParser) getSupportedApi(name, connectionType string, int func (apip *JsonRPCChainParser) CraftMessage(parsing *spectypes.ParseDirective, connectionType string, craftData *CraftData, metadata []pairingtypes.Metadata) (ChainMessageForSend, error) { if craftData != nil { - chainMessage, err := apip.ParseMsg("", craftData.Data, craftData.ConnectionType, metadata, extensionslib.ExtensionInfo{LatestBlock: 0}) + path := craftData.Path + if craftData.InternalPath != "" { + path = craftData.InternalPath + } + + chainMessage, err := apip.ParseMsg(path, craftData.Data, craftData.ConnectionType, metadata, extensionslib.ExtensionInfo{LatestBlock: 0}) if err == nil { chainMessage.AppendHeader(metadata) } @@ -83,7 +92,7 @@ func (apip *JsonRPCChainParser) 
CraftMessage(parsing *spectypes.ParseDirective, if err != nil { return nil, err } - return apip.newChainMessage(apiCont.api, spectypes.NOT_APPLICABLE, nil, msg, apiCollection), nil + return apip.newChainMessage(apiCont.api, spectypes.NOT_APPLICABLE, nil, msg, apiCollection, false), nil } // this func parses message data into chain message object @@ -106,6 +115,7 @@ func (apip *JsonRPCChainParser) ParseMsg(url string, data []byte, connectionType var apiCollection *spectypes.ApiCollection var latestRequestedBlock, earliestRequestedBlock int64 = 0, 0 blockHashes := []string{} + parsedDefault := true for idx, msg := range msgs { parsedInput := parser.NewParsedInput() internalPath := "" @@ -140,6 +150,9 @@ func (apip *JsonRPCChainParser) ParseMsg(url string, data []byte, connectionType if hashes, err := parsedInput.GetBlockHashes(); err == nil { blockHashes = append(blockHashes, hashes...) } + if !parsedInput.UsedDefaultValue { + parsedDefault = false + } } else { parsedBlock, err := msg.ParseBlock(overwriteReqBlock) parsedInput.SetBlock(parsedBlock) @@ -149,6 +162,8 @@ func (apip *JsonRPCChainParser) ParseMsg(url string, data []byte, connectionType utils.LogAttr("overwriteReqBlock", overwriteReqBlock), ) parsedInput.SetBlock(spectypes.NOT_APPLICABLE) + } else { + parsedInput.UsedDefaultValue = false } } @@ -200,9 +215,9 @@ func (apip *JsonRPCChainParser) ParseMsg(url string, data []byte, connectionType var nodeMsg *baseChainMessageContainer if len(msgs) == 1 { - nodeMsg = apip.newChainMessage(api, latestRequestedBlock, blockHashes, &msgs[0], apiCollection) + nodeMsg = apip.newChainMessage(api, latestRequestedBlock, blockHashes, &msgs[0], apiCollection, parsedDefault) } else { - nodeMsg, err = apip.newBatchChainMessage(api, latestRequestedBlock, earliestRequestedBlock, blockHashes, msgs, apiCollection) + nodeMsg, err = apip.newBatchChainMessage(api, latestRequestedBlock, earliestRequestedBlock, blockHashes, msgs, apiCollection, parsedDefault) if err != nil { return nil, 
err } @@ -211,7 +226,7 @@ func (apip *JsonRPCChainParser) ParseMsg(url string, data []byte, connectionType return nodeMsg, apip.BaseChainParser.Validate(nodeMsg) } -func (*JsonRPCChainParser) newBatchChainMessage(serviceApi *spectypes.Api, requestedBlock int64, earliestRequestedBlock int64, requestedBlockHashes []string, msgs []rpcInterfaceMessages.JsonrpcMessage, apiCollection *spectypes.ApiCollection) (*baseChainMessageContainer, error) { +func (*JsonRPCChainParser) newBatchChainMessage(serviceApi *spectypes.Api, requestedBlock int64, earliestRequestedBlock int64, requestedBlockHashes []string, msgs []rpcInterfaceMessages.JsonrpcMessage, apiCollection *spectypes.ApiCollection, usedDefaultValue bool) (*baseChainMessageContainer, error) { batchMessage, err := rpcInterfaceMessages.NewBatchMessage(msgs) if err != nil { return nil, err @@ -225,11 +240,12 @@ func (*JsonRPCChainParser) newBatchChainMessage(serviceApi *spectypes.Api, reque earliestRequestedBlock: earliestRequestedBlock, resultErrorParsingMethod: rpcInterfaceMessages.CheckResponseErrorForJsonRpcBatch, parseDirective: nil, + usedDefaultValue: usedDefaultValue, } return nodeMsg, err } -func (*JsonRPCChainParser) newChainMessage(serviceApi *spectypes.Api, requestedBlock int64, requestedBlockHashes []string, msg *rpcInterfaceMessages.JsonrpcMessage, apiCollection *spectypes.ApiCollection) *baseChainMessageContainer { +func (*JsonRPCChainParser) newChainMessage(serviceApi *spectypes.Api, requestedBlock int64, requestedBlockHashes []string, msg *rpcInterfaceMessages.JsonrpcMessage, apiCollection *spectypes.ApiCollection, usedDefaultValue bool) *baseChainMessageContainer { nodeMsg := &baseChainMessageContainer{ api: serviceApi, apiCollection: apiCollection, @@ -238,6 +254,7 @@ func (*JsonRPCChainParser) newChainMessage(serviceApi *spectypes.Api, requestedB msg: msg, resultErrorParsingMethod: msg.CheckResponseError, parseDirective: GetParseDirective(serviceApi, apiCollection), + usedDefaultValue: 
usedDefaultValue, } return nodeMsg } @@ -308,6 +325,7 @@ type JsonRPCChainListener struct { refererData *RefererData consumerWsSubscriptionManager *ConsumerWSSubscriptionManager listeningAddress string + websocketConnectionLimiter *WebsocketConnectionLimiter } // NewJrpcChainListener creates a new instance of JsonRPCChainListener @@ -325,6 +343,7 @@ func NewJrpcChainListener(ctx context.Context, listenEndpoint *lavasession.RPCEn logger: rpcConsumerLogs, refererData: refererData, consumerWsSubscriptionManager: consumerWsSubscriptionManager, + websocketConnectionLimiter: &WebsocketConnectionLimiter{ipToNumberOfActiveConnections: make(map[string]int64)}, } return chainListener @@ -341,6 +360,8 @@ func (apil *JsonRPCChainListener) Serve(ctx context.Context, cmdFlags common.Con app := createAndSetupBaseAppListener(cmdFlags, apil.endpoint.HealthCheckPath, apil.healthReporter) app.Use("/ws", func(c *fiber.Ctx) error { + apil.websocketConnectionLimiter.HandleFiberRateLimitFlags(c) + // IsWebSocketUpgrade returns true if the client // requested upgrade to the WebSocket protocol. 
if websocket.IsWebSocketUpgrade(c) { @@ -354,6 +375,17 @@ func (apil *JsonRPCChainListener) Serve(ctx context.Context, cmdFlags common.Con apiInterface := apil.endpoint.ApiInterface webSocketCallback := websocket.New(func(websocketConn *websocket.Conn) { + canOpenConnection, decreaseIpConnection := apil.websocketConnectionLimiter.CanOpenConnection(websocketConn) + defer decreaseIpConnection() + if !canOpenConnection { + return + } + rateLimitInf := websocketConn.Locals(WebSocketRateLimitHeader) + rateLimit, assertionSuccessful := rateLimitInf.(int64) + if !assertionSuccessful || rateLimit < 0 { + rateLimit = 0 + } + utils.LavaFormatDebug("jsonrpc websocket opened", utils.LogAttr("consumerIp", websocketConn.LocalAddr().String())) defer utils.LavaFormatDebug("jsonrpc websocket closed", utils.LogAttr("consumerIp", websocketConn.LocalAddr().String())) @@ -370,6 +402,7 @@ func (apil *JsonRPCChainListener) Serve(ctx context.Context, cmdFlags common.Con RelaySender: apil.relaySender, ConsumerWsSubscriptionManager: apil.consumerWsSubscriptionManager, WebsocketConnectionUID: strconv.FormatUint(utils.GenerateUniqueIdentifier(), 10), + headerRateLimit: uint64(rateLimit), }) consumerWebsocketManager.ListenToMessages() @@ -427,6 +460,10 @@ func (apil *JsonRPCChainListener) Serve(ctx context.Context, cmdFlags common.Con return fiberCtx.Status(fiber.StatusOK).JSON(common.JsonRpcMethodNotFoundError) } + if _, ok := err.(*json.SyntaxError); ok { + return fiberCtx.Status(fiber.StatusBadRequest).JSON(common.JsonRpcParseError) + } + // Get unique GUID response errMasking := apil.logger.GetUniqueGuidResponseForError(err, msgSeed) @@ -496,7 +533,7 @@ func (apil *JsonRPCChainListener) GetListeningAddress() string { type JrpcChainProxy struct { BaseChainProxy - conn map[string]*chainproxy.Connector + conn *chainproxy.Connector } func NewJrpcChainProxy(ctx context.Context, nConns uint, rpcProviderEndpoint lavasession.RPCProviderEndpoint, chainParser ChainParser) (ChainProxy, error) { @@ 
-504,7 +541,10 @@ func NewJrpcChainProxy(ctx context.Context, nConns uint, rpcProviderEndpoint lav return nil, utils.LavaFormatError("rpcProviderEndpoint.NodeUrl list is empty missing node url", nil, utils.Attribute{Key: "chainID", Value: rpcProviderEndpoint.ChainID}, utils.Attribute{Key: "ApiInterface", Value: rpcProviderEndpoint.ApiInterface}) } _, averageBlockTime, _, _ := chainParser.ChainBlockStats() + + // look for the first node url that has no internal path, otherwise take first node url nodeUrl := rpcProviderEndpoint.NodeUrls[0] + cp := &JrpcChainProxy{ BaseChainProxy: BaseChainProxy{ averageBlockTime: averageBlockTime, @@ -512,72 +552,30 @@ func NewJrpcChainProxy(ctx context.Context, nConns uint, rpcProviderEndpoint lav ErrorHandler: &JsonRPCErrorHandler{}, ChainID: rpcProviderEndpoint.ChainID, }, - conn: map[string]*chainproxy.Connector{}, + conn: nil, } validateEndpoints(rpcProviderEndpoint.NodeUrls, spectypes.APIInterfaceJsonRPC) - internalPaths := map[string]struct{}{} - jsonRPCChainParser, ok := chainParser.(*JsonRPCChainParser) - if ok { - internalPaths = jsonRPCChainParser.GetInternalPaths() - } - internalPathsLength := len(internalPaths) - if internalPathsLength > 0 && internalPathsLength == len(rpcProviderEndpoint.NodeUrls) { - return cp, cp.startWithSpecificInternalPaths(ctx, nConns, rpcProviderEndpoint.NodeUrls, internalPaths) - } else if internalPathsLength > 0 && len(rpcProviderEndpoint.NodeUrls) > 1 { - // provider provided specific endpoints but not enough to fill all requirements - return nil, utils.LavaFormatError("Internal Paths specified but not all paths provided", nil, utils.Attribute{Key: "required", Value: internalPaths}, utils.Attribute{Key: "provided", Value: rpcProviderEndpoint.NodeUrls}) - } - return cp, cp.start(ctx, nConns, nodeUrl, internalPaths) -} - -func (cp *JrpcChainProxy) startWithSpecificInternalPaths(ctx context.Context, nConns uint, nodeUrls []common.NodeUrl, internalPaths map[string]struct{}) error { - for _, url := 
range nodeUrls { - _, ok := internalPaths[url.InternalPath] - if !ok { - return utils.LavaFormatError("url.InternalPath was not found in internalPaths", nil, utils.Attribute{Key: "internalPaths", Value: internalPaths}, utils.Attribute{Key: "url.InternalPath", Value: url.InternalPath}) - } - utils.LavaFormatDebug("connecting", utils.Attribute{Key: "url", Value: url.String()}) - conn, err := chainproxy.NewConnector(ctx, nConns, url) - if err != nil { - return err - } - cp.conn[url.InternalPath] = conn - } - if len(cp.conn) != len(internalPaths) { - return utils.LavaFormatError("missing connectors for a chain with internal paths", nil, utils.Attribute{Key: "internalPaths", Value: internalPaths}, utils.Attribute{Key: "nodeUrls", Value: nodeUrls}) - } - return nil + return cp, cp.start(ctx, nConns, nodeUrl) } -func (cp *JrpcChainProxy) start(ctx context.Context, nConns uint, nodeUrl common.NodeUrl, internalPaths map[string]struct{}) error { - if len(internalPaths) == 0 { - internalPaths = map[string]struct{}{"": {}} // add default path +func (cp *JrpcChainProxy) start(ctx context.Context, nConns uint, nodeUrl common.NodeUrl) error { + conn, err := chainproxy.NewConnector(ctx, nConns, nodeUrl) + if err != nil { + return err } - basePath := nodeUrl.Url - for path := range internalPaths { - nodeUrl.Url = basePath + path - conn, err := chainproxy.NewConnector(ctx, nConns, nodeUrl) - if err != nil { - return err - } - cp.conn[path] = conn - if cp.conn == nil { - return errors.New("g_conn == nil") - } - } + cp.conn = conn return nil } func (cp *JrpcChainProxy) sendBatchMessage(ctx context.Context, nodeMessage *rpcInterfaceMessages.JsonrpcBatchMessage, chainMessage ChainMessageForSend) (relayReply *RelayReplyWrapper, err error) { - internalPath := chainMessage.GetApiCollection().CollectionData.InternalPath - rpc, err := cp.conn[internalPath].GetRpc(ctx, true) + rpc, err := cp.conn.GetRpc(ctx, true) if err != nil { return nil, err } - defer cp.conn[internalPath].ReturnRpc(rpc) 
+ defer cp.conn.ReturnRpc(rpc) if len(nodeMessage.GetHeaders()) > 0 { for _, metadata := range nodeMessage.GetHeaders() { rpc.SetHeader(metadata.Name, metadata.Value) @@ -602,7 +600,7 @@ func (cp *JrpcChainProxy) sendBatchMessage(ctx context.Context, nodeMessage *rpc } replyMsgs := make([]rpcInterfaceMessages.JsonrpcMessage, len(batch)) for idx, element := range batch { - // convert them because batch elements can't be marshaled back to the user, they are missing tags and flieds + // convert them because batch elements can't be marshaled back to the user, they are missing tags and fields replyMsgs[idx], err = rpcInterfaceMessages.ConvertBatchElement(element) if err != nil { return nil, err @@ -637,20 +635,18 @@ func (cp *JrpcChainProxy) SendNodeMsg(ctx context.Context, ch chan interface{}, reply, err := cp.sendBatchMessage(ctx, batchMessage, chainMessage) return reply, "", nil, err } - internalPath := chainMessage.GetApiCollection().CollectionData.InternalPath - connector := cp.conn[internalPath] - rpc, err := connector.GetRpc(ctx, true) + + rpc, err := cp.conn.GetRpc(ctx, true) if err != nil { return nil, "", nil, err } - defer connector.ReturnRpc(rpc) + defer cp.conn.ReturnRpc(rpc) // appending hashed url - grpc.SetTrailer(ctx, metadata.Pairs(RPCProviderNodeAddressHash, connector.GetUrlHash())) + grpc.SetTrailer(ctx, metadata.Pairs(RPCProviderNodeAddressHash, cp.conn.GetUrlHash())) // Call our node var rpcMessage *rpcclient.JsonrpcMessage - var replyMessage *rpcInterfaceMessages.JsonrpcMessage var sub *rpcclient.ClientSubscription // support setting headers if len(nodeMessage.GetHeaders()) > 0 { @@ -671,41 +667,43 @@ func (cp *JrpcChainProxy) SendNodeMsg(ctx context.Context, ch chan interface{}, cp.NodeUrl.SetIpForwardingIfNecessary(ctx, rpc.SetHeader) rpcMessage, nodeErr = rpc.CallContext(connectCtx, nodeMessage.ID, nodeMessage.Method, nodeMessage.Params, true, nodeMessage.GetDisableErrorHandling()) - if err != nil { + if nodeErr != nil { // here we are getting 
an error for every code that is not 200-300 - if common.StatusCodeError504.Is(err) || common.StatusCodeError429.Is(err) || common.StatusCodeErrorStrict.Is(err) { - return nil, "", nil, utils.LavaFormatWarning("Received invalid status code", err, utils.Attribute{Key: "chainID", Value: cp.BaseChainProxy.ChainID}, utils.Attribute{Key: "apiName", Value: chainMessage.GetApi().Name}) + if common.StatusCodeError504.Is(nodeErr) || common.StatusCodeError429.Is(nodeErr) || common.StatusCodeErrorStrict.Is(nodeErr) { + return nil, "", nil, utils.LavaFormatWarning("Received invalid status code", nodeErr, utils.Attribute{Key: "chainID", Value: cp.BaseChainProxy.ChainID}, utils.Attribute{Key: "apiName", Value: chainMessage.GetApi().Name}) } // Validate if the error is related to the provider connection to the node or it is a valid error // in case the error is valid (e.g. bad input parameters) the error will return in the form of a valid error reply - if parsedError := cp.HandleNodeError(ctx, err); parsedError != nil { + if parsedError := cp.HandleNodeError(ctx, nodeErr); parsedError != nil { return nil, "", nil, parsedError } } } - var replyMsg rpcInterfaceMessages.JsonrpcMessage + var replyMsg *rpcInterfaceMessages.JsonrpcMessage // the error check here would only wrap errors not from the rpc - if nodeErr != nil { - utils.LavaFormatDebug("got error from node", utils.LogAttr("GUID", ctx), utils.LogAttr("nodeErr", nodeErr)) - return nil, "", nil, nodeErr + // try to parse node error as json message + rpcMessage = TryRecoverNodeErrorFromClientError(nodeErr) + if rpcMessage == nil { + utils.LavaFormatDebug("got error from node", utils.LogAttr("GUID", ctx), utils.LogAttr("nodeErr", nodeErr), utils.LogAttr("nodeUrl", cp.NodeUrl.Url)) + return nil, "", nil, nodeErr + } } - replyMessage, err = rpcInterfaceMessages.ConvertJsonRPCMsg(rpcMessage) + replyMsg, err = rpcInterfaceMessages.ConvertJsonRPCMsg(rpcMessage) if err != nil { return nil, "", nil, utils.LavaFormatError("jsonRPC error", 
err, utils.Attribute{Key: "GUID", Value: ctx}) } // validate result is valid - if replyMessage.Error == nil { - responseIsNilValidationError := ValidateNilResponse(string(replyMessage.Result)) + if replyMsg.Error == nil { + responseIsNilValidationError := ValidateNilResponse(string(replyMsg.Result)) if responseIsNilValidationError != nil { return nil, "", nil, responseIsNilValidationError } } - replyMsg = *replyMessage - err = cp.ValidateRequestAndResponseIds(nodeMessage.ID, replyMessage.ID) + err = cp.ValidateRequestAndResponseIds(nodeMessage.ID, replyMsg.ID) if err != nil { return nil, "", nil, utils.LavaFormatError("jsonRPC ID mismatch error", err, utils.Attribute{Key: "GUID", Value: ctx}, diff --git a/protocol/chainlib/jsonRPC_test.go b/protocol/chainlib/jsonRPC_test.go index 885bf953d2..1510458704 100644 --- a/protocol/chainlib/jsonRPC_test.go +++ b/protocol/chainlib/jsonRPC_test.go @@ -115,8 +115,12 @@ func TestJSONGetSupportedApi(t *testing.T) { serverApis: map[ApiKey]ApiContainer{{Name: "API1", ConnectionType: connectionType_test}: {api: &spectypes.Api{Name: "API1", Enabled: true}, collectionKey: CollectionKey{ConnectionType: connectionType_test}}}, }, } - _, err = apip.getSupportedApi("API2", connectionType_test, "") - assert.Error(t, err) + apiCont, err := apip.getSupportedApi("API2", connectionType_test, "") + if err == nil { + assert.Equal(t, "Default-API2", apiCont.api.Name) + } else { + assert.ErrorIs(t, err, common.APINotSupportedError) + } // Test case 3: Returns error if the API is disabled apip = &JsonRPCChainParser{ @@ -187,9 +191,9 @@ func TestJsonRpcChainProxy(t *testing.T) { require.NoError(t, err) _, err = chainFetcher.FetchBlockHashByNum(ctx, block) - actualErrMsg := "GET_BLOCK_BY_NUM Failed ParseMessageResponse {error:blockParsing - parse failed {error:invalid parser input format," - expectedErrMsg := err.Error()[:len(actualErrMsg)] - require.Equal(t, actualErrMsg, expectedErrMsg, err.Error()) + expectedErrMsg := "GET_BLOCK_BY_NUM Failed 
ParseMessageResponse {error:failed to parse with legacy block parser ErrMsg: blockParsing -" + actualErrMsg := err.Error()[:len(expectedErrMsg)] + require.Equal(t, expectedErrMsg, actualErrMsg, err.Error()) } func TestAddonAndVerifications(t *testing.T) { @@ -214,7 +218,7 @@ func TestAddonAndVerifications(t *testing.T) { require.NotNil(t, chainRouter) require.NotNil(t, chainFetcher) - verifications, err := chainParser.GetVerifications([]string{"debug"}) + verifications, err := chainParser.GetVerifications([]string{"debug"}, "", "jsonrpc") require.NoError(t, err) require.NotEmpty(t, verifications) for _, verification := range verifications { @@ -499,11 +503,174 @@ func TestJsonRpcInternalPathsMultipleVersionsAvalanche(t *testing.T) { require.Equal(t, reqDataWithApiName.apiName, api.Name) require.Equal(t, correctPath, collection.CollectionData.InternalPath) } else { - require.Error(t, err) - require.ErrorIs(t, err, common.APINotSupportedError) - require.Nil(t, chainMessage) + if err == nil { + require.Contains(t, chainMessage.GetApi().Name, "Default-") + } else { + require.ErrorIs(t, err, common.APINotSupportedError) + require.Nil(t, chainMessage) + } } }) } } } + +func TestJsonRPC_SpecUpdateWithAddons(t *testing.T) { + // create a new instance of RestChainParser + apip, err := NewJrpcChainParser() + if err != nil { + t.Errorf("Error creating RestChainParser: %v", err) + } + + // set the spec + spec := spectypes.Spec{ + Enabled: true, + ReliabilityThreshold: 10, + AllowedBlockLagForQosSync: 11, + AverageBlockTime: 12000, + BlockDistanceForFinalizedData: 13, + BlocksInFinalizationProof: 14, + ApiCollections: []*spectypes.ApiCollection{ + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: "jsonrpc", + InternalPath: "", + Type: "POST", + AddOn: "debug", + }, + Apis: []*spectypes.Api{ + { + Enabled: true, + Name: "foo", + }, + }, + }, + }, + } + + // Set the spec for the first time + apip.SetSpec(spec) + + // At first, addon should be disabled 
+ require.False(t, apip.allowedAddons["debug"]) + + // Setting the spec again, for sanity check + apip.SetSpec(spec) + + // Sanity check that addon still disabled + require.False(t, apip.allowedAddons["debug"]) + + // Allow the addon + apip.SetPolicyFromAddonAndExtensionMap(map[string]struct{}{ + "debug": {}, + }) + + // Sanity check + require.True(t, apip.allowedAddons["debug"]) + + // Set the spec again + apip.SetSpec(spec) + + // Should stay the same + require.True(t, apip.allowedAddons["debug"]) + + // Disallow the addon + apip.SetPolicyFromAddonAndExtensionMap(map[string]struct{}{}) + + // Sanity check + require.False(t, apip.allowedAddons["debug"]) + + // Set the spec again + apip.SetSpec(spec) + + // Should stay the same + require.False(t, apip.allowedAddons["debug"]) +} + +func TestJsonRPC_SpecUpdateWithExtensions(t *testing.T) { + // create a new instance of RestChainParser + apip, err := NewJrpcChainParser() + if err != nil { + t.Errorf("Error creating RestChainParser: %v", err) + } + + // set the spec + spec := spectypes.Spec{ + Enabled: true, + ReliabilityThreshold: 10, + AllowedBlockLagForQosSync: 11, + AverageBlockTime: 12000, + BlockDistanceForFinalizedData: 13, + BlocksInFinalizationProof: 14, + ApiCollections: []*spectypes.ApiCollection{ + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: "jsonrpc", + InternalPath: "", + Type: "POST", + AddOn: "", + }, + Extensions: []*spectypes.Extension{ + { + Name: "archive", + Rule: &spectypes.Rule{ + Block: 123, + }, + }, + }, + }, + }, + } + + extensionKey := extensionslib.ExtensionKey{ + Extension: "archive", + ConnectionType: "POST", + InternalPath: "", + Addon: "", + } + + isExtensionConfigured := func() bool { + _, isConfigured := apip.extensionParser.GetConfiguredExtensions()[extensionKey] + return isConfigured + } + + // Set the spec for the first time + apip.SetSpec(spec) + + // At first, extension should not be configured + require.False(t, isExtensionConfigured()) + + // 
Setting the spec again, for sanity check + apip.SetSpec(spec) + + // Sanity check that extension is still not configured + require.False(t, isExtensionConfigured()) + + // Allow the extension + apip.SetPolicyFromAddonAndExtensionMap(map[string]struct{}{ + "archive": {}, + }) + + // Sanity check + require.True(t, isExtensionConfigured()) + + // Set the spec again + apip.SetSpec(spec) + + // Should stay the same + require.True(t, isExtensionConfigured()) + + // Disallow the extension + apip.SetPolicyFromAddonAndExtensionMap(map[string]struct{}{}) + + // Sanity check + require.False(t, isExtensionConfigured()) + + // Set the spec again + apip.SetSpec(spec) + + // Should stay the same + require.False(t, isExtensionConfigured()) +} diff --git a/protocol/chainlib/mock_websocket.go b/protocol/chainlib/mock_websocket.go new file mode 100644 index 0000000000..a87f28f0ab --- /dev/null +++ b/protocol/chainlib/mock_websocket.go @@ -0,0 +1,77 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: consumer_websocket_manager_test.go + +// Package chainlib is a generated GoMock package. +package chainlib + +import ( + net "net" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockWebsocketConnection is a mock of WebsocketConnection interface. +type MockWebsocketConnection struct { + ctrl *gomock.Controller + recorder *MockWebsocketConnectionMockRecorder +} + +// MockWebsocketConnectionMockRecorder is the mock recorder for MockWebsocketConnection. +type MockWebsocketConnectionMockRecorder struct { + mock *MockWebsocketConnection +} + +// NewMockWebsocketConnection creates a new mock instance. +func NewMockWebsocketConnection(ctrl *gomock.Controller) *MockWebsocketConnection { + mock := &MockWebsocketConnection{ctrl: ctrl} + mock.recorder = &MockWebsocketConnectionMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. 
+func (m *MockWebsocketConnection) EXPECT() *MockWebsocketConnectionMockRecorder { + return m.recorder +} + +// Locals mocks base method. +func (m *MockWebsocketConnection) Locals(key string) interface{} { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Locals", key) + ret0, _ := ret[0].(interface{}) + return ret0 +} + +// Locals indicates an expected call of Locals. +func (mr *MockWebsocketConnectionMockRecorder) Locals(key interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Locals", reflect.TypeOf((*MockWebsocketConnection)(nil).Locals), key) +} + +// RemoteAddr mocks base method. +func (m *MockWebsocketConnection) RemoteAddr() net.Addr { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoteAddr") + ret0, _ := ret[0].(net.Addr) + return ret0 +} + +// RemoteAddr indicates an expected call of RemoteAddr. +func (mr *MockWebsocketConnectionMockRecorder) RemoteAddr() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoteAddr", reflect.TypeOf((*MockWebsocketConnection)(nil).RemoteAddr)) +} + +// WriteMessage mocks base method. +func (m *MockWebsocketConnection) WriteMessage(messageType int, data []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WriteMessage", messageType, data) + ret0, _ := ret[0].(error) + return ret0 +} + +// WriteMessage indicates an expected call of WriteMessage. 
+func (mr *MockWebsocketConnectionMockRecorder) WriteMessage(messageType, data interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteMessage", reflect.TypeOf((*MockWebsocketConnection)(nil).WriteMessage), messageType, data) +} diff --git a/protocol/chainlib/node_error_handler.go b/protocol/chainlib/node_error_handler.go index 9376d54644..5d5d52bf4c 100644 --- a/protocol/chainlib/node_error_handler.go +++ b/protocol/chainlib/node_error_handler.go @@ -139,6 +139,20 @@ func (geh *genericErrorHandler) ValidateRequestAndResponseIds(nodeMessageID json return nil } +func TryRecoverNodeErrorFromClientError(nodeErr error) *rpcclient.JsonrpcMessage { + // try to parse node error as json message + httpError, ok := nodeErr.(rpcclient.HTTPError) + if ok { + jsonMessage := &rpcclient.JsonrpcMessage{} + err := json.Unmarshal(httpError.Body, jsonMessage) + if err == nil { + utils.LavaFormatDebug("Successfully recovered HTTPError to node message", utils.LogAttr("jsonMessage", jsonMessage)) + return jsonMessage + } + } + return nil +} + type RestErrorHandler struct{ genericErrorHandler } // Validating if the error is related to the provider connection or not diff --git a/protocol/chainlib/protocol_message.go b/protocol/chainlib/protocol_message.go index a69da9a074..9a054d456d 100644 --- a/protocol/chainlib/protocol_message.go +++ b/protocol/chainlib/protocol_message.go @@ -3,8 +3,11 @@ package chainlib import ( "strings" + "github.com/lavanet/lava/v4/protocol/chainlib/extensionslib" "github.com/lavanet/lava/v4/protocol/common" + "github.com/lavanet/lava/v4/utils" pairingtypes "github.com/lavanet/lava/v4/x/pairing/types" + "github.com/lavanet/lava/v4/x/spec/types" ) type UserData struct { @@ -35,6 +38,48 @@ func (bpm *BaseProtocolMessage) HashCacheRequest(chainId string) ([]byte, func([ return HashCacheRequest(bpm.relayRequestData, chainId) } +// addMissingExtensions adds any extensions from updatedProtocolExtensions that 
are not in currentPrivateDataExtensions +func (bpm *BaseProtocolMessage) addMissingExtensions(updatedProtocolExtensions []*types.Extension, currentPrivateDataExtensions []string) []string { + // Create a map for O(1) lookups + existingExtensions := make(map[string]struct{}, len(currentPrivateDataExtensions)) + for _, ext := range currentPrivateDataExtensions { + existingExtensions[ext] = struct{}{} + } + + // Add missing extensions + for _, ext := range updatedProtocolExtensions { + if _, exists := existingExtensions[ext.Name]; !exists { + currentPrivateDataExtensions = append(currentPrivateDataExtensions, ext.Name) + if len(updatedProtocolExtensions) == len(currentPrivateDataExtensions) { + break + } + } + } + return currentPrivateDataExtensions +} + +func (bpm *BaseProtocolMessage) UpdateEarliestAndValidateExtensionRules(extensionParser *extensionslib.ExtensionParser, earliestBlockHashRequested int64, addon string, seenBlock int64) bool { + if earliestBlockHashRequested >= 0 { + success := bpm.UpdateEarliestInMessage(earliestBlockHashRequested) + // check if we successfully updated the earliest block in the message + if success { + // parse the extensions for the new updated earliest block + extensionParser.ExtensionParsing(addon, bpm, uint64(seenBlock)) + updatedProtocolExtensions := bpm.GetExtensions() + currentPrivateDataExtensions := bpm.RelayPrivateData().Extensions + utils.LavaFormatTrace("[Archive Debug] Trying to add extensions", utils.LogAttr("currentProtocolExtensions", updatedProtocolExtensions), utils.LogAttr("currentPrivateDataExtensions", currentPrivateDataExtensions)) + if len(updatedProtocolExtensions) > len(currentPrivateDataExtensions) { + // we need to add the missing extension to the private data. 
+ currentPrivateDataExtensions = bpm.addMissingExtensions(updatedProtocolExtensions, currentPrivateDataExtensions) + bpm.RelayPrivateData().Extensions = currentPrivateDataExtensions + utils.LavaFormatTrace("[Archive Debug] After Swap", utils.LogAttr("bpm.RelayPrivateData().Extensions", bpm.RelayPrivateData().Extensions)) + return true + } + } + } + return false +} + func (bpm *BaseProtocolMessage) GetBlockedProviders() []string { if bpm.directiveHeaders == nil { return nil @@ -65,4 +110,5 @@ type ProtocolMessage interface { HashCacheRequest(chainId string) ([]byte, func([]byte) []byte, error) GetBlockedProviders() []string GetUserData() common.UserData + UpdateEarliestAndValidateExtensionRules(extensionParser *extensionslib.ExtensionParser, earliestBlockHashRequested int64, addon string, seenBlock int64) bool } diff --git a/protocol/chainlib/rest.go b/protocol/chainlib/rest.go index 47622a9c2c..c8d6513964 100644 --- a/protocol/chainlib/rest.go +++ b/protocol/chainlib/rest.go @@ -47,7 +47,7 @@ func (apip *RestChainParser) CraftMessage(parsing *spectypes.ParseDirective, con var data []byte = nil urlPath := string(craftData.Data) if craftData.ConnectionType == http.MethodPost { - // on post we need to send the data provided in the templace with the api as method + // on post we need to send the data provided in the template with the api as method data = craftData.Data urlPath = craftData.Path } @@ -69,12 +69,13 @@ func (apip *RestChainParser) CraftMessage(parsing *spectypes.ParseDirective, con if err != nil { return nil, err } - api := apiCont.api apiCollection, err := apip.getApiCollection(connectionType, apiCont.collectionKey.InternalPath, apiCont.collectionKey.Addon) if err != nil { return nil, err } - return apip.newChainMessage(api, spectypes.NOT_APPLICABLE, nil, restMessage, apiCollection), nil + parsedInput := parser.NewParsedInput() + parsedInput.SetBlock(spectypes.NOT_APPLICABLE) + return apip.newChainMessage(apiCont.api, parsedInput, restMessage, 
apiCollection), nil } // ParseMsg parses message data into chain message object @@ -126,26 +127,28 @@ func (apip *RestChainParser) ParseMsg(urlPath string, data []byte, connectionTyp utils.LogAttr("overwriteRequestedBlock", overwriteReqBlock), ) parsedInput.SetBlock(spectypes.NOT_APPLICABLE) + } else { + parsedInput.UsedDefaultValue = false } } - parsedBlock := parsedInput.GetBlock() - blockHashes, _ := parsedInput.GetBlockHashes() - - nodeMsg := apip.newChainMessage(apiCont.api, parsedBlock, blockHashes, &restMessage, apiCollection) + nodeMsg := apip.newChainMessage(apiCont.api, parsedInput, &restMessage, apiCollection) apip.BaseChainParser.ExtensionParsing(apiCollection.CollectionData.AddOn, nodeMsg, extensionInfo) return nodeMsg, apip.BaseChainParser.Validate(nodeMsg) } -func (*RestChainParser) newChainMessage(serviceApi *spectypes.Api, requestBlock int64, requestedHashes []string, restMessage *rpcInterfaceMessages.RestMessage, apiCollection *spectypes.ApiCollection) *baseChainMessageContainer { +func (*RestChainParser) newChainMessage(api *spectypes.Api, parsedInput *parser.ParsedInput, restMessage *rpcInterfaceMessages.RestMessage, apiCollection *spectypes.ApiCollection) *baseChainMessageContainer { + requestedBlock := parsedInput.GetBlock() + requestedHashes, _ := parsedInput.GetBlockHashes() nodeMsg := &baseChainMessageContainer{ - api: serviceApi, - apiCollection: apiCollection, + api: api, msg: restMessage, - latestRequestedBlock: requestBlock, + latestRequestedBlock: requestedBlock, requestedBlockHashes: requestedHashes, + apiCollection: apiCollection, resultErrorParsingMethod: restMessage.CheckResponseError, - parseDirective: GetParseDirective(serviceApi, apiCollection), + parseDirective: GetParseDirective(api, apiCollection), + usedDefaultValue: parsedInput.UsedDefaultValue, } return nodeMsg } @@ -173,6 +176,10 @@ func (apip *RestChainParser) getSupportedApi(name, connectionType string) (*ApiC // Return an error if spec does not exist if !ok { + if 
AllowMissingApisByDefault { + apiKey := ApiKey{Name: name, ConnectionType: connectionType, InternalPath: ""} + return apip.defaultApiContainer(apiKey) + } utils.LavaFormatDebug("rest api not supported", utils.LogAttr("name", name), utils.LogAttr("connectionType", connectionType), diff --git a/protocol/chainlib/rest_test.go b/protocol/chainlib/rest_test.go index 1521ac445d..18370af01a 100644 --- a/protocol/chainlib/rest_test.go +++ b/protocol/chainlib/rest_test.go @@ -88,9 +88,12 @@ func TestRestGetSupportedApi(t *testing.T) { serverApis: map[ApiKey]ApiContainer{{Name: "API1", ConnectionType: connectionType_test}: {api: &spectypes.Api{Name: "API1", Enabled: true}, collectionKey: CollectionKey{ConnectionType: connectionType_test}}}, }, } - _, err = apip.getSupportedApi("API2", connectionType_test) - assert.Error(t, err) - assert.ErrorIs(t, err, common.APINotSupportedError) + apiCont, err := apip.getSupportedApi("API2", connectionType_test) + if err == nil { + assert.Equal(t, "Default-API2", apiCont.api.Name) + } else { + assert.ErrorIs(t, err, common.APINotSupportedError) + } // Test case 3: Returns error if the API is disabled apip = &RestChainParser{ @@ -313,7 +316,11 @@ func TestRegexParsing(t *testing.T) { for _, api := range []string{ "/cosmos/staking/v1beta1/delegations/lava@17ym998u666u8w2qgjd5m7w7ydjqmu3mlgl7ua2/", } { - _, err := chainParser.ParseMsg(api, nil, http.MethodGet, nil, extensionslib.ExtensionInfo{LatestBlock: 0}) - require.Error(t, err) + chainMessage, err := chainParser.ParseMsg(api, nil, http.MethodGet, nil, extensionslib.ExtensionInfo{LatestBlock: 0}) + if err == nil { + require.Equal(t, "Default-"+api, chainMessage.GetApi().GetName()) + } else { + assert.ErrorIs(t, err, common.APINotSupportedError) + } } } diff --git a/protocol/chainlib/tendermintRPC.go b/protocol/chainlib/tendermintRPC.go index be0dae3648..3bb8867095 100644 --- a/protocol/chainlib/tendermintRPC.go +++ b/protocol/chainlib/tendermintRPC.go @@ -61,7 +61,12 @@ func (apip 
*TendermintChainParser) getSupportedApi(name, connectionType string) func (apip *TendermintChainParser) CraftMessage(parsing *spectypes.ParseDirective, connectionType string, craftData *CraftData, metadata []pairingtypes.Metadata) (ChainMessageForSend, error) { if craftData != nil { - chainMessage, err := apip.ParseMsg("", craftData.Data, craftData.ConnectionType, metadata, extensionslib.ExtensionInfo{LatestBlock: 0}) + path := craftData.Path + if craftData.InternalPath != "" { + path = craftData.InternalPath + } + + chainMessage, err := apip.ParseMsg(path, craftData.Data, craftData.ConnectionType, metadata, extensionslib.ExtensionInfo{LatestBlock: 0}) if err == nil { chainMessage.AppendHeader(metadata) } @@ -85,7 +90,7 @@ func (apip *TendermintChainParser) CraftMessage(parsing *spectypes.ParseDirectiv return nil, err } tenderMsg := rpcInterfaceMessages.TendermintrpcMessage{JsonrpcMessage: msg, Path: parsing.ApiName} - return apip.newChainMessage(apiCont.api, spectypes.NOT_APPLICABLE, nil, &tenderMsg, apiCollection), nil + return apip.newChainMessage(apiCont.api, spectypes.NOT_APPLICABLE, nil, &tenderMsg, apiCollection, false), nil } // ParseMsg parses message data into chain message object @@ -138,6 +143,7 @@ func (apip *TendermintChainParser) ParseMsg(urlPath string, data []byte, connect var apiCollection *spectypes.ApiCollection var latestRequestedBlock, earliestRequestedBlock int64 = 0, spectypes.LATEST_BLOCK blockHashes := []string{} + parsedDefault := true for idx, msg := range msgs { parsedInput := parser.NewParsedInput() // Check api is supported and save it in nodeMsg @@ -166,6 +172,9 @@ func (apip *TendermintChainParser) ParseMsg(urlPath string, data []byte, connect if hashes, err := parsedInput.GetBlockHashes(); err == nil { blockHashes = append(blockHashes, hashes...) 
} + if !parsedInput.UsedDefaultValue { + parsedDefault = false + } } else { parsedBlock, err := msg.ParseBlock(overwriteReqBlock) parsedInput.SetBlock(parsedBlock) @@ -175,6 +184,8 @@ func (apip *TendermintChainParser) ParseMsg(urlPath string, data []byte, connect utils.LogAttr("overwriteReqBlock", overwriteReqBlock), ) parsedInput.SetBlock(spectypes.NOT_APPLICABLE) + } else { + parsedInput.UsedDefaultValue = false } } @@ -232,10 +243,10 @@ func (apip *TendermintChainParser) ParseMsg(urlPath string, data []byte, connect if !isJsonrpc { tenderMsg.Path = urlPath // add path } - nodeMsg = apip.newChainMessage(api, latestRequestedBlock, blockHashes, &tenderMsg, apiCollection) + nodeMsg = apip.newChainMessage(api, latestRequestedBlock, blockHashes, &tenderMsg, apiCollection, parsedDefault) } else { var err error - nodeMsg, err = apip.newBatchChainMessage(api, latestRequestedBlock, earliestRequestedBlock, blockHashes, msgs, apiCollection) + nodeMsg, err = apip.newBatchChainMessage(api, latestRequestedBlock, earliestRequestedBlock, blockHashes, msgs, apiCollection, parsedDefault) if err != nil { return nil, err } @@ -245,7 +256,7 @@ func (apip *TendermintChainParser) ParseMsg(urlPath string, data []byte, connect return nodeMsg, apip.BaseChainParser.Validate(nodeMsg) } -func (*TendermintChainParser) newBatchChainMessage(serviceApi *spectypes.Api, requestedBlock int64, earliestRequestedBlock int64, requestedHashes []string, msgs []rpcInterfaceMessages.JsonrpcMessage, apiCollection *spectypes.ApiCollection) (*baseChainMessageContainer, error) { +func (*TendermintChainParser) newBatchChainMessage(serviceApi *spectypes.Api, requestedBlock int64, earliestRequestedBlock int64, requestedHashes []string, msgs []rpcInterfaceMessages.JsonrpcMessage, apiCollection *spectypes.ApiCollection, usedDefaultValue bool) (*baseChainMessageContainer, error) { batchMessage, err := rpcInterfaceMessages.NewBatchMessage(msgs) if err != nil { return nil, err @@ -259,11 +270,18 @@ func 
(*TendermintChainParser) newBatchChainMessage(serviceApi *spectypes.Api, re earliestRequestedBlock: earliestRequestedBlock, resultErrorParsingMethod: rpcInterfaceMessages.CheckResponseErrorForJsonRpcBatch, parseDirective: GetParseDirective(serviceApi, apiCollection), + usedDefaultValue: usedDefaultValue, } return nodeMsg, err } -func (*TendermintChainParser) newChainMessage(serviceApi *spectypes.Api, requestedBlock int64, requestedHashes []string, msg *rpcInterfaceMessages.TendermintrpcMessage, apiCollection *spectypes.ApiCollection) *baseChainMessageContainer { +// overwritten because tendermintrpc doesnt use POST but an empty connecionType +func (apip *TendermintChainParser) ExtractDataFromRequest(request *http.Request) (url string, data string, connectionType string, metadata []pairingtypes.Metadata, err error) { + url, data, _, metadata, err = apip.BaseChainParser.ExtractDataFromRequest(request) + return url, data, "", metadata, err +} + +func (*TendermintChainParser) newChainMessage(serviceApi *spectypes.Api, requestedBlock int64, requestedHashes []string, msg *rpcInterfaceMessages.TendermintrpcMessage, apiCollection *spectypes.ApiCollection, usedDefaultValue bool) *baseChainMessageContainer { nodeMsg := &baseChainMessageContainer{ api: serviceApi, apiCollection: apiCollection, @@ -272,6 +290,7 @@ func (*TendermintChainParser) newChainMessage(serviceApi *spectypes.Api, request msg: msg, resultErrorParsingMethod: msg.CheckResponseError, parseDirective: GetParseDirective(serviceApi, apiCollection), + usedDefaultValue: usedDefaultValue, } return nodeMsg } @@ -334,6 +353,7 @@ type TendermintRpcChainListener struct { refererData *RefererData consumerWsSubscriptionManager *ConsumerWSSubscriptionManager listeningAddress string + websocketConnectionLimiter *WebsocketConnectionLimiter } // NewTendermintRpcChainListener creates a new instance of TendermintRpcChainListener @@ -351,6 +371,7 @@ func NewTendermintRpcChainListener(ctx context.Context, listenEndpoint 
*lavasess logger: rpcConsumerLogs, refererData: refererData, consumerWsSubscriptionManager: consumerWsSubscriptionManager, + websocketConnectionLimiter: &WebsocketConnectionLimiter{ipToNumberOfActiveConnections: make(map[string]int64)}, } return chainListener @@ -369,6 +390,7 @@ func (apil *TendermintRpcChainListener) Serve(ctx context.Context, cmdFlags comm apiInterface := apil.endpoint.ApiInterface app.Use("/ws", func(c *fiber.Ctx) error { + apil.websocketConnectionLimiter.HandleFiberRateLimitFlags(c) // IsWebSocketUpgrade returns true if the client // requested upgrade to the WebSocket protocol. if websocket.IsWebSocketUpgrade(c) { @@ -378,6 +400,18 @@ func (apil *TendermintRpcChainListener) Serve(ctx context.Context, cmdFlags comm return fiber.ErrUpgradeRequired }) webSocketCallback := websocket.New(func(websocketConn *websocket.Conn) { + canOpenConnection, decreaseIpConnection := apil.websocketConnectionLimiter.CanOpenConnection(websocketConn) + defer decreaseIpConnection() + if !canOpenConnection { + return + } + + rateLimitInf := websocketConn.Locals(WebSocketRateLimitHeader) + rateLimit, assertionSuccessful := rateLimitInf.(int64) + if !assertionSuccessful || rateLimit < 0 { + rateLimit = 0 + } + utils.LavaFormatDebug("tendermintrpc websocket opened", utils.LogAttr("consumerIp", websocketConn.LocalAddr().String())) defer utils.LavaFormatDebug("tendermintrpc websocket closed", utils.LogAttr("consumerIp", websocketConn.LocalAddr().String())) @@ -394,6 +428,7 @@ func (apil *TendermintRpcChainListener) Serve(ctx context.Context, cmdFlags comm RelaySender: apil.relaySender, ConsumerWsSubscriptionManager: apil.consumerWsSubscriptionManager, WebsocketConnectionUID: strconv.FormatUint(utils.GenerateUniqueIdentifier(), 10), + headerRateLimit: uint64(rateLimit), }) consumerWebsocketManager.ListenToMessages() @@ -601,11 +636,11 @@ func NewtendermintRpcChainProxy(ctx context.Context, nConns uint, rpcProviderEnd ErrorHandler: &TendermintRPCErrorHandler{}, ChainID: 
rpcProviderEndpoint.ChainID, }, - conn: map[string]*chainproxy.Connector{}, + conn: nil, }, } - return cp, cp.start(ctx, nConns, nodeUrl, nil) + return cp, cp.start(ctx, nConns, nodeUrl) } func (cp *tendermintRpcChainProxy) SendNodeMsg(ctx context.Context, ch chan interface{}, chainMessage ChainMessageForSend) (relayReply *RelayReplyWrapper, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { @@ -646,8 +681,7 @@ func (cp *tendermintRpcChainProxy) SendURI(ctx context.Context, nodeMessage *rpc httpClient := cp.httpClient // appending hashed url - internalPath := chainMessage.GetApiCollection().GetCollectionData().InternalPath - grpc.SetTrailer(ctx, metadata.Pairs(RPCProviderNodeAddressHash, cp.conn[internalPath].GetUrlHash())) + grpc.SetTrailer(ctx, metadata.Pairs(RPCProviderNodeAddressHash, cp.conn.GetUrlHash())) // construct the url by concatenating the node url with the path variable url := cp.NodeUrl.Url + "/" + nodeMessage.Path @@ -723,23 +757,18 @@ func (cp *tendermintRpcChainProxy) SendURI(ctx context.Context, nodeMessage *rpc func (cp *tendermintRpcChainProxy) SendRPC(ctx context.Context, nodeMessage *rpcInterfaceMessages.TendermintrpcMessage, ch chan interface{}, chainMessage ChainMessageForSend) (relayReply *RelayReplyWrapper, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { // Get rpc connection from the connection pool var rpc *rpcclient.Client - internalPath := chainMessage.GetApiCollection().CollectionData.InternalPath - - connector := cp.conn[internalPath] - - rpc, err = connector.GetRpc(ctx, true) + rpc, err = cp.conn.GetRpc(ctx, true) if err != nil { return nil, "", nil, err } // return the rpc connection to the websocket pool after the function completes - defer connector.ReturnRpc(rpc) + defer cp.conn.ReturnRpc(rpc) // appending hashed url - grpc.SetTrailer(ctx, metadata.Pairs(RPCProviderNodeAddressHash, connector.GetUrlHash())) + grpc.SetTrailer(ctx, 
metadata.Pairs(RPCProviderNodeAddressHash, cp.conn.GetUrlHash())) // create variables for the rpc message and reply message var rpcMessage *rpcclient.JsonrpcMessage - var replyMessage *rpcInterfaceMessages.RPCResponse var sub *rpcclient.ClientSubscription if len(nodeMessage.GetHeaders()) > 0 { for _, metadata := range nodeMessage.GetHeaders() { @@ -768,13 +797,13 @@ func (cp *tendermintRpcChainProxy) SendRPC(ctx context.Context, nodeMessage *rpc cp.NodeUrl.SetIpForwardingIfNecessary(ctx, rpc.SetHeader) // perform the rpc call rpcMessage, nodeErr = rpc.CallContext(connectCtx, nodeMessage.ID, nodeMessage.Method, nodeMessage.Params, false, nodeMessage.GetDisableErrorHandling()) - if err != nil { - if common.StatusCodeError504.Is(err) || common.StatusCodeError429.Is(err) || common.StatusCodeErrorStrict.Is(err) { - return nil, "", nil, utils.LavaFormatWarning("Received invalid status code", err, utils.Attribute{Key: "chainID", Value: cp.BaseChainProxy.ChainID}, utils.Attribute{Key: "apiName", Value: chainMessage.GetApi().Name}) + if nodeErr != nil { + if common.StatusCodeError504.Is(nodeErr) || common.StatusCodeError429.Is(nodeErr) || common.StatusCodeErrorStrict.Is(nodeErr) { + return nil, "", nil, utils.LavaFormatWarning("Received invalid status code", nodeErr, utils.Attribute{Key: "chainID", Value: cp.BaseChainProxy.ChainID}, utils.Attribute{Key: "apiName", Value: chainMessage.GetApi().Name}) } // Validate if the error is related to the provider connection to the node or it is a valid error // in case the error is valid (e.g. 
bad input parameters) the error will return in the form of a valid error reply - if parsedError := cp.HandleNodeError(ctx, err); parsedError != nil { + if parsedError := cp.HandleNodeError(ctx, nodeErr); parsedError != nil { return nil, "", nil, parsedError } } @@ -782,27 +811,27 @@ func (cp *tendermintRpcChainProxy) SendRPC(ctx context.Context, nodeMessage *rpc var replyMsg *rpcInterfaceMessages.RPCResponse // the error check here would only wrap errors not from the rpc - if nodeErr != nil { - utils.LavaFormatDebug("got error from node", utils.LogAttr("GUID", ctx), utils.LogAttr("nodeErr", nodeErr)) - return nil, "", nil, nodeErr + rpcMessage = TryRecoverNodeErrorFromClientError(nodeErr) + if rpcMessage == nil { + utils.LavaFormatDebug("got error from node", utils.LogAttr("GUID", ctx), utils.LogAttr("nodeErr", nodeErr)) + return nil, "", nil, nodeErr + } } - replyMessage, err = rpcInterfaceMessages.ConvertTendermintMsg(rpcMessage) + replyMsg, err = rpcInterfaceMessages.ConvertTendermintMsg(rpcMessage) if err != nil { return nil, "", nil, utils.LavaFormatError("tendermintRPC error", err) } // if we didn't get a node error. 
- if replyMessage.Error == nil { + if replyMsg.Error == nil { // validate result is valid - responseIsNilValidationError := ValidateNilResponse(string(replyMessage.Result)) + responseIsNilValidationError := ValidateNilResponse(string(replyMsg.Result)) if responseIsNilValidationError != nil { return nil, "", nil, responseIsNilValidationError } } - replyMsg = replyMessage - err = cp.ValidateRequestAndResponseIds(nodeMessage.ID, rpcMessage.ID) if err != nil { return nil, "", nil, utils.LavaFormatError("tendermintRPC ID mismatch error", err, diff --git a/protocol/chainlib/tendermintRPC_test.go b/protocol/chainlib/tendermintRPC_test.go index 93e4930146..1fd6696ebb 100644 --- a/protocol/chainlib/tendermintRPC_test.go +++ b/protocol/chainlib/tendermintRPC_test.go @@ -10,6 +10,7 @@ import ( "github.com/lavanet/lava/v4/protocol/chainlib/chainproxy/rpcInterfaceMessages" "github.com/lavanet/lava/v4/protocol/chainlib/extensionslib" + "github.com/lavanet/lava/v4/protocol/common" spectypes "github.com/lavanet/lava/v4/x/spec/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -84,8 +85,12 @@ func TestTendermintGetSupportedApi(t *testing.T) { serverApis: map[ApiKey]ApiContainer{{Name: "API1", ConnectionType: connectionType_test}: {api: &spectypes.Api{Name: "API1", Enabled: true}, collectionKey: CollectionKey{ConnectionType: connectionType_test}}}, }, } - _, err = apip.getSupportedApi("API2", connectionType_test) - assert.Error(t, err) + apiCont, err := apip.getSupportedApi("API2", connectionType_test) + if err == nil { + assert.Equal(t, "Default-API2", apiCont.api.Name) + } else { + assert.ErrorIs(t, err, common.APINotSupportedError) + } // Test case 3: Returns error if the API is disabled apip = &TendermintChainParser{ diff --git a/protocol/chainlib/websocket_connection_limiter.go b/protocol/chainlib/websocket_connection_limiter.go new file mode 100644 index 0000000000..9be6a27e73 --- /dev/null +++ 
b/protocol/chainlib/websocket_connection_limiter.go @@ -0,0 +1,150 @@ +package chainlib + +import ( + "fmt" + "net" + "strconv" + "strings" + "sync" + + "github.com/gofiber/fiber/v2" + "github.com/gofiber/websocket/v2" + "github.com/lavanet/lava/v4/protocol/common" + "github.com/lavanet/lava/v4/utils" +) + +// WebsocketConnection defines the interface for websocket connections +type WebsocketConnection interface { + // Add only the methods you need to mock + RemoteAddr() net.Addr + Locals(key string) interface{} + WriteMessage(messageType int, data []byte) error +} + +// Will limit a certain amount of connections per IP +type WebsocketConnectionLimiter struct { + ipToNumberOfActiveConnections map[string]int64 + lock sync.RWMutex +} + +func (wcl *WebsocketConnectionLimiter) HandleFiberRateLimitFlags(c *fiber.Ctx) { + userAgent := c.Get(fiber.HeaderUserAgent) + // Store the User-Agent in locals for later use + c.Locals(fiber.HeaderUserAgent, userAgent) + + forwardedFor := c.Get(common.IP_FORWARDING_HEADER_NAME) + if forwardedFor == "" { + // If not present, fallback to c.IP() which retrieves the real IP + forwardedFor = c.IP() + } + // Store the X-Forwarded-For or real IP in the context + c.Locals(common.IP_FORWARDING_HEADER_NAME, forwardedFor) + + rateLimitString := c.Get(WebSocketRateLimitHeader) + rateLimit, err := strconv.ParseInt(rateLimitString, 10, 64) + if err != nil { + rateLimit = 0 + } + c.Locals(WebSocketRateLimitHeader, rateLimit) + + connectionLimitString := c.Get(WebSocketOpenConnectionsLimitHeader) + connectionLimit, err := strconv.ParseInt(connectionLimitString, 10, 64) + if err != nil { + connectionLimit = 0 + } + c.Locals(WebSocketOpenConnectionsLimitHeader, connectionLimit) +} + +func (wcl *WebsocketConnectionLimiter) getConnectionLimit(websocketConn WebsocketConnection) int64 { + connectionLimitHeaderValue, ok := websocketConn.Locals(WebSocketOpenConnectionsLimitHeader).(int64) + if !ok || connectionLimitHeaderValue < 0 { + 
connectionLimitHeaderValue = 0 + } + // Do not allow header to overwrite flag value if its set. + if MaximumNumberOfParallelWebsocketConnectionsPerIp > 0 && connectionLimitHeaderValue > MaximumNumberOfParallelWebsocketConnectionsPerIp { + return MaximumNumberOfParallelWebsocketConnectionsPerIp + } + // Return the larger of the global limit (if set) or the header value + return utils.Max(MaximumNumberOfParallelWebsocketConnectionsPerIp, connectionLimitHeaderValue) +} + +func (wcl *WebsocketConnectionLimiter) CanOpenConnection(websocketConn WebsocketConnection) (bool, func()) { + // Check which connection limit is higher and use that. + connectionLimit := wcl.getConnectionLimit(websocketConn) + decreaseIpConnectionCallback := func() {} + if connectionLimit > 0 { // 0 is disabled. + ipForwardedInterface := websocketConn.Locals(common.IP_FORWARDING_HEADER_NAME) + ipForwarded, assertionSuccessful := ipForwardedInterface.(string) + if !assertionSuccessful { + ipForwarded = "" + } + ip := websocketConn.RemoteAddr().String() + userAgent, assertionSuccessful := websocketConn.Locals("User-Agent").(string) + if !assertionSuccessful { + userAgent = "" + } + key := wcl.getKey(ip, ipForwarded, userAgent) + + // Check current connections before incrementing + currentConnections := wcl.getCurrentAmountOfConnections(key) + // If already at or exceeding limit, deny the connection + if currentConnections >= connectionLimit { + websocketConn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, fmt.Sprintf("Too Many Open Connections, limited to %d", connectionLimit))) + return false, decreaseIpConnectionCallback + } + // If under limit, increment and return cleanup function + wcl.addIpConnection(key) + decreaseIpConnectionCallback = func() { wcl.decreaseIpConnection(key) } + } + return true, decreaseIpConnectionCallback +} + +func (wcl *WebsocketConnectionLimiter) getCurrentAmountOfConnections(key string) int64 { + wcl.lock.RLock() + defer 
wcl.lock.RUnlock() + return wcl.ipToNumberOfActiveConnections[key] +} + +func (wcl *WebsocketConnectionLimiter) addIpConnection(key string) { + wcl.lock.Lock() + defer wcl.lock.Unlock() + // wether it exists or not we add 1. + wcl.ipToNumberOfActiveConnections[key] += 1 +} + +func (wcl *WebsocketConnectionLimiter) decreaseIpConnection(key string) { + wcl.lock.Lock() + defer wcl.lock.Unlock() + // it must exist as we dont get here without adding it prior + wcl.ipToNumberOfActiveConnections[key] -= 1 + if wcl.ipToNumberOfActiveConnections[key] == 0 { + delete(wcl.ipToNumberOfActiveConnections, key) + } +} + +func (wcl *WebsocketConnectionLimiter) getKey(ip string, forwardedIp string, userAgent string) string { + returnedKey := "" + ipOriginal := net.ParseIP(ip) + if ipOriginal != nil { + returnedKey = ipOriginal.String() + } else { + ipPart, _, err := net.SplitHostPort(ip) + if err == nil { + returnedKey = ipPart + } + } + ips := strings.Split(forwardedIp, ",") + for _, ipStr := range ips { + ipParsed := net.ParseIP(strings.TrimSpace(ipStr)) + if ipParsed != nil { + returnedKey += SEP + ipParsed.String() + } else { + ipPart, _, err := net.SplitHostPort(ipStr) + if err == nil { + returnedKey += SEP + ipPart + } + } + } + returnedKey += SEP + userAgent + return returnedKey +} diff --git a/protocol/common/cobra_common.go b/protocol/common/cobra_common.go index 338b003f67..42392f19a5 100644 --- a/protocol/common/cobra_common.go +++ b/protocol/common/cobra_common.go @@ -41,12 +41,17 @@ const ( // optimizer qos server flags OptimizerQosServerAddressFlag = "optimizer-qos-server-address" // address of the optimizer qos server to send the qos reports + OptimizerQosListenFlag = "optimizer-qos-listen" // enable listening for qos reports on metrics endpoint OptimizerQosServerPushIntervalFlag = "optimizer-qos-push-interval" // interval to push the qos reports to the optimizer qos server OptimizerQosServerSamplingIntervalFlag = "optimizer-qos-sampling-interval" // interval to 
sample the qos reports // websocket flags RateLimitWebSocketFlag = "rate-limit-websocket-requests-per-connection" BanDurationForWebsocketRateLimitExceededFlag = "ban-duration-for-websocket-rate-limit-exceeded" + LimitParallelWebsocketConnectionsPerIpFlag = "limit-parallel-websocket-connections-per-ip" + LimitWebsocketIdleTimeFlag = "limit-websocket-connection-idle-time" RateLimitRequestPerSecondFlag = "rate-limit-requests-per-second" + // specification default flags + AllowMissingApisByDefaultFlagName = "allow-missing-apis-by-default" ) const ( diff --git a/protocol/common/endpoints.go b/protocol/common/endpoints.go index 26d08ca5a5..2379512708 100644 --- a/protocol/common/endpoints.go +++ b/protocol/common/endpoints.go @@ -29,6 +29,7 @@ const ( REPORTED_PROVIDERS_HEADER_NAME = "Lava-Reported-Providers" USER_REQUEST_TYPE = "lava-user-request-type" STATEFUL_API_HEADER = "lava-stateful-api" + REQUESTED_BLOCK_HEADER_NAME = "lava-parsed-requested-block" LAVA_IDENTIFIED_NODE_ERROR_HEADER = "lava-identified-node-error" LAVAP_VERSION_HEADER_NAME = "Lavap-Version" LAVA_CONSUMER_PROCESS_GUID = "lava-consumer-process-guid" @@ -76,7 +77,7 @@ func (nurl NodeUrl) String() string { urlStr := nurl.UrlStr() if len(nurl.Addons) > 0 { - return urlStr + ", addons: (" + strings.Join(nurl.Addons, ",") + ")" + return urlStr + ", addons: (" + strings.Join(nurl.Addons, ",") + ")" + ", internal-path: " + nurl.InternalPath } return urlStr } diff --git a/protocol/common/return_errors.go b/protocol/common/return_errors.go index 9020a26f17..299828e00f 100644 --- a/protocol/common/return_errors.go +++ b/protocol/common/return_errors.go @@ -36,6 +36,16 @@ var JsonRpcRateLimitError = JsonRPCErrorMessage{ }, } +var JsonRpcParseError = JsonRPCErrorMessage{ + JsonRPC: "2.0", + Id: -1, + Error: JsonRPCError{ + Code: -32700, + Message: "Parse error", + Data: "Failed to parse the request body as JSON", + }, +} + var JsonRpcSubscriptionNotFoundError = JsonRPCErrorMessage{ JsonRPC: "2.0", Id: 1, diff 
--git a/protocol/integration/protocol_test.go b/protocol/integration/protocol_test.go index 03ef7cdd2f..a5b2b86f84 100644 --- a/protocol/integration/protocol_test.go +++ b/protocol/integration/protocol_test.go @@ -6,19 +6,25 @@ import ( "encoding/json" "fmt" "io" + "log" "net/http" "net/url" "os" "strconv" "strings" "sync" + "sync/atomic" "testing" "time" + slices "github.com/lavanet/lava/v4/utils/lavaslices" + pairingtypes "github.com/lavanet/lava/v4/x/pairing/types" + "github.com/gorilla/websocket" "github.com/lavanet/lava/v4/ecosystem/cache" "github.com/lavanet/lava/v4/protocol/chainlib" "github.com/lavanet/lava/v4/protocol/chainlib/chainproxy/rpcInterfaceMessages" + "github.com/lavanet/lava/v4/protocol/chainlib/chainproxy/rpcclient" "github.com/lavanet/lava/v4/protocol/chaintracker" "github.com/lavanet/lava/v4/protocol/common" "github.com/lavanet/lava/v4/protocol/lavaprotocol/finalizationconsensus" @@ -33,6 +39,8 @@ import ( "github.com/lavanet/lava/v4/utils" "github.com/lavanet/lava/v4/utils/rand" "github.com/lavanet/lava/v4/utils/sigs" + epochstoragetypes "github.com/lavanet/lava/v4/x/epochstorage/types" + "github.com/stretchr/testify/require" "google.golang.org/grpc/connectivity" @@ -41,9 +49,10 @@ import ( ) var ( - seed int64 - randomizer *sigs.ZeroReader - addressGen uniqueAddressGenerator + seed int64 + randomizer *sigs.ZeroReader + addressGen uniqueAddressGenerator + numberOfRetriesOnNodeErrorsProviderSide int = 2 ) func TestMain(m *testing.M) { @@ -165,6 +174,24 @@ func createInMemoryRewardDb(specs []string) (*rewardserver.RewardDB, error) { return rewardDB, nil } +type PolicySt struct { + addons []string + extensions []string + apiInterface string +} + +func (a PolicySt) GetSupportedAddons(string) ([]string, error) { + return a.addons, nil +} + +func (a PolicySt) GetSupportedExtensions(string) ([]epochstoragetypes.EndpointService, error) { + ret := []epochstoragetypes.EndpointService{} + for _, ext := range a.extensions { + ret = append(ret, 
epochstoragetypes.EndpointService{Extension: ext, ApiInterface: a.apiInterface}) + } + return ret, nil +} + type rpcConsumerOptions struct { specId string apiInterface string @@ -180,6 +207,7 @@ type rpcConsumerOptions struct { type rpcConsumerOut struct { rpcConsumerServer *rpcconsumer.RPCConsumerServer mockConsumerStateTracker *mockConsumerStateTracker + cache *performance.Cache } func createRpcConsumer(t *testing.T, ctx context.Context, rpcConsumerOptions rpcConsumerOptions) rpcConsumerOut { @@ -209,6 +237,36 @@ func createRpcConsumer(t *testing.T, ctx context.Context, rpcConsumerOptions rpc consumerSessionManager := lavasession.NewConsumerSessionManager(rpcEndpoint, optimizer, nil, nil, "test", lavasession.NewActiveSubscriptionProvidersStorage()) consumerSessionManager.UpdateAllProviders(rpcConsumerOptions.epoch, rpcConsumerOptions.pairingList) + // Just setting the providers available extensions and policy so the consumer is aware of them + addons := []string{} + extensions := []string{} + for _, provider := range rpcConsumerOptions.pairingList { + for _, endpoint := range provider.Endpoints { + for addon := range endpoint.Addons { + if !slices.Contains(addons, addon) { + addons = append(addons, addon) + } + if !slices.Contains(extensions, addon) { + extensions = append(extensions, addon) + } + } + for extension := range endpoint.Extensions { + if !slices.Contains(extensions, extension) { + extensions = append(extensions, extension) + } + if !slices.Contains(addons, extension) { + addons = append(addons, extension) + } + } + } + } + policy := PolicySt{ + addons: addons, + extensions: extensions, + apiInterface: rpcConsumerOptions.apiInterface, + } + chainParser.SetPolicy(policy, rpcConsumerOptions.specId, rpcConsumerOptions.apiInterface) + var cache *performance.Cache = nil if rpcConsumerOptions.cacheListenAddress != "" { cache, err = performance.InitCache(ctx, rpcConsumerOptions.cacheListenAddress) @@ -239,7 +297,7 @@ func createRpcConsumer(t *testing.T, ctx 
context.Context, rpcConsumerOptions rpc require.True(t, consumerUp) } - return rpcConsumerOut{rpcConsumerServer, consumerStateTracker} + return rpcConsumerOut{rpcConsumerServer, consumerStateTracker, cache} } type rpcProviderOptions struct { @@ -344,7 +402,7 @@ func createRpcProvider(t *testing.T, ctx context.Context, rpcProviderOptions rpc chainTracker.StartAndServe(ctx) reliabilityManager := reliabilitymanager.NewReliabilityManager(chainTracker, &mockProviderStateTracker, rpcProviderOptions.account.Addr.String(), chainRouter, chainParser) mockReliabilityManager := NewMockReliabilityManager(reliabilityManager) - rpcProviderServer.ServeRPCRequests(ctx, rpcProviderEndpoint, chainParser, rws, providerSessionManager, mockReliabilityManager, rpcProviderOptions.account.SK, cache, chainRouter, &mockProviderStateTracker, rpcProviderOptions.account.Addr, rpcProviderOptions.lavaChainID, rpcprovider.DEFAULT_ALLOWED_MISSING_CU, nil, nil, nil, false, nil) + rpcProviderServer.ServeRPCRequests(ctx, rpcProviderEndpoint, chainParser, rws, providerSessionManager, mockReliabilityManager, rpcProviderOptions.account.SK, cache, chainRouter, &mockProviderStateTracker, rpcProviderOptions.account.Addr, rpcProviderOptions.lavaChainID, rpcprovider.DEFAULT_ALLOWED_MISSING_CU, nil, nil, nil, false, nil, numberOfRetriesOnNodeErrorsProviderSide) listener := rpcprovider.NewProviderListener(ctx, rpcProviderEndpoint.NetworkAddress, "/health") err = listener.RegisterReceiver(rpcProviderServer, rpcProviderEndpoint) require.NoError(t, err) @@ -1139,18 +1197,18 @@ func TestArchiveProvidersRetry(t *testing.T) { statusCode int }{ { - name: "happy flow", + name: "archive with 1 errored provider", numOfProviders: 3, archiveProviders: 3, - nodeErrorProviders: 0, + nodeErrorProviders: 1, expectedResult: `{"result": "success"}`, statusCode: 200, }, { - name: "archive with 1 errored provider", + name: "happy flow", numOfProviders: 3, archiveProviders: 3, - nodeErrorProviders: 1, + nodeErrorProviders: 0, 
expectedResult: `{"result": "success"}`, statusCode: 200, }, @@ -1182,7 +1240,6 @@ func TestArchiveProvidersRetry(t *testing.T) { numProviders := play.numOfProviders consumerListenAddress := addressGen.GetAddress() - pairingList := map[uint64]*lavasession.ConsumerSessionsWithProvider{} type providerData struct { account sigs.Account @@ -1228,14 +1285,21 @@ func TestArchiveProvidersRetry(t *testing.T) { } } + pairingList := map[uint64]*lavasession.ConsumerSessionsWithProvider{} for i := 0; i < numProviders; i++ { + extensions := map[string]struct{}{} + if i+1 <= play.archiveProviders { + extensions = map[string]struct{}{"archive": {}} + } pairingList[uint64(i)] = &lavasession.ConsumerSessionsWithProvider{ PublicLavaAddress: providers[i].account.Addr.String(), + Endpoints: []*lavasession.Endpoint{ { NetworkAddress: providers[i].endpoint.NetworkAddress.Address, Enabled: true, Geolocation: 1, + Extensions: extensions, }, }, Sessions: map[int64]*lavasession.SingleConsumerSession{}, @@ -1258,20 +1322,22 @@ func TestArchiveProvidersRetry(t *testing.T) { rpcConsumerOut := createRpcConsumer(t, ctx, rpcConsumerOptions) require.NotNil(t, rpcConsumerOut.rpcConsumerServer) - client := http.Client{Timeout: 1000 * time.Millisecond} - req, err := http.NewRequest(http.MethodGet, "http://"+consumerListenAddress+"/lavanet/lava/conflict/params", nil) - req.Header["lava-extension"] = []string{"archive"} - require.NoError(t, err) + for i := 0; i < 10; i++ { + client := http.Client{Timeout: 10000 * time.Millisecond} + req, err := http.NewRequest(http.MethodGet, "http://"+consumerListenAddress+"/lavanet/lava/conflict/params", nil) + req.Header["lava-extension"] = []string{"archive"} + require.NoError(t, err) - resp, err := client.Do(req) - require.NoError(t, err) - require.Equal(t, play.statusCode, resp.StatusCode) + resp, err := client.Do(req) + require.NoError(t, err) + require.Equal(t, play.statusCode, resp.StatusCode) - bodyBytes, err := io.ReadAll(resp.Body) - require.NoError(t, err) 
+ bodyBytes, err := io.ReadAll(resp.Body) + require.NoError(t, err) - resp.Body.Close() - require.Equal(t, string(bodyBytes), play.expectedResult) + resp.Body.Close() + require.Equal(t, string(bodyBytes), play.expectedResult) + } }) } } @@ -1884,3 +1950,241 @@ func TestConsumerProviderWithProviderSideCache(t *testing.T) { // Verify that overall we have 2 cache hits require.Equal(t, 2, cacheHits) } + +func TestArchiveProvidersRetryOnParsedHash(t *testing.T) { + playbook := []struct { + name string + numOfProviders int + archiveProviders int + expectedResult string + statusCode int + }{ + { + name: "happy flow", + numOfProviders: 2, + archiveProviders: 1, + expectedResult: `{"jsonrpc":"2.0","id":1,"result":{"result":"success"}}`, + statusCode: 200, + }, + } + for _, play := range playbook { + t.Run(play.name, func(t *testing.T) { + ctx := context.Background() + // can be any spec and api interface + specId := "NEAR" + apiInterface := spectypes.APIInterfaceJsonRPC + numberOfRetriesOnNodeErrorsProviderSide = 0 + epoch := uint64(100) + lavaChainID := "lava" + numProviders := play.numOfProviders + cacheListenAddress := addressGen.GetAddress() + createCacheServer(t, ctx, cacheListenAddress) + blockHash := "5NFtBbExnjk4TFXpfXhJidcCm5KYPk7QCY51nWiwyQNU" + consumerListenAddress := addressGen.GetAddress() + + type providerData struct { + account sigs.Account + endpoint *lavasession.RPCProviderEndpoint + server *rpcprovider.RPCProviderServer + replySetter *ReplySetter + mockChainFetcher *MockChainFetcher + mockReliabilityManager *MockReliabilityManager + } + providers := []providerData{} + for i := 0; i < numProviders; i++ { + account := sigs.GenerateDeterministicFloatingKey(randomizer) + providerDataI := providerData{account: account} + providers = append(providers, providerDataI) + } + consumerAccount := sigs.GenerateDeterministicFloatingKey(randomizer) + pairingList := map[uint64]*lavasession.ConsumerSessionsWithProvider{} + + allowArchiveRet := atomic.Bool{} + 
allowArchiveRet.Store(false) + stageTwoCheckFirstTimeArchive := false + timesCalledProvidersOnSecondStage := 0 + + for i := 0; i < numProviders; i++ { + ctx := context.Background() + providerDataI := providers[i] + listenAddress := addressGen.GetAddress() + handler := func(req []byte, header http.Header) (data []byte, status int) { + var jsonRpcMessage rpcInterfaceMessages.JsonrpcMessage + fmt.Println("regular handler got request", string(req)) + err := json.Unmarshal(req, &jsonRpcMessage) + require.NoError(t, err) + if strings.Contains(string(req), blockHash) { + fmt.Println("hash request, allowing valid archive retry", string(req)) + allowArchiveRet.Store(true) + } + if stageTwoCheckFirstTimeArchive { + timesCalledProvidersOnSecondStage++ + } + id, _ := json.Marshal(1) + res := rpcclient.JsonrpcMessage{ + Version: "2.0", + ID: id, + Error: &rpcclient.JsonError{Code: 1, Message: "test"}, + } + resBytes, _ := json.Marshal(res) + return resBytes, 299 + } + addons := []string(nil) + extensions := map[string]struct{}{} + if i >= numProviders-play.archiveProviders { + extensions = map[string]struct{}{"archive": {}} + addons = []string{"archive", ""} + handler = func(req []byte, header http.Header) (data []byte, status int) { + fmt.Println("archive handler got request", string(req)) + var jsonRpcMessage rpcInterfaceMessages.JsonrpcMessage + err := json.Unmarshal(req, &jsonRpcMessage) + require.NoError(t, err) + if stageTwoCheckFirstTimeArchive { + timesCalledProvidersOnSecondStage++ + } + if allowArchiveRet.Load() == true { + id, _ := json.Marshal(1) + resultBody, _ := json.Marshal(map[string]string{"result": "success"}) + res := rpcclient.JsonrpcMessage{ + Version: "2.0", + ID: id, + Result: resultBody, + } + resBytes, _ := json.Marshal(res) + fmt.Println("returning success", string(resBytes)) + return resBytes, http.StatusOK + } + id, _ := json.Marshal(1) + res := rpcclient.JsonrpcMessage{ + Version: "2.0", + ID: id, + Error: &rpcclient.JsonError{Code: 1, Message: 
"test"}, + } + resBytes, _ := json.Marshal(res) + fmt.Println("returning 299", string(resBytes)) + if strings.Contains(string(req), blockHash) { + allowArchiveRet.Store(true) + fmt.Println(allowArchiveRet.Load(), "hash request", string(req)) + } + return resBytes, 299 + } + } + + rpcProviderOptions := rpcProviderOptions{ + consumerAddress: consumerAccount.Addr.String(), + specId: specId, + apiInterface: apiInterface, + listenAddress: listenAddress, + account: providerDataI.account, + lavaChainID: lavaChainID, + addons: addons, + providerUniqueId: fmt.Sprintf("provider%d", i), + } + providers[i].server, providers[i].endpoint, providers[i].replySetter, providers[i].mockChainFetcher, providers[i].mockReliabilityManager = createRpcProvider(t, ctx, rpcProviderOptions) + providers[i].replySetter.handler = handler + + pairingList[uint64(i)] = &lavasession.ConsumerSessionsWithProvider{ + PublicLavaAddress: providers[i].account.Addr.String(), + Endpoints: []*lavasession.Endpoint{ + { + NetworkAddress: providers[i].endpoint.NetworkAddress.Address, + Enabled: true, + Geolocation: 1, + Extensions: extensions, + }, + }, + Sessions: map[int64]*lavasession.SingleConsumerSession{}, + MaxComputeUnits: 10000, + UsedComputeUnits: 0, + PairingEpoch: epoch, + } + } + + rpcConsumerOptions := rpcConsumerOptions{ + specId: specId, + apiInterface: apiInterface, + account: consumerAccount, + consumerListenAddress: consumerListenAddress, + epoch: epoch, + pairingList: pairingList, + requiredResponses: 1, + lavaChainID: lavaChainID, + cacheListenAddress: cacheListenAddress, + } + rpcConsumerOut := createRpcConsumer(t, ctx, rpcConsumerOptions) + require.NotNil(t, rpcConsumerOut.rpcConsumerServer) + // wait for consumer to bootstrap + params, _ := json.Marshal([]string{blockHash}) + id, _ := json.Marshal(1) + reqBody := rpcclient.JsonrpcMessage{ + Version: "2.0", + Method: "block", // Query latest block + Params: params, // Use "final" to get the latest final block + ID: id, + } + + // Convert 
request to JSON + jsonData, err := json.Marshal(reqBody) + if err != nil { + log.Fatalf("Error marshalling request: %v", err) + } + + client := http.Client{Timeout: 100000000 * time.Millisecond} + req, err := http.NewRequest(http.MethodPost, "http://"+consumerListenAddress, bytes.NewBuffer(jsonData)) + require.NoError(t, err) + + resp, err := client.Do(req) + require.NoError(t, err) + require.Equal(t, play.statusCode, resp.StatusCode) + + bodyBytes, err := io.ReadAll(resp.Body) + require.NoError(t, err) + + resp.Body.Close() + require.Equal(t, string(bodyBytes), play.expectedResult) + fmt.Println("timesCalledProviders", timesCalledProvidersOnSecondStage) + + // Allow relay to hit cache. + for { + time.Sleep(time.Millisecond) + cacheCtx, cancel := context.WithTimeout(ctx, time.Second) + cacheReply, err := rpcConsumerOut.cache.GetEntry(cacheCtx, &pairingtypes.RelayCacheGet{ + RequestHash: []byte("test"), + RequestedBlock: 1005, + ChainId: specId, + SeenBlock: 1005, + BlocksHashesToHeights: []*pairingtypes.BlockHashToHeight{{Hash: blockHash, Height: spectypes.NOT_APPLICABLE}}, + }) // caching in the consumer doesn't care about hashes, and we don't have data on finalization yet + cancel() + if err != nil { + continue + } + if len(cacheReply.BlocksHashesToHeights) > 0 && cacheReply.BlocksHashesToHeights[0].Height != spectypes.NOT_APPLICABLE { + fmt.Println("cache has this entry", cacheReply.BlocksHashesToHeights) + break + } else { + fmt.Println("cache does not have this entry", cacheReply.BlocksHashesToHeights) + } + } + // attempt 2nd time, this time we should have only one retry + // set stageTwoCheckFirstTimeArchive to true + stageTwoCheckFirstTimeArchive = true + // create new client + client = http.Client{Timeout: 10000 * time.Millisecond} + req, err = http.NewRequest(http.MethodPost, "http://"+consumerListenAddress, bytes.NewBuffer(jsonData)) + require.NoError(t, err) + + resp, err = client.Do(req) + require.NoError(t, err) + require.Equal(t, play.statusCode, 
resp.StatusCode) + + bodyBytes, err = io.ReadAll(resp.Body) + require.NoError(t, err) + + resp.Body.Close() + require.Equal(t, string(bodyBytes), play.expectedResult) + require.Equal(t, 1, timesCalledProvidersOnSecondStage) // must go directly to archive as we have it in cache. + fmt.Println("timesCalledProviders", timesCalledProvidersOnSecondStage) + }) + } +} diff --git a/protocol/lavaprotocol/request_builder.go b/protocol/lavaprotocol/request_builder.go index 1d00942355..194d04aba8 100644 --- a/protocol/lavaprotocol/request_builder.go +++ b/protocol/lavaprotocol/request_builder.go @@ -151,7 +151,14 @@ func compareRelaysFindConflict(ctx context.Context, reply1 pairingtypes.RelayRep } // they have different data! report! - utils.LavaFormatWarning("Simulation: DataReliability detected mismatching results, Reporting...", nil, utils.Attribute{Key: "GUID", Value: ctx}, utils.Attribute{Key: "Data0", Value: string(reply1.Data)}, utils.Attribute{Key: "Data1", Value: reply2.Data}) + utils.LavaFormatWarning("Simulation: DataReliability detected mismatching results, Reporting...", nil, + utils.LogAttr("GUID", ctx), + utils.LogAttr("Request0", request1.RelayData), + utils.LogAttr("Data0", string(reply1.Data)), + utils.LogAttr("Request1", request2.RelayData), + utils.LogAttr("Data1", string(reply2.Data)), + ) + responseConflict = &conflicttypes.ResponseConflict{ ConflictRelayData0: conflictconstruct.ConstructConflictRelayData(&reply1, &request1), ConflictRelayData1: conflictconstruct.ConstructConflictRelayData(&reply2, &request2), diff --git a/protocol/lavasession/consumer_session_manager.go b/protocol/lavasession/consumer_session_manager.go index 2a2de957e7..7c06c50bf3 100644 --- a/protocol/lavasession/consumer_session_manager.go +++ b/protocol/lavasession/consumer_session_manager.go @@ -52,7 +52,7 @@ type ConsumerSessionManager struct { // contains a sorted list of blocked addresses, sorted by their cu used this epoch for higher chance of response 
currentlyBlockedProviderAddresses []string - addonAddresses map[RouterKey][]string + addonAddresses map[string][]string // key is RouterKey.String() reportedProviders *ReportedProviders // pairingPurge - contains all pairings that are unwanted this epoch, keeps them in memory in order to avoid release. // (if a consumer session still uses one of them or we want to report it.) @@ -113,7 +113,7 @@ func (csm *ConsumerSessionManager) UpdateAllProviders(epoch uint64, pairingList } csm.setValidAddressesToDefaultValue("", nil) // the starting point is that valid addresses are equal to pairing addresses. // reset session related metrics - csm.consumerMetricsManager.ResetSessionRelatedMetrics() + go csm.consumerMetricsManager.ResetSessionRelatedMetrics() go csm.providerOptimizer.UpdateWeights(CalcWeightsByStake(pairingList), epoch) utils.LavaFormatDebug("updated providers", utils.Attribute{Key: "epoch", Value: epoch}, utils.Attribute{Key: "spec", Value: csm.rpcEndpoint.Key()}) @@ -129,13 +129,13 @@ func (csm *ConsumerSessionManager) Initialized() bool { func (csm *ConsumerSessionManager) RemoveAddonAddresses(addon string, extensions []string) { if addon == "" && len(extensions) == 0 { // purge all - csm.addonAddresses = make(map[RouterKey][]string) + csm.addonAddresses = make(map[string][]string) } else { routerKey := NewRouterKey(append(extensions, addon)) if csm.addonAddresses == nil { - csm.addonAddresses = make(map[RouterKey][]string) + csm.addonAddresses = make(map[string][]string) } - csm.addonAddresses[routerKey] = []string{} + csm.addonAddresses[routerKey.String()] = []string{} } } @@ -153,10 +153,11 @@ func (csm *ConsumerSessionManager) CalculateAddonValidAddresses(addon string, ex // assuming csm is Rlocked func (csm *ConsumerSessionManager) getValidAddresses(addon string, extensions []string) (addresses []string) { routerKey := NewRouterKey(append(extensions, addon)) - if csm.addonAddresses == nil || csm.addonAddresses[routerKey] == nil { + routerKeyString := 
routerKey.String() + if csm.addonAddresses == nil || csm.addonAddresses[routerKeyString] == nil { return csm.CalculateAddonValidAddresses(addon, extensions) } - return csm.addonAddresses[routerKey] + return csm.addonAddresses[routerKeyString] } // After 2 epochs we need to close all open connections. @@ -332,7 +333,7 @@ func (csm *ConsumerSessionManager) setValidAddressesToDefaultValue(addon string, } } csm.RemoveAddonAddresses(addon, extensions) // refresh the list - csm.addonAddresses[NewRouterKey(append(extensions, addon))] = csm.CalculateAddonValidAddresses(addon, extensions) + csm.addonAddresses[NewRouterKey(append(extensions, addon)).String()] = csm.CalculateAddonValidAddresses(addon, extensions) } } @@ -375,11 +376,12 @@ func (csm *ConsumerSessionManager) cacheAddonAddresses(addon string, extensions csm.lock.Lock() // lock to set validAddresses[addon] if it's not cached defer csm.lock.Unlock() routerKey := NewRouterKey(append(extensions, addon)) - if csm.addonAddresses == nil || csm.addonAddresses[routerKey] == nil { + routerKeyString := routerKey.String() + if csm.addonAddresses == nil || csm.addonAddresses[routerKeyString] == nil { csm.RemoveAddonAddresses(addon, extensions) - csm.addonAddresses[routerKey] = csm.CalculateAddonValidAddresses(addon, extensions) + csm.addonAddresses[routerKeyString] = csm.CalculateAddonValidAddresses(addon, extensions) } - return csm.addonAddresses[routerKey] + return csm.addonAddresses[routerKeyString] } // validating we still have providers, otherwise reset valid addresses list diff --git a/protocol/lavasession/provider_types.go b/protocol/lavasession/provider_types.go index 955abe4b3d..7cc985e1da 100644 --- a/protocol/lavasession/provider_types.go +++ b/protocol/lavasession/provider_types.go @@ -50,7 +50,7 @@ func (endpoint *RPCProviderEndpoint) AddonsString() string { } func (endpoint *RPCProviderEndpoint) String() string { - return endpoint.ChainID + ":" + endpoint.ApiInterface + " Network Address:" + 
endpoint.NetworkAddress.Address + " Node: " + endpoint.UrlsString() + " Geolocation:" + strconv.FormatUint(endpoint.Geolocation, 10) + " Addons:" + endpoint.AddonsString() + return endpoint.ChainID + ":" + endpoint.ApiInterface + " Network Address:" + endpoint.NetworkAddress.Address + " Node:" + endpoint.UrlsString() + " Geolocation:" + strconv.FormatUint(endpoint.Geolocation, 10) + " Addons:" + endpoint.AddonsString() } func (endpoint *RPCProviderEndpoint) Validate() error { diff --git a/protocol/lavasession/router_key.go b/protocol/lavasession/router_key.go index 671f3e780d..175d60b921 100644 --- a/protocol/lavasession/router_key.go +++ b/protocol/lavasession/router_key.go @@ -5,48 +5,89 @@ import ( "strconv" "strings" + "github.com/lavanet/lava/v4/utils/lavaslices" spectypes "github.com/lavanet/lava/v4/x/spec/types" ) const ( - sep = "|" - methodRouteSep = "method-route:" + RouterKeySeparator = "|" + methodRouteSep = "method-route:" + internalPathSep = "internal-path:" ) -type RouterKey string +type RouterKey struct { + methodsRouteUniqueKey int + uniqueExtensions []string + internalPath string +} + +func NewRouterKey(extensions []string) RouterKey { + routerKey := RouterKey{} + routerKey.SetExtensions(extensions) + return routerKey +} + +func NewRouterKeyFromExtensions(extensions []*spectypes.Extension) RouterKey { + extensionsStr := lavaslices.Map(extensions, func(extension *spectypes.Extension) string { + return extension.Name + }) + + return NewRouterKey(extensionsStr) +} -func (rk *RouterKey) ApplyMethodsRoute(routeNum int) RouterKey { - additionalPath := strconv.FormatInt(int64(routeNum), 10) - return RouterKey(string(*rk) + methodRouteSep + additionalPath) +func GetEmptyRouterKey() RouterKey { + return NewRouterKey([]string{}) } -func newRouterKeyInner(uniqueExtensions map[string]struct{}) RouterKey { +func (rk *RouterKey) SetExtensions(extensions []string) { + // make sure addons have no repetitions + uniqueExtensions := map[string]struct{}{} // init 
with the empty extension + if len(extensions) == 0 { + uniqueExtensions[""] = struct{}{} + } else { + for _, extension := range extensions { + uniqueExtensions[extension] = struct{}{} + } + } + uniqueExtensionsSlice := []string{} for addon := range uniqueExtensions { // we are sorting this anyway so we don't have to keep order uniqueExtensionsSlice = append(uniqueExtensionsSlice, addon) } + sort.Strings(uniqueExtensionsSlice) - return RouterKey(sep + strings.Join(uniqueExtensionsSlice, sep) + sep) + + rk.uniqueExtensions = uniqueExtensionsSlice } -func NewRouterKey(extensions []string) RouterKey { - // make sure addons have no repetitions - uniqueExtensions := map[string]struct{}{} - for _, extension := range extensions { - uniqueExtensions[extension] = struct{}{} - } - return newRouterKeyInner(uniqueExtensions) +func (rk *RouterKey) ApplyMethodsRoute(routeNum int) { + rk.methodsRouteUniqueKey = routeNum } -func NewRouterKeyFromExtensions(extensions []*spectypes.Extension) RouterKey { - // make sure addons have no repetitions - uniqueExtensions := map[string]struct{}{} - for _, extension := range extensions { - uniqueExtensions[extension.Name] = struct{}{} - } - return newRouterKeyInner(uniqueExtensions) +func (rk *RouterKey) ApplyInternalPath(internalPath string) { + rk.internalPath = internalPath } -func GetEmptyRouterKey() RouterKey { - return NewRouterKey([]string{}) +func (rk RouterKey) HasExtension(extension string) bool { + return lavaslices.Contains(rk.uniqueExtensions, extension) +} + +func (rk RouterKey) String() string { + // uniqueExtensions are sorted on init + retStr := rk.uniqueExtensions + if len(retStr) == 0 { + retStr = append(retStr, "") + } + + // if we have a route number, we add it to the key + if rk.methodsRouteUniqueKey != 0 { + retStr = append(retStr, methodRouteSep+strconv.FormatInt(int64(rk.methodsRouteUniqueKey), 10)) + } + + // if we have an internal path, we add it to the key + if rk.internalPath != "" { + retStr = append(retStr, 
internalPathSep+rk.internalPath) + } + + return RouterKeySeparator + strings.Join(retStr, RouterKeySeparator) + RouterKeySeparator } diff --git a/protocol/lavasession/router_key_test.go b/protocol/lavasession/router_key_test.go new file mode 100644 index 0000000000..cbd7983ebb --- /dev/null +++ b/protocol/lavasession/router_key_test.go @@ -0,0 +1,90 @@ +package lavasession + +import ( + "testing" + + spectypes "github.com/lavanet/lava/v4/x/spec/types" + "github.com/stretchr/testify/require" +) + +func TestRouterKey_SetExtensions(t *testing.T) { + rk := NewRouterKey([]string{"ext1", "ext2", "ext1"}) + require.Equal(t, "|ext1|ext2|", rk.String()) + + rk.SetExtensions([]string{"ext3", "ext2"}) + require.Equal(t, "|ext2|ext3|", rk.String()) +} + +func TestRouterKey_NewRouterKeyFromExtensions(t *testing.T) { + rk := NewRouterKeyFromExtensions([]*spectypes.Extension{ + {Name: "ext1"}, + {Name: "ext2"}, + {Name: "ext3"}, + }) + require.Equal(t, "|ext1|ext2|ext3|", rk.String()) +} + +func TestRouterKey_HasExtension(t *testing.T) { + rk := NewRouterKey([]string{"ext1", "ext2"}) + require.True(t, rk.HasExtension("ext1")) + require.False(t, rk.HasExtension("ext3")) +} + +func TestRouterKey_ApplyMethodsRoute(t *testing.T) { + rk := NewRouterKey([]string{}) + rk.ApplyMethodsRoute(42) + require.Equal(t, "||method-route:42|", rk.String()) +} + +func TestRouterKey_ApplyInternalPath(t *testing.T) { + rk := NewRouterKey([]string{}) + rk.ApplyInternalPath("/x") + require.Equal(t, "||internal-path:/x|", rk.String()) +} + +func TestRouterKey_String_NoExtensionsNoRouteNoPath(t *testing.T) { + rk := NewRouterKey([]string{}) + require.Equal(t, "||", rk.String()) +} + +func TestRouterKey_String_WithExtensionsNoRouteNoPath(t *testing.T) { + rk := NewRouterKey([]string{"ext2", "ext1"}) + require.Equal(t, "|ext1|ext2|", rk.String()) +} + +func TestRouterKey_String_WithExtensionsAndRouteNoPath(t *testing.T) { + rk := NewRouterKey([]string{"ext1", "ext2"}) + rk.ApplyMethodsRoute(42) + 
require.Equal(t, "|ext1|ext2|method-route:42|", rk.String()) +} + +func TestRouterKey_String_WithExtensionsRouteAndPath(t *testing.T) { + rk := NewRouterKey([]string{"ext1", "ext2"}) + rk.ApplyMethodsRoute(42) + rk.ApplyInternalPath("/x") + require.Equal(t, "|ext1|ext2|method-route:42|internal-path:/x|", rk.String()) +} + +func TestRouterKey_String_NoExtensionsWithRouteAndPath(t *testing.T) { + rk := NewRouterKey([]string{}) + rk.ApplyMethodsRoute(42) + rk.ApplyInternalPath("/x") + require.Equal(t, "||method-route:42|internal-path:/x|", rk.String()) +} + +func TestRouterKey_String_WithPathNoRouteNoExtensions(t *testing.T) { + rk := NewRouterKey([]string{}) + rk.ApplyInternalPath("/another/path") + require.Equal(t, "||internal-path:/another/path|", rk.String()) +} + +func TestGetEmptyRouterKey(t *testing.T) { + rk := GetEmptyRouterKey() + require.Equal(t, "||", rk.String()) +} + +func TestRouterKey_SetExtensions_EmptySlice(t *testing.T) { + rk := NewRouterKey([]string{}) + rk.SetExtensions([]string{}) + require.Equal(t, "||", rk.String()) +} diff --git a/protocol/lavasession/used_providers.go b/protocol/lavasession/used_providers.go index ec5820f9a3..e83d619cd0 100644 --- a/protocol/lavasession/used_providers.go +++ b/protocol/lavasession/used_providers.go @@ -16,16 +16,19 @@ type BlockedProvidersInf interface { func NewUsedProviders(blockedProviders BlockedProvidersInf) *UsedProviders { unwantedProviders := map[string]struct{}{} + originalUnwantedProviders := map[string]struct{}{} // we need a new map as map changes are changed by pointer if blockedProviders != nil { providerAddressesToBlock := blockedProviders.GetBlockedProviders() if len(providerAddressesToBlock) > 0 { for _, providerAddress := range providerAddressesToBlock { unwantedProviders[providerAddress] = struct{}{} + originalUnwantedProviders[providerAddress] = struct{}{} } } } + return &UsedProviders{ - uniqueUsedProviders: map[RouterKey]*UniqueUsedProviders{NewRouterKey([]string{}): { + 
uniqueUsedProviders: map[string]*UniqueUsedProviders{GetEmptyRouterKey().String(): { providers: map[string]struct{}{}, unwantedProviders: unwantedProviders, blockOnSyncLoss: map[string]struct{}{}, @@ -33,7 +36,7 @@ func NewUsedProviders(blockedProviders BlockedProvidersInf) *UsedProviders { }}, // we keep the original unwanted providers so when we create more unique used providers // we can reuse it as its the user's instructions. - originalUnwantedProviders: unwantedProviders, + originalUnwantedProviders: originalUnwantedProviders, } } @@ -48,7 +51,7 @@ type UniqueUsedProviders struct { type UsedProviders struct { lock sync.RWMutex - uniqueUsedProviders map[RouterKey]*UniqueUsedProviders + uniqueUsedProviders map[string]*UniqueUsedProviders originalUnwantedProviders map[string]struct{} selecting bool sessionsLatestBatch int @@ -125,7 +128,8 @@ func (up *UsedProviders) AllUnwantedAddresses() []string { // if it does, return it. If it doesn't // creating a new instance and returning it. func (up *UsedProviders) createOrUseUniqueUsedProvidersForKey(key RouterKey) *UniqueUsedProviders { - uniqueUsedProviders, ok := up.uniqueUsedProviders[key] + keyString := key.String() + uniqueUsedProviders, ok := up.uniqueUsedProviders[keyString] if !ok { uniqueUsedProviders = &UniqueUsedProviders{ providers: map[string]struct{}{}, @@ -133,7 +137,7 @@ func (up *UsedProviders) createOrUseUniqueUsedProvidersForKey(key RouterKey) *Un blockOnSyncLoss: map[string]struct{}{}, erroredProviders: map[string]struct{}{}, } - up.uniqueUsedProviders[key] = uniqueUsedProviders + up.uniqueUsedProviders[keyString] = uniqueUsedProviders } return uniqueUsedProviders } diff --git a/protocol/metrics/consumer_metrics_manager.go b/protocol/metrics/consumer_metrics_manager.go index b3ac3e910e..ae4ee74319 100644 --- a/protocol/metrics/consumer_metrics_manager.go +++ b/protocol/metrics/consumer_metrics_manager.go @@ -1,6 +1,7 @@ package metrics import ( + "encoding/json" "fmt" "net/http" "sync" @@ -44,6 
+45,8 @@ type ConsumerMetricsManager struct { totalFailedWsSubscriptionRequestsMetric *prometheus.CounterVec totalWsSubscriptionDissconnectMetric *prometheus.CounterVec totalDuplicatedWsSubscriptionRequestsMetric *prometheus.CounterVec + totalLoLSuccessMetric prometheus.Counter + totalLoLErrorsMetric prometheus.Counter totalWebSocketConnectionsActive *prometheus.GaugeVec blockMetric *prometheus.GaugeVec latencyMetric *prometheus.GaugeVec @@ -64,11 +67,14 @@ type ConsumerMetricsManager struct { relayProcessingLatencyBeforeProvider *prometheus.GaugeVec relayProcessingLatencyAfterProvider *prometheus.GaugeVec averageProcessingLatency map[string]*LatencyTracker + consumerOptimizerQoSClient *ConsumerOptimizerQoSClient } type ConsumerMetricsManagerOptions struct { - NetworkAddress string - AddMethodsApiGauge bool + NetworkAddress string + AddMethodsApiGauge bool + EnableQoSListener bool + ConsumerOptimizerQoSClient *ConsumerOptimizerQoSClient } func NewConsumerMetricsManager(options ConsumerMetricsManagerOptions) *ConsumerMetricsManager { @@ -114,6 +120,16 @@ func NewConsumerMetricsManager(options ConsumerMetricsManagerOptions) *ConsumerM Help: "The total number of duplicated webscket subscription requests over time per chain id per api interface.", }, []string{"spec", "apiInterface"}) + totalLoLSuccessMetric := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "lava_consumer_total_lol_successes", + Help: "The total number of requests sent to lava over lava successfully", + }) + + totalLoLErrorsMetric := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "lava_consumer_total_lol_errors", + Help: "The total number of requests sent to lava over lava and failed", + }) + totalWebSocketConnectionsActive := prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "lava_consumer_total_websocket_connections_active", Help: "The total number of currently active websocket connections with users", @@ -237,6 +253,8 @@ func NewConsumerMetricsManager(options 
ConsumerMetricsManagerOptions) *ConsumerM prometheus.MustRegister(totalFailedWsSubscriptionRequestsMetric) prometheus.MustRegister(totalDuplicatedWsSubscriptionRequestsMetric) prometheus.MustRegister(totalWsSubscriptionDissconnectMetric) + prometheus.MustRegister(totalLoLSuccessMetric) + prometheus.MustRegister(totalLoLErrorsMetric) consumerMetricsManager := &ConsumerMetricsManager{ totalCURequestedMetric: totalCURequestedMetric, @@ -270,9 +288,26 @@ func NewConsumerMetricsManager(options ConsumerMetricsManagerOptions) *ConsumerM relayProcessingLatencyBeforeProvider: relayProcessingLatencyBeforeProvider, relayProcessingLatencyAfterProvider: relayProcessingLatencyAfterProvider, averageProcessingLatency: map[string]*LatencyTracker{}, + totalLoLSuccessMetric: totalLoLSuccessMetric, + totalLoLErrorsMetric: totalLoLErrorsMetric, + consumerOptimizerQoSClient: options.ConsumerOptimizerQoSClient, } http.Handle("/metrics", promhttp.Handler()) + http.HandleFunc("/provider_optimizer_metrics", func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodGet { + http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) + return + } + reports := consumerMetricsManager.consumerOptimizerQoSClient.GetReportsToSend() + jsonData, err := json.Marshal(reports) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/json") + w.Write(jsonData) + }) overallHealthHandler := func(w http.ResponseWriter, r *http.Request) { statusCode := http.StatusOK @@ -545,3 +580,38 @@ func (pme *ConsumerMetricsManager) SetWsSubscriptioDisconnectRequestMetric(chain } pme.totalWsSubscriptionDissconnectMetric.WithLabelValues(chainId, apiInterface, disconnectReason).Inc() } + +func (pme *ConsumerMetricsManager) SetLoLResponse(success bool) { + if pme == nil { + return + } + if success { + pme.totalLoLSuccessMetric.Inc() + } else { + pme.totalLoLErrorsMetric.Inc() + } +} + +func (pme *ConsumerMetricsManager) 
handleOptimizerQoS(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) + return + } + + var report OptimizerQoSReportToSend + if err := json.NewDecoder(r.Body).Decode(&report); err != nil { + http.Error(w, "Invalid request body", http.StatusBadRequest) + return + } + + // Process the received QoS report here + utils.LavaFormatDebug("Received QoS report", + utils.LogAttr("provider", report.ProviderAddress), + utils.LogAttr("chain_id", report.ChainId), + utils.LogAttr("sync_score", report.SyncScore), + utils.LogAttr("availability_score", report.AvailabilityScore), + utils.LogAttr("latency_score", report.LatencyScore), + ) + + w.WriteHeader(http.StatusOK) +} diff --git a/protocol/metrics/consumer_optimizer_qos_client.go b/protocol/metrics/consumer_optimizer_qos_client.go index f204107e17..625a67433a 100644 --- a/protocol/metrics/consumer_optimizer_qos_client.go +++ b/protocol/metrics/consumer_optimizer_qos_client.go @@ -21,15 +21,18 @@ var ( ) type ConsumerOptimizerQoSClient struct { - consumerOrigin string - queueSender *QueueSender - optimizers map[string]OptimizerInf // keys are chain ids + consumerHostname string + consumerAddress string + queueSender *QueueSender + optimizers map[string]OptimizerInf // keys are chain ids // keys are chain ids, values are maps with provider addresses as keys chainIdToProviderToRelaysCount map[string]map[string]uint64 chainIdToProviderToNodeErrorsCount map[string]map[string]uint64 chainIdToProviderToEpochToStake map[string]map[string]map[uint64]int64 // third key is epoch currentEpoch atomic.Uint64 lock sync.RWMutex + reportsToSend []OptimizerQoSReportToSend + geoLocation uint64 } type OptimizerQoSReport struct { @@ -41,22 +44,24 @@ type OptimizerQoSReport struct { EntryIndex int } -type optimizerQoSReportToSend struct { +type OptimizerQoSReportToSend struct { Timestamp time.Time `json:"timestamp"` SyncScore float64 `json:"sync_score"` 
AvailabilityScore float64 `json:"availability_score"` LatencyScore float64 `json:"latency_score"` GenericScore float64 `json:"generic_score"` ProviderAddress string `json:"provider"` - ConsumerOrigin string `json:"consumer"` + ConsumerHostname string `json:"consumer_hostname"` + ConsumerAddress string `json:"consumer_pub_address"` ChainId string `json:"chain_id"` NodeErrorRate float64 `json:"node_error_rate"` Epoch uint64 `json:"epoch"` ProviderStake int64 `json:"provider_stake"` EntryIndex int `json:"entry_index"` + GeoLocation uint64 `json:"geo_location"` } -func (oqosr optimizerQoSReportToSend) String() string { +func (oqosr OptimizerQoSReportToSend) String() string { bytes, err := json.Marshal(oqosr) if err != nil { return "" @@ -68,20 +73,21 @@ type OptimizerInf interface { CalculateQoSScoresForMetrics(allAddresses []string, ignoredProviders map[string]struct{}, cu uint64, requestedBlock int64) []*OptimizerQoSReport } -func NewConsumerOptimizerQoSClient(endpointAddress string, interval ...time.Duration) *ConsumerOptimizerQoSClient { +func NewConsumerOptimizerQoSClient(consumerAddress, endpointAddress string, geoLocation uint64, interval ...time.Duration) *ConsumerOptimizerQoSClient { hostname, err := os.Hostname() if err != nil { utils.LavaFormatWarning("Error while getting hostname for ConsumerOptimizerQoSClient", err) hostname = "unknown" + strconv.FormatUint(rand.Uint64(), 10) // random seed for different unknowns } - return &ConsumerOptimizerQoSClient{ - consumerOrigin: hostname, + consumerHostname: hostname, + consumerAddress: consumerAddress, queueSender: NewQueueSender(endpointAddress, "ConsumerOptimizerQoS", nil, interval...), optimizers: map[string]OptimizerInf{}, chainIdToProviderToRelaysCount: map[string]map[string]uint64{}, chainIdToProviderToNodeErrorsCount: map[string]map[string]uint64{}, chainIdToProviderToEpochToStake: map[string]map[string]map[uint64]int64{}, + geoLocation: geoLocation, } } @@ -126,12 +132,12 @@ func (coqc 
*ConsumerOptimizerQoSClient) calculateNodeErrorRate(chainId, provider return 0 } -func (coqc *ConsumerOptimizerQoSClient) appendOptimizerQoSReport(report *OptimizerQoSReport, chainId string, epoch uint64) { +func (coqc *ConsumerOptimizerQoSClient) appendOptimizerQoSReport(report *OptimizerQoSReport, chainId string, epoch uint64) OptimizerQoSReportToSend { // must be called under read lock - - optimizerQoSReportToSend := optimizerQoSReportToSend{ + optimizerQoSReportToSend := OptimizerQoSReportToSend{ Timestamp: time.Now(), - ConsumerOrigin: coqc.consumerOrigin, + ConsumerHostname: coqc.consumerHostname, + ConsumerAddress: coqc.consumerAddress, SyncScore: report.SyncScore, AvailabilityScore: report.AvailabilityScore, LatencyScore: report.LatencyScore, @@ -142,12 +148,14 @@ func (coqc *ConsumerOptimizerQoSClient) appendOptimizerQoSReport(report *Optimiz Epoch: epoch, NodeErrorRate: coqc.calculateNodeErrorRate(chainId, report.ProviderAddress), ProviderStake: coqc.getProviderChainStake(chainId, report.ProviderAddress, epoch), + GeoLocation: coqc.geoLocation, } coqc.queueSender.appendQueue(optimizerQoSReportToSend) + return optimizerQoSReportToSend } -func (coqc *ConsumerOptimizerQoSClient) getReportsFromOptimizers() { +func (coqc *ConsumerOptimizerQoSClient) getReportsFromOptimizers() []OptimizerQoSReportToSend { coqc.lock.RLock() // we only read from the maps here defer coqc.lock.RUnlock() @@ -156,7 +164,7 @@ func (coqc *ConsumerOptimizerQoSClient) getReportsFromOptimizers() { requestedBlock := spectypes.LATEST_BLOCK currentEpoch := coqc.currentEpoch.Load() - + reportsToSend := []OptimizerQoSReportToSend{} for chainId, optimizer := range coqc.optimizers { providersMap, ok := coqc.chainIdToProviderToEpochToStake[chainId] if !ok { @@ -165,9 +173,22 @@ func (coqc *ConsumerOptimizerQoSClient) getReportsFromOptimizers() { reports := optimizer.CalculateQoSScoresForMetrics(maps.Keys(providersMap), ignoredProviders, cu, requestedBlock) for _, report := range reports { - 
coqc.appendOptimizerQoSReport(report, chainId, currentEpoch) + reportsToSend = append(reportsToSend, coqc.appendOptimizerQoSReport(report, chainId, currentEpoch)) } } + return reportsToSend +} + +func (coqc *ConsumerOptimizerQoSClient) SetReportsToSend(reports []OptimizerQoSReportToSend) { + coqc.lock.Lock() + defer coqc.lock.Unlock() + coqc.reportsToSend = reports +} + +func (coqc *ConsumerOptimizerQoSClient) GetReportsToSend() []OptimizerQoSReportToSend { + coqc.lock.RLock() + defer coqc.lock.RUnlock() + return coqc.reportsToSend } func (coqc *ConsumerOptimizerQoSClient) StartOptimizersQoSReportsCollecting(ctx context.Context, samplingInterval time.Duration) { @@ -183,7 +204,7 @@ func (coqc *ConsumerOptimizerQoSClient) StartOptimizersQoSReportsCollecting(ctx utils.LavaFormatTrace("ConsumerOptimizerQoSClient context done") return case <-time.After(samplingInterval): - coqc.getReportsFromOptimizers() + coqc.SetReportsToSend(coqc.getReportsFromOptimizers()) } } }() diff --git a/protocol/metrics/rpcconsumer_logs.go b/protocol/metrics/rpcconsumer_logs.go index d09f988716..0bc0359384 100644 --- a/protocol/metrics/rpcconsumer_logs.go +++ b/protocol/metrics/rpcconsumer_logs.go @@ -92,6 +92,13 @@ func NewRPCConsumerLogs(consumerMetricsManager *ConsumerMetricsManager, consumer return rpcConsumerLogs, err } +func (rpccl *RPCConsumerLogs) SetLoLResponse(success bool) { + if rpccl == nil { + return + } + rpccl.consumerMetricsManager.SetLoLResponse(success) +} + func (rpccl *RPCConsumerLogs) SetWebSocketConnectionActive(chainId string, apiInterface string, add bool) { rpccl.consumerMetricsManager.SetWebSocketConnectionActive(chainId, apiInterface, add) } diff --git a/protocol/parser/parser.go b/protocol/parser/parser.go index 8d5622e6a6..884bccaadb 100644 --- a/protocol/parser/parser.go +++ b/protocol/parser/parser.go @@ -11,7 +11,9 @@ import ( "github.com/itchyny/gojq" sdkerrors "cosmossdk.io/errors" + "github.com/lavanet/lava/v4/protocol/chainlib/chainproxy/rpcclient" 
"github.com/lavanet/lava/v4/utils" + "github.com/lavanet/lava/v4/utils/lavaslices" pairingtypes "github.com/lavanet/lava/v4/x/pairing/types" spectypes "github.com/lavanet/lava/v4/x/spec/types" ) @@ -28,6 +30,7 @@ var ValueNotSetError = sdkerrors.New("Value Not Set ", 6662, "when trying to par type RPCInput interface { GetParams() interface{} GetResult() json.RawMessage + GetError() *rpcclient.JsonError ParseBlock(block string) (int64, error) GetHeaders() []pairingtypes.Metadata GetMethod() string @@ -69,59 +72,24 @@ func ParseDefaultBlockParameter(block string) (int64, error) { return blockNum, nil } -func getParserTypeMap(parserType int) map[spectypes.PARSER_TYPE]struct{} { - switch parserType { - case PARSE_PARAMS: - return map[spectypes.PARSER_TYPE]struct{}{ - spectypes.PARSER_TYPE_BLOCK_LATEST: {}, - spectypes.PARSER_TYPE_DEFAULT_VALUE: {}, - spectypes.PARSER_TYPE_BLOCK_HASH: {}, - } - case PARSE_RESULT: - return map[spectypes.PARSER_TYPE]struct{}{ - spectypes.PARSER_TYPE_RESULT: {}, - } - default: - utils.LavaFormatError("missing parserType", nil, utils.LogAttr("parserType", parserType)) - return map[spectypes.PARSER_TYPE]struct{}{} - } -} - -func filterGenericParsersByType(genericParsers []spectypes.GenericParser, filterMap map[spectypes.PARSER_TYPE]struct{}) []spectypes.GenericParser { - retGenericParsers := []spectypes.GenericParser{} - for _, parser := range genericParsers { - if _, ok := filterMap[parser.ParseType]; ok { - retGenericParsers = append(retGenericParsers, parser) - } - } - return retGenericParsers -} - func parseInputWithGenericParsers(rpcInput RPCInput, genericParsers []spectypes.GenericParser) (*ParsedInput, bool) { - managedToParseRawBlock := false + managedToParse := false if len(genericParsers) == 0 { - return nil, managedToParseRawBlock + return nil, managedToParse } - genericParserResult, genericParserErr := ParseWithGenericParsers(rpcInput, filterGenericParsersByType(genericParsers, getParserTypeMap(PARSE_PARAMS))) + 
genericParserResult, genericParserErr := ParseWithGenericParsers(rpcInput, genericParsers) if genericParserErr != nil { - return nil, managedToParseRawBlock - } - - parsed := NewParsedInput() - rawParsedData := genericParserResult.GetRawParsedData() - if rawParsedData != "" { - managedToParseRawBlock = true - parsed.parsedDataRaw = rawParsedData + return nil, managedToParse } - parsedBlockHashes, err := genericParserResult.GetBlockHashes() - if err == nil { - managedToParseRawBlock = true - parsed.parsedHashes = parsedBlockHashes + _, err := genericParserResult.GetBlockHashes() + if genericParserResult.GetParserError() == "" && (err == nil || genericParserResult.GetRawParsedData() != "") { + // if we got here, there is no parser error and we successfully parsed either the block hashes or the raw parsed data + managedToParse = true } - return parsed, managedToParseRawBlock + return genericParserResult, managedToParse } // ParseRawBlock attempts to parse a block from rpcInput and store it in parsedInput. 
@@ -136,18 +104,21 @@ func ParseRawBlock(rpcInput RPCInput, parsedInput *ParsedInput, defaultValue str } if rawBlock == "" || err != nil { if defaultValue != "" { - utils.LavaFormatDebug("Failed parsing block from string, assuming default value", - utils.LogAttr("params", rpcInput.GetParams()), - utils.LogAttr("failed_parsed_value", rawBlock), - utils.LogAttr("default_value", defaultValue), - ) parsedBlock, err = rpcInput.ParseBlock(defaultValue) if err != nil { utils.LavaFormatError("Failed parsing default value, setting to NOT_APPLICABLE", err, utils.LogAttr("default_value", defaultValue), ) parsedBlock = spectypes.NOT_APPLICABLE + } else { + parsedInput.UsedDefaultValue = true } + utils.LavaFormatDebug("Failed parsing block from string, assuming default value", + utils.LogAttr("params", rpcInput.GetParams()), + utils.LogAttr("failed_parsed_value", rawBlock), + utils.LogAttr("default_value", defaultValue), + utils.LogAttr("parsedBlock", parsedBlock), + ) } else { parsedBlock = spectypes.NOT_APPLICABLE } @@ -155,23 +126,22 @@ func ParseRawBlock(rpcInput RPCInput, parsedInput *ParsedInput, defaultValue str parsedInput.SetBlock(parsedBlock) } -func parseInputWithLegacyBlockParser(rpcInput RPCInput, blockParser spectypes.BlockParser, source int) (string, error) { - result, err := legacyParse(rpcInput, blockParser, source) +func parseInputWithLegacyBlockParser(rpcInput RPCInput, blockParser spectypes.BlockParser, dataSource int) (string, bool, error) { + if rpcInput.GetError() != nil { + return "", false, utils.LavaFormatError("blockParsing - rpcInput is error", nil, utils.LogAttr("rpcInput.GetError()", rpcInput.GetError())) + } + + result, usedDefaultValue, err := legacyParse(rpcInput, blockParser, dataSource) if err != nil || result == nil { - return "", utils.LavaFormatDebug("blockParsing - parse failed", - utils.LogAttr("error", err), - utils.LogAttr("result", result), - utils.LogAttr("blockParser", blockParser), - utils.LogAttr("rpcInput", rpcInput), - ) + return 
"", usedDefaultValue, fmt.Errorf("blockParsing - parse failed. result=%v error=%w", result, err) } resString, ok := result[spectypes.DEFAULT_PARSED_RESULT_INDEX].(string) if !ok { - return "", utils.LavaFormatDebug("blockParsing - result[0].(string) - type assertion failed", utils.LogAttr("result[0]", result[0])) + return "", usedDefaultValue, utils.LavaFormatDebug("blockParsing - result[0].(string) - type assertion failed", utils.LogAttr("result[0]", result[0])) } - return resString, nil + return resString, usedDefaultValue, nil } // parseBlock processes the given RPC input using either generic parsers or a legacy block parser. @@ -183,11 +153,11 @@ func parseInputWithLegacyBlockParser(rpcInput RPCInput, blockParser spectypes.Bl // - rpcInput: The input data to be parsed. // - blockParser: The legacy block parser to use if generic parsing fails. // - genericParsers: A slice of generic parsers to attempt first. -// - source: An integer representing the source of the input: either PARSE_PARAMS or PARSE_RESULT. +// - dataSource: An integer representing the source of the input: either PARSE_PARAMS or PARSE_RESULT. // // Returns: // - A pointer to a ParsedInput struct containing the parsed data. 
-func parseBlock(rpcInput RPCInput, blockParser spectypes.BlockParser, genericParsers []spectypes.GenericParser, source int) *ParsedInput { +func parseBlock(rpcInput RPCInput, blockParser spectypes.BlockParser, genericParsers []spectypes.GenericParser, dataSource int) *ParsedInput { parsedBlockInfo, _ := parseInputWithGenericParsers(rpcInput, genericParsers) if parsedBlockInfo == nil { parsedBlockInfo = NewParsedInput() @@ -199,7 +169,10 @@ func parseBlock(rpcInput RPCInput, blockParser spectypes.BlockParser, genericPar } } - parsedRawBlock, _ := parseInputWithLegacyBlockParser(rpcInput, blockParser, source) + parsedRawBlock, usedDefaultValue, err := parseInputWithLegacyBlockParser(rpcInput, blockParser, dataSource) + if err == nil { + parsedBlockInfo.UsedDefaultValue = usedDefaultValue + } parsedBlockInfo.parsedDataRaw = unquoteString(parsedRawBlock) return parsedBlockInfo } @@ -207,12 +180,14 @@ func parseBlock(rpcInput RPCInput, blockParser spectypes.BlockParser, genericPar func ParseBlockFromParams(rpcInput RPCInput, blockParser spectypes.BlockParser, genericParsers []spectypes.GenericParser) *ParsedInput { parsedInput := parseBlock(rpcInput, blockParser, genericParsers, PARSE_PARAMS) ParseRawBlock(rpcInput, parsedInput, blockParser.DefaultValue) + utils.LavaFormatTrace("ParseBlockFromParams result", utils.LogAttr("parsedInput", parsedInput)) return parsedInput } func ParseBlockFromReply(rpcInput RPCInput, blockParser spectypes.BlockParser, genericParsers []spectypes.GenericParser) *ParsedInput { parsedInput := parseBlock(rpcInput, blockParser, genericParsers, PARSE_RESULT) ParseRawBlock(rpcInput, parsedInput, blockParser.DefaultValue) + utils.LavaFormatTrace("ParseBlockFromReply result", utils.LogAttr("parsedInput", parsedInput)) return parsedInput } @@ -230,11 +205,23 @@ func unquoteString(str string) string { // This returns the parsed response after decoding func ParseBlockHashFromReplyAndDecode(rpcInput RPCInput, resultParser spectypes.BlockParser, 
genericParsers []spectypes.GenericParser) (string, error) { - parsedInput, _ := parseInputWithGenericParsers(rpcInput, genericParsers) + parsedInput, parsedSuccessfully := parseInputWithGenericParsers(rpcInput, genericParsers) if parsedInput == nil { - parsedBlockHashFromBlockParser, err := parseInputWithLegacyBlockParser(rpcInput, resultParser, PARSE_RESULT) + parsedBlockHashFromBlockParser, _, err := parseInputWithLegacyBlockParser(rpcInput, resultParser, PARSE_RESULT) if err != nil { - return "", err + parseErrorLogLevel := utils.LAVA_LOG_WARN + if parsedSuccessfully { + // found a hash, no need to log warning later + parseErrorLogLevel = utils.LAVA_LOG_DEBUG + } + + return "", utils.LavaFormatLog("failed to parse with legacy block parser", err, + lavaslices.Slice( + utils.LogAttr("rpcInput", rpcInput), + utils.LogAttr("resultParser", resultParser), + ), + uint(parseErrorLogLevel), + ) } return parseResponseByEncoding([]byte(parsedBlockHashFromBlockParser), resultParser.Encoding) } @@ -251,43 +238,49 @@ func ParseBlockHashFromReplyAndDecode(rpcInput RPCInput, resultParser spectypes. return parseResponseByEncoding([]byte(parsedBlockHashes[0]), resultParser.Encoding) } -func legacyParse(rpcInput RPCInput, blockParser spectypes.BlockParser, dataSource int) ([]interface{}, error) { +func legacyParse(rpcInput RPCInput, blockParser spectypes.BlockParser, dataSource int) ([]interface{}, bool, error) { var retval []interface{} var err error - + var usedDefaultValue bool switch blockParser.ParserFunc { case spectypes.PARSER_FUNC_EMPTY: - return nil, nil + return nil, usedDefaultValue, nil case spectypes.PARSER_FUNC_PARSE_BY_ARG: retval, err = parseByArg(rpcInput, blockParser.ParserArg, dataSource) case spectypes.PARSER_FUNC_PARSE_CANONICAL: retval, err = parseCanonical(rpcInput, blockParser.ParserArg, dataSource) case spectypes.PARSER_FUNC_PARSE_DICTIONARY: + // currently, parseDictionary does not log warnings. 
If it ever will, we need to pass parserOptions retval, err = parseDictionary(rpcInput, blockParser.ParserArg, dataSource) case spectypes.PARSER_FUNC_PARSE_DICTIONARY_OR_ORDERED: + // currently, parseDictionaryOrOrdered does not log warnings. If it ever will, we need to pass parserOptions retval, err = parseDictionaryOrOrdered(rpcInput, blockParser.ParserArg, dataSource) case spectypes.PARSER_FUNC_DEFAULT: retval = parseDefault(blockParser.ParserArg) + usedDefaultValue = true default: - return nil, fmt.Errorf("unsupported block parser parserFunc") + return nil, usedDefaultValue, fmt.Errorf("unsupported block parser parserFunc") } if err != nil { if ValueNotSetError.Is(err) && blockParser.DefaultValue != "" { // means this parsing failed because the value did not exist on an optional param retval = appendInterfaceToInterfaceArray(blockParser.DefaultValue) + usedDefaultValue = true } else { - return nil, err + return nil, usedDefaultValue, err } } - return retval, nil + return retval, usedDefaultValue, nil } type ParsedInput struct { - parsedDataRaw string - parsedBlock int64 - parsedHashes []string + parsedDataRaw string + parsedBlock int64 + parsedHashes []string + parserError string + UsedDefaultValue bool } const RAW_NOT_APPLICABLE = "-1" @@ -312,6 +305,10 @@ func (p *ParsedInput) GetBlock() int64 { return p.parsedBlock } +func (p *ParsedInput) GetParserError() string { + return p.parserError +} + func (p *ParsedInput) GetBlockHashes() ([]string, error) { if len(p.parsedHashes) == 0 { return nil, fmt.Errorf("no parsed hashes found") @@ -325,7 +322,8 @@ func getMapForParse(rpcInput RPCInput) map[string]interface{} { if rpcInputResult != nil { json.Unmarshal(rpcInputResult, &result) } - return map[string]interface{}{"params": rpcInput.GetParams(), "result": result} + + return map[string]interface{}{"params": rpcInput.GetParams(), "result": result, "error": rpcInput.GetError().ToMap()} } func ParseWithGenericParsers(rpcInput RPCInput, genericParsers 
[]spectypes.GenericParser) (*ParsedInput, error) { @@ -397,6 +395,7 @@ func parseGeneric(input interface{}, genericParser spectypes.GenericParser) (*Pa if !parseRule(genericParser.Rule, value) { return nil, utils.LavaFormatWarning("PARSER_TYPE_DEFAULT_VALUE Did not match any rule", nil, utils.LogAttr("value", value), utils.LogAttr("rules", genericParser.Rule)) } + utils.LavaFormatTrace("parsed generic value", utils.LogAttr("input", input), utils.LogAttr("genericParser", genericParser), @@ -409,13 +408,21 @@ func parseGeneric(input interface{}, genericParser spectypes.GenericParser) (*Pa case spectypes.PARSER_TYPE_DEFAULT_VALUE: parsed := NewParsedInput() parsed.parsedDataRaw = genericParser.Value + return parsed, nil // Case Block Latest, setting the value set by the user given a json path hit. // Example: block_id: 100, will result in requested block 100. + case spectypes.PARSER_TYPE_RESULT: + parsed := NewParsedInput() + strValue := blockInterfaceToString(value) + parsed.parsedDataRaw = strValue + if genericParser.Value != "*" && strValue != genericParser.Value { + parsed.parserError = fmt.Sprintf("expected %s, received %s", genericParser.Value, strValue) + } + return parsed, nil case spectypes.PARSER_TYPE_BLOCK_LATEST: parsed := NewParsedInput() - block := blockInterfaceToString(value) - parsed.parsedDataRaw = block + parsed.parsedDataRaw = blockInterfaceToString(value) return parsed, nil case spectypes.PARSER_TYPE_BLOCK_HASH: return parseGenericParserBlockHash(value) @@ -549,18 +556,20 @@ func blockInterfaceToString(block interface{}) string { func parseByArg(rpcInput RPCInput, input []string, dataSource int) ([]interface{}, error) { // specified block is one of the direct parameters, input should be one string defining the location of the block if len(input) != 1 { - return nil, utils.LavaFormatProduction("invalid input format, input length", nil, utils.LogAttr("input_len", strconv.Itoa(len(input)))) + return nil, fmt.Errorf("invalid input format, input 
length: %d and needs to be 1", len(input)) } + inp := input[0] param_index, err := strconv.ParseUint(inp, 10, 32) if err != nil { - return nil, utils.LavaFormatProduction("invalid input format, input isn't an unsigned index", err, utils.LogAttr("input", inp)) + return nil, fmt.Errorf("invalid input format, input isn't an unsigned index. input=%s error=%w", inp, err) } unmarshalledData, err := getDataToParse(rpcInput, dataSource) if err != nil { - return nil, utils.LavaFormatProduction("invalid input format, data is not json", err, utils.LogAttr("data", unmarshalledData)) + return nil, fmt.Errorf("invalid input format, data is not json. data=%s err=%w", unmarshalledData, err) } + switch unmarshaledDataTyped := unmarshalledData.(type) { case []interface{}: if uint64(len(unmarshaledDataTyped)) <= param_index { @@ -573,11 +582,8 @@ func parseByArg(rpcInput RPCInput, input []string, dataSource int) ([]interface{ retArr = append(retArr, blockInterfaceToString(block)) return retArr, nil default: - // Parse by arg can be only list as we dont have the name of the height property. - return nil, utils.LavaFormatProduction("Parse type unsupported in parse by arg, only list parameters are currently supported", nil, - utils.LogAttr("params", rpcInput.GetParams()), - utils.LogAttr("request", unmarshaledDataTyped), - ) + // Parse by arg can be only list as we don't have the name of the height property. + return nil, fmt.Errorf("parse type unsupported in parse by arg, only list parameters are currently supported. 
param=%s request=%s", rpcInput.GetParams(), unmarshaledDataTyped) } } @@ -610,28 +616,18 @@ func parseCanonical(rpcInput RPCInput, input []string, dataSource int) ([]interf } blockContainer := unmarshalledDataTyped[param_index] for _, key := range input[1:] { - // type assertion for blockcontainer + // type assertion for blockContainer if blockContainer, ok := blockContainer.(map[string]interface{}); !ok { - return nil, utils.LavaFormatWarning("invalid parser input format, blockContainer is not map[string]interface{}", ValueNotSetError, - utils.LogAttr("params", rpcInput.GetParams()), - utils.LogAttr("method", rpcInput.GetMethod()), - utils.LogAttr("blockContainer", fmt.Sprintf("%v", blockContainer)), - utils.LogAttr("key", key), - utils.LogAttr("unmarshaledDataTyped", unmarshalledDataTyped), - ) + return nil, ValueNotSetError.Wrapf("invalid parser input format, blockContainer is not map[string]interface{}. "+ + "params=%v method=%v blockContainer=%v key=%s unmarshaledDataTyped=%v", rpcInput.GetParams(), rpcInput.GetMethod(), blockContainer, key, unmarshalledDataTyped) } // assertion for key if container, ok := blockContainer.(map[string]interface{})[key]; ok { blockContainer = container } else { - return nil, utils.LavaFormatWarning("invalid parser input format, blockContainer does not have the field searched inside", ValueNotSetError, - utils.LogAttr("params", rpcInput.GetParams()), - utils.LogAttr("method", rpcInput.GetMethod()), - utils.LogAttr("blockContainer", fmt.Sprintf("%v", blockContainer)), - utils.LogAttr("key", key), - utils.LogAttr("unmarshaledDataTyped", unmarshalledDataTyped), - ) + return nil, ValueNotSetError.Wrapf("invalid parser input format, blockContainer does not have the field searched inside."+ + "params=%v method=%v blockContainer=%v key=%s unmarshaledDataTyped=%v", rpcInput.GetParams(), rpcInput.GetMethod(), blockContainer, key, unmarshalledDataTyped) } } retArr := make([]interface{}, 0) diff --git a/protocol/parser/parser_test.go 
b/protocol/parser/parser_test.go index dbb06f2ddb..5b739a6a2f 100644 --- a/protocol/parser/parser_test.go +++ b/protocol/parser/parser_test.go @@ -6,6 +6,7 @@ import ( "reflect" "testing" + "github.com/lavanet/lava/v4/protocol/chainlib/chainproxy/rpcclient" pairingtypes "github.com/lavanet/lava/v4/x/pairing/types" spectypes "github.com/lavanet/lava/v4/x/spec/types" "github.com/stretchr/testify/require" @@ -35,6 +36,10 @@ func (rpcInputTest *RPCInputTest) GetID() json.RawMessage { return nil } +func (rpcInputTest *RPCInputTest) GetError() *rpcclient.JsonError { + return nil +} + func (rpcInputTest *RPCInputTest) ParseBlock(block string) (int64, error) { if rpcInputTest.ParseBlockFunc == nil { return ParseDefaultBlockParameter(block) @@ -70,7 +75,6 @@ func TestAppendInterfaceToInterfaceArray(t *testing.T) { }, } for _, test := range tests { - test := test t.Run(test.name, func(t *testing.T) { t.Parallel() @@ -121,7 +125,6 @@ func TestParseArrayOfInterfaces(t *testing.T) { }, } for _, test := range tests { - test := test t.Run(test.name, func(t *testing.T) { t.Parallel() @@ -580,7 +583,6 @@ func TestParseBlockFromParams(t *testing.T) { } for _, test := range tests { - test := test t.Run(test.name, func(t *testing.T) { t.Parallel() result := ParseBlockFromParams(test.rpcInput, test.blockParser, test.genericParsers) @@ -596,6 +598,7 @@ func TestParseBlockFromReply(t *testing.T) { blockParser spectypes.BlockParser genericParsers []spectypes.GenericParser expected int64 + expectedError string }{ { name: "generic_parser_happy_flow_default_value", @@ -713,13 +716,58 @@ func TestParseBlockFromReply(t *testing.T) { }, expected: spectypes.LATEST_BLOCK, }, + { + name: "generic_parser_parse_from_result_happy_flow", + rpcInput: &RPCInputTest{ + Result: []byte(` + { + "foo": { + "bar": 123 + } + } + `), + }, + genericParsers: []spectypes.GenericParser{ + { + ParsePath: ".result.foo.bar", + Value: "123", + ParseType: spectypes.PARSER_TYPE_RESULT, + }, + }, + expected: 123, + }, + { 
+ name: "generic_parser_parse_from_result_error", + rpcInput: &RPCInputTest{ + Result: []byte(` + { + "foo": { + "bar": 123 + } + } + `), + }, + genericParsers: []spectypes.GenericParser{ + { + ParsePath: ".result.foo.bar", + Value: "321", + ParseType: spectypes.PARSER_TYPE_RESULT, + }, + }, + expected: 123, + expectedError: "expected 321, received 123", + }, } for _, test := range tests { - test := test t.Run(test.name, func(t *testing.T) { t.Parallel() parsedInput := ParseBlockFromReply(test.rpcInput, test.blockParser, test.genericParsers) + if test.expectedError != "" { + require.Equal(t, test.expectedError, parsedInput.GetParserError()) + } else { + require.Empty(t, parsedInput.GetParserError()) + } require.Equal(t, test.expected, parsedInput.GetBlock()) }) } @@ -775,9 +823,8 @@ func TestParseBlockFromParamsHash(t *testing.T) { } for _, test := range tests { - test := test t.Run(test.name, func(t *testing.T) { - // t.Parallel() + t.Parallel() parsedInput := ParseBlockFromParams(test.rpcInput, test.blockParser, test.genericParsers) parsedHashes, err := parsedInput.GetBlockHashes() if test.expectedHash == "" { diff --git a/protocol/provideroptimizer/provider_optimizer_test.go b/protocol/provideroptimizer/provider_optimizer_test.go index 61e7a9c0e2..0e857ec16b 100644 --- a/protocol/provideroptimizer/provider_optimizer_test.go +++ b/protocol/provideroptimizer/provider_optimizer_test.go @@ -239,7 +239,7 @@ func TestProviderOptimizerAvailability(t *testing.T) { time.Sleep(4 * time.Millisecond) results, tierResults := runChooseManyTimesAndReturnResults(t, providerOptimizer, providersGen.providersAddresses, nil, requestCU, requestBlock, 1000) require.Greater(t, tierResults[0], 300, tierResults) // 0.42 chance for top tier due to the algorithm to rebalance chances - require.Greater(t, results[providersGen.providersAddresses[skipIndex]]+results[providersGen.providersAddresses[skipIndex+1]]+results[providersGen.providersAddresses[skipIndex+2]], 300) + require.Greater(t, 
results[providersGen.providersAddresses[skipIndex]]+results[providersGen.providersAddresses[skipIndex+1]]+results[providersGen.providersAddresses[skipIndex+2]], 275) require.InDelta(t, results[providersGen.providersAddresses[skipIndex]], results[providersGen.providersAddresses[skipIndex+1]], 50) results, _ = runChooseManyTimesAndReturnResults(t, providerOptimizer, providersGen.providersAddresses, map[string]struct{}{providersGen.providersAddresses[skipIndex]: {}}, requestCU, requestBlock, 1000) require.Zero(t, results[providersGen.providersAddresses[skipIndex]]) @@ -781,7 +781,7 @@ func TestProviderOptimizerWithOptimizerQoSClient(t *testing.T) { chainId := "dontcare" - consumerOptimizerQoSClient := metrics.NewConsumerOptimizerQoSClient(mockHttpServer.URL, 1*time.Second) + consumerOptimizerQoSClient := metrics.NewConsumerOptimizerQoSClient("lava@test", mockHttpServer.URL, 1, 1*time.Second) consumerOptimizerQoSClient.StartOptimizersQoSReportsCollecting(context.Background(), 900*time.Millisecond) providerOptimizer := NewProviderOptimizer(STRATEGY_BALANCED, TEST_AVERAGE_BLOCK_TIME, TEST_BASE_WORLD_LATENCY, 10, consumerOptimizerQoSClient, chainId) diff --git a/protocol/provideroptimizer/selection_weight.go b/protocol/provideroptimizer/selection_weight.go index e0fdc30f38..f391f5ef91 100644 --- a/protocol/provideroptimizer/selection_weight.go +++ b/protocol/provideroptimizer/selection_weight.go @@ -28,6 +28,11 @@ func NewSelectionWeighter() SelectionWeighter { func (sw *selectionWeighterInst) Weight(address string) int64 { sw.lock.RLock() defer sw.lock.RUnlock() + return sw.weightInner(address) +} + +// assumes lock is held +func (sw *selectionWeighterInst) weightInner(address string) int64 { weight, ok := sw.weights[address] if !ok { // default weight is 1 @@ -52,12 +57,12 @@ func (sw *selectionWeighterInst) WeightedChoice(entries []Entry) string { defer sw.lock.RUnlock() totalWeight := int64(0) for _, entry := range entries { - totalWeight += 
int64(float64(sw.Weight(entry.Address)) * entry.Part) + totalWeight += int64(float64(sw.weightInner(entry.Address)) * entry.Part) } randWeight := rand.Int63n(totalWeight) currentWeight := int64(0) for _, entry := range entries { - currentWeight += int64(float64(sw.Weight(entry.Address)) * entry.Part) + currentWeight += int64(float64(sw.weightInner(entry.Address)) * entry.Part) if currentWeight > randWeight { return entry.Address } diff --git a/protocol/rpcconsumer/consumer_relay_state_machine.go b/protocol/rpcconsumer/consumer_relay_state_machine.go index ca0df4c4b9..b45e82a502 100644 --- a/protocol/rpcconsumer/consumer_relay_state_machine.go +++ b/protocol/rpcconsumer/consumer_relay_state_machine.go @@ -2,11 +2,15 @@ package rpcconsumer import ( context "context" + "sync" "sync/atomic" "time" + pairingtypes "github.com/lavanet/lava/v4/x/pairing/types" + "github.com/lavanet/lava/v4/protocol/chainlib" common "github.com/lavanet/lava/v4/protocol/common" + "github.com/lavanet/lava/v4/protocol/lavaprotocol" lavasession "github.com/lavanet/lava/v4/protocol/lavasession" "github.com/lavanet/lava/v4/protocol/metrics" "github.com/lavanet/lava/v4/utils" @@ -15,16 +19,31 @@ import ( type RelayStateMachine interface { GetProtocolMessage() chainlib.ProtocolMessage GetDebugState() bool - GetRelayTaskChannel() chan RelayStateSendInstructions + GetRelayTaskChannel() (chan RelayStateSendInstructions, error) UpdateBatch(err error) GetSelection() Selection GetUsedProviders() *lavasession.UsedProviders - SetRelayProcessor(relayProcessor *RelayProcessor) + SetResultsChecker(resultsChecker ResultsCheckerInf) + SetRelayRetriesManager(relayRetriesManager *lavaprotocol.RelayRetriesManager) +} + +type ResultsCheckerInf interface { + WaitForResults(ctx context.Context) error + HasRequiredNodeResults() (bool, int) } type ConsumerRelaySender interface { getProcessingTimeout(chainMessage chainlib.ChainMessage) (processingTimeout time.Duration, relayTimeout time.Duration) 
GetChainIdAndApiInterface() (string, string) + ParseRelay( + ctx context.Context, + url string, + req string, + connectionType string, + dappID string, + consumerIp string, + metadata []pairingtypes.Metadata, + ) (protocolMessage chainlib.ProtocolMessage, err error) } type tickerMetricSetterInf interface { @@ -32,16 +51,19 @@ type tickerMetricSetterInf interface { } type ConsumerRelayStateMachine struct { - ctx context.Context // same context as user context. - relaySender ConsumerRelaySender - parentRelayProcessor *RelayProcessor - protocolMessage chainlib.ProtocolMessage // only one should make changes to protocol message is ConsumerRelayStateMachine. - analytics *metrics.RelayMetrics // first relay metrics - selection Selection - debugRelays bool - tickerMetricSetter tickerMetricSetterInf - batchUpdate chan error - usedProviders *lavasession.UsedProviders + ctx context.Context // same context as user context. + relaySender ConsumerRelaySender + resultsChecker ResultsCheckerInf + analytics *metrics.RelayMetrics // first relay metrics + selection Selection + debugRelays bool + tickerMetricSetter tickerMetricSetterInf + batchUpdate chan error + usedProviders *lavasession.UsedProviders + relayRetriesManager *lavaprotocol.RelayRetriesManager + relayState []*RelayState + protocolMessage chainlib.ProtocolMessage + relayStateLock sync.RWMutex } func NewRelayStateMachine( @@ -68,11 +90,20 @@ func NewRelayStateMachine( debugRelays: debugRelays, tickerMetricSetter: tickerMetricSetter, batchUpdate: make(chan error, MaximumNumberOfTickerRelayRetries), + relayState: make([]*RelayState, 0), } } -func (crsm *ConsumerRelayStateMachine) SetRelayProcessor(relayProcessor *RelayProcessor) { - crsm.parentRelayProcessor = relayProcessor +func (crsm *ConsumerRelayStateMachine) Initialized() bool { + return crsm.relayRetriesManager != nil && crsm.resultsChecker != nil +} + +func (crsm *ConsumerRelayStateMachine) SetRelayRetriesManager(relayRetriesManager 
*lavaprotocol.RelayRetriesManager) { + crsm.relayRetriesManager = relayRetriesManager +} + +func (crsm *ConsumerRelayStateMachine) SetResultsChecker(resultsChecker ResultsCheckerInf) { + crsm.resultsChecker = resultsChecker } func (crsm *ConsumerRelayStateMachine) GetUsedProviders() *lavasession.UsedProviders { @@ -83,13 +114,46 @@ func (crsm *ConsumerRelayStateMachine) GetSelection() Selection { return crsm.selection } -func (crsm *ConsumerRelayStateMachine) shouldRetryOnResult(numberOfRetriesLaunched int, numberOfNodeErrors uint64) bool { - shouldRetry := crsm.shouldRetryInner(numberOfRetriesLaunched) - // archive functionality will be added here. +func (crsm *ConsumerRelayStateMachine) appendRelayState(nextState *RelayState) { + crsm.relayStateLock.Lock() + defer crsm.relayStateLock.Unlock() + crsm.relayState = append(crsm.relayState, nextState) +} + +func (crsm *ConsumerRelayStateMachine) getLatestState() *RelayState { + crsm.relayStateLock.RLock() + defer crsm.relayStateLock.RUnlock() + if len(crsm.relayState) == 0 { + return nil + } + return crsm.relayState[len(crsm.relayState)-1] +} + +func (crsm *ConsumerRelayStateMachine) stateTransition(relayState *RelayState, numberOfNodeErrors uint64) { + batchNumber := crsm.usedProviders.BatchNumber() + var nextState *RelayState + if relayState == nil { // initial state + nextState = NewRelayState(crsm.ctx, crsm.protocolMessage, 0, crsm.relayRetriesManager, crsm.relaySender, &ArchiveStatus{}) + } else { + nextState = NewRelayState(crsm.ctx, crsm.GetProtocolMessage(), relayState.GetStateNumber()+1, crsm.relayRetriesManager, crsm.relaySender, relayState.archiveStatus.Copy()) + nextState.upgradeToArchiveIfNeeded(batchNumber, numberOfNodeErrors) + } + crsm.appendRelayState(nextState) +} + +// Should retry implements the logic for when to send another relay. 
+// As well as the decision of changing the protocol message, +// into different extensions or addons based on certain conditions +func (crsm *ConsumerRelayStateMachine) shouldRetry(numberOfNodeErrors uint64) bool { + batchNumber := crsm.usedProviders.BatchNumber() + shouldRetry := crsm.retryCondition(batchNumber) + if shouldRetry { + crsm.stateTransition(crsm.getLatestState(), numberOfNodeErrors) + } return shouldRetry } -func (crsm *ConsumerRelayStateMachine) shouldRetryInner(numberOfRetriesLaunched int) bool { +func (crsm *ConsumerRelayStateMachine) retryCondition(numberOfRetriesLaunched int) bool { if numberOfRetriesLaunched >= MaximumNumberOfTickerRelayRetries { return false } @@ -97,30 +161,33 @@ func (crsm *ConsumerRelayStateMachine) shouldRetryInner(numberOfRetriesLaunched return crsm.selection != BestResult } -func (crsm *ConsumerRelayStateMachine) shouldRetryTicker(numberOfRetriesLaunched int) bool { - return crsm.shouldRetryInner(numberOfRetriesLaunched) -} - func (crsm *ConsumerRelayStateMachine) GetDebugState() bool { return crsm.debugRelays } func (crsm *ConsumerRelayStateMachine) GetProtocolMessage() chainlib.ProtocolMessage { - return crsm.protocolMessage + latestState := crsm.getLatestState() + if latestState == nil { // failed fetching latest state + return crsm.protocolMessage + } + return latestState.GetProtocolMessage() } type RelayStateSendInstructions struct { - protocolMessage chainlib.ProtocolMessage - analytics *metrics.RelayMetrics - err error - done bool + analytics *metrics.RelayMetrics + err error + done bool + relayState *RelayState } func (rssi *RelayStateSendInstructions) IsDone() bool { return rssi.done || rssi.err != nil } -func (crsm *ConsumerRelayStateMachine) GetRelayTaskChannel() chan RelayStateSendInstructions { +func (crsm *ConsumerRelayStateMachine) GetRelayTaskChannel() (chan RelayStateSendInstructions, error) { + if !crsm.Initialized() { + return nil, utils.LavaFormatError("ConsumerRelayStateMachine was not initialized 
properly", nil) + } relayTaskChannel := make(chan RelayStateSendInstructions) go func() { // A channel to be notified processing was done, true means we have results and can return @@ -137,9 +204,9 @@ func (crsm *ConsumerRelayStateMachine) GetRelayTaskChannel() chan RelayStateSend readResultsFromProcessor := func() { // ProcessResults is reading responses while blocking until the conditions are met utils.LavaFormatTrace("[StateMachine] Waiting for results", utils.LogAttr("batch", crsm.usedProviders.BatchNumber())) - crsm.parentRelayProcessor.WaitForResults(processingCtx) + crsm.resultsChecker.WaitForResults(processingCtx) // Decide if we need to resend or not - metRequiredNodeResults, numberOfNodeErrors := crsm.parentRelayProcessor.HasRequiredNodeResults() + metRequiredNodeResults, numberOfNodeErrors := crsm.resultsChecker.HasRequiredNodeResults() numberOfNodeErrorsAtomic.Store(uint64(numberOfNodeErrors)) if metRequiredNodeResults { gotResults <- true @@ -160,10 +227,12 @@ func (crsm *ConsumerRelayStateMachine) GetRelayTaskChannel() chan RelayStateSend } } + // initialize relay state + crsm.stateTransition(nil, 0) // Send First Message, with analytics and without waiting for batch update. relayTaskChannel <- RelayStateSendInstructions{ - protocolMessage: crsm.GetProtocolMessage(), - analytics: crsm.analytics, + analytics: crsm.analytics, + relayState: crsm.getLatestState(), } // Initialize parameters @@ -187,9 +256,7 @@ func (crsm *ConsumerRelayStateMachine) GetRelayTaskChannel() chan RelayStateSend } else { utils.LavaFormatTrace("[StateMachine] batchUpdate - err != nil - batch fail retry attempt", utils.LogAttr("batch", crsm.usedProviders.BatchNumber()), utils.LogAttr("consecutiveBatchErrors", consecutiveBatchErrors)) // Failed sending message, but we still want to attempt sending more. 
- relayTaskChannel <- RelayStateSendInstructions{ - protocolMessage: crsm.GetProtocolMessage(), - } + relayTaskChannel <- RelayStateSendInstructions{relayState: crsm.getLatestState()} } continue } @@ -205,18 +272,18 @@ func (crsm *ConsumerRelayStateMachine) GetRelayTaskChannel() chan RelayStateSend return } // If should retry == true, send a new batch. (success == false) - if crsm.shouldRetryOnResult(crsm.usedProviders.BatchNumber(), numberOfNodeErrorsAtomic.Load()) { + if crsm.shouldRetry(numberOfNodeErrorsAtomic.Load()) { utils.LavaFormatTrace("[StateMachine] success := <-gotResults - crsm.ShouldRetry(batchNumber)", utils.LogAttr("batch", crsm.usedProviders.BatchNumber())) - relayTaskChannel <- RelayStateSendInstructions{protocolMessage: crsm.GetProtocolMessage()} + relayTaskChannel <- RelayStateSendInstructions{relayState: crsm.getLatestState()} } else { go validateReturnCondition(nil) } go readResultsFromProcessor() case <-startNewBatchTicker.C: // Only trigger another batch for non BestResult relays or if we didn't pass the retry limit. 
- if crsm.shouldRetryTicker(crsm.usedProviders.BatchNumber()) { + if crsm.shouldRetry(numberOfNodeErrorsAtomic.Load()) { utils.LavaFormatTrace("[StateMachine] ticker triggered", utils.LogAttr("batch", crsm.usedProviders.BatchNumber())) - relayTaskChannel <- RelayStateSendInstructions{protocolMessage: crsm.GetProtocolMessage()} + relayTaskChannel <- RelayStateSendInstructions{relayState: crsm.getLatestState()} // Add ticker launch metrics go crsm.tickerMetricSetter.SetRelaySentByNewBatchTickerMetric(crsm.relaySender.GetChainIdAndApiInterface()) } @@ -249,7 +316,7 @@ func (crsm *ConsumerRelayStateMachine) GetRelayTaskChannel() chan RelayStateSend } } }() - return relayTaskChannel + return relayTaskChannel, nil } func (crsm *ConsumerRelayStateMachine) UpdateBatch(err error) { diff --git a/protocol/rpcconsumer/consumer_relay_state_machine_test.go b/protocol/rpcconsumer/consumer_relay_state_machine_test.go index dfd6eeb871..a6804c56fc 100644 --- a/protocol/rpcconsumer/consumer_relay_state_machine_test.go +++ b/protocol/rpcconsumer/consumer_relay_state_machine_test.go @@ -3,27 +3,49 @@ package rpcconsumer import ( context "context" "fmt" + "log" "net/http" "testing" "time" + "github.com/goccy/go-json" "github.com/lavanet/lava/v4/protocol/chainlib" + "github.com/lavanet/lava/v4/protocol/chainlib/chainproxy/rpcclient" "github.com/lavanet/lava/v4/protocol/chainlib/extensionslib" + common "github.com/lavanet/lava/v4/protocol/common" + "github.com/lavanet/lava/v4/protocol/lavaprotocol" lavasession "github.com/lavanet/lava/v4/protocol/lavasession" - "github.com/lavanet/lava/v4/protocol/metrics" + "github.com/lavanet/lava/v4/utils" + "github.com/lavanet/lava/v4/utils/lavaslices" + epochstoragetypes "github.com/lavanet/lava/v4/x/epochstorage/types" + pairingtypes "github.com/lavanet/lava/v4/x/pairing/types" spectypes "github.com/lavanet/lava/v4/x/spec/types" "github.com/stretchr/testify/require" ) +type PolicySt struct { + addons []string + extensions []string + apiInterface 
string +} + +func (a PolicySt) GetSupportedAddons(string) ([]string, error) { + return a.addons, nil +} + +func (a PolicySt) GetSupportedExtensions(string) ([]epochstoragetypes.EndpointService, error) { + ret := []epochstoragetypes.EndpointService{} + for _, ext := range a.extensions { + ret = append(ret, epochstoragetypes.EndpointService{Extension: ext, ApiInterface: a.apiInterface}) + } + return ret, nil +} + type ConsumerRelaySenderMock struct { retValue error tickerValue time.Duration } -func (crsm *ConsumerRelaySenderMock) sendRelayToProvider(ctx context.Context, protocolMessage chainlib.ProtocolMessage, relayProcessor *RelayProcessor, analytics *metrics.RelayMetrics) (errRet error) { - return crsm.retValue -} - func (crsm *ConsumerRelaySenderMock) getProcessingTimeout(chainMessage chainlib.ChainMessage) (processingTimeout time.Duration, relayTimeout time.Duration) { if crsm.tickerValue != 0 { return time.Second * 50000, crsm.tickerValue @@ -35,6 +57,43 @@ func (crsm *ConsumerRelaySenderMock) GetChainIdAndApiInterface() (string, string return "testUno", "testDos" } +func (crsm *ConsumerRelaySenderMock) ParseRelay( + ctx context.Context, + url string, + req string, + connectionType string, + dappID string, + consumerIp string, + metadata []pairingtypes.Metadata, +) (protocolMessage chainlib.ProtocolMessage, err error) { + foundArchive := false + for _, md := range metadata { + if md.Value == "archive" { + foundArchive = true + } + } + if !foundArchive { + utils.LavaFormatFatal("misuse in mocked parse relay", nil) + } + serverHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Handle the incoming request and provide the desired response + w.WriteHeader(http.StatusOK) + }) + specId := "NEAR" + chainParser, _, _, closeServer, _, err := chainlib.CreateChainLibMocks(ctx, specId, spectypes.APIInterfaceJsonRPC, serverHandler, nil, "../../", []string{"archive"}) + defer closeServer() + policy := PolicySt{ + addons: []string{}, + extensions: 
[]string{"archive"}, + apiInterface: spectypes.APIInterfaceJsonRPC, + } + chainParser.SetPolicy(policy, specId, spectypes.APIInterfaceJsonRPC) + chainMsg, err := chainParser.ParseMsg(url, []byte(req), connectionType, metadata, extensionslib.ExtensionInfo{LatestBlock: 0, ExtensionOverride: []string{"archive"}}) + relayRequestData := lavaprotocol.NewRelayData(ctx, connectionType, url, []byte(req), 0, -2, spectypes.APIInterfaceJsonRPC, chainMsg.GetRPCMessage().GetHeaders(), chainlib.GetAddon(chainMsg), common.GetExtensionNames(chainMsg.GetExtensions())) + protocolMessage = chainlib.NewProtocolMessage(chainMsg, nil, relayRequestData, dappID, consumerIp) + return protocolMessage, nil +} + func TestConsumerStateMachineHappyFlow(t *testing.T) { t.Run("happy", func(t *testing.T) { ctx := context.Background() @@ -66,7 +125,8 @@ func TestConsumerStateMachineHappyFlow(t *testing.T) { require.Zero(t, usedProviders.SessionsLatestBatch()) consumerSessionsMap := lavasession.ConsumerSessionsMap{"lava@test": &lavasession.SessionInfo{}, "lava@test2": &lavasession.SessionInfo{}} - relayTaskChannel := relayProcessor.GetRelayTaskChannel() + relayTaskChannel, err := relayProcessor.GetRelayTaskChannel() + require.NoError(t, err) taskNumber := 0 for task := range relayTaskChannel { switch taskNumber { @@ -135,7 +195,8 @@ func TestConsumerStateMachineExhaustRetries(t *testing.T) { require.Zero(t, usedProviders.CurrentlyUsed()) require.Zero(t, usedProviders.SessionsLatestBatch()) - relayTaskChannel := relayProcessor.GetRelayTaskChannel() + relayTaskChannel, err := relayProcessor.GetRelayTaskChannel() + require.NoError(t, err) taskNumber := 0 for task := range relayTaskChannel { switch taskNumber { @@ -151,3 +212,105 @@ func TestConsumerStateMachineExhaustRetries(t *testing.T) { } }) } + +func TestConsumerStateMachineArchiveRetry(t *testing.T) { + t.Run("retries_archive", func(t *testing.T) { + ctx := context.Background() + serverHandler := http.HandlerFunc(func(w http.ResponseWriter, r 
*http.Request) { + // Handle the incoming request and provide the desired response + w.WriteHeader(http.StatusOK) + }) + specId := "NEAR" + chainParser, _, _, closeServer, _, err := chainlib.CreateChainLibMocks(ctx, specId, spectypes.APIInterfaceJsonRPC, serverHandler, nil, "../../", nil) + if closeServer != nil { + defer closeServer() + } + require.NoError(t, err) + + params, _ := json.Marshal([]string{"5NFtBbExnjk4TFXpfXhJidcCm5KYPk7QCY51nWiwyQNU"}) + id, _ := json.Marshal(1) + reqBody := rpcclient.JsonrpcMessage{ + Version: "2.0", + Method: "block", // Query latest block + Params: params, // Use "final" to get the latest final block + ID: id, + } + + // Convert request to JSON + jsonData, err := json.Marshal(reqBody) + if err != nil { + log.Fatalf("Error marshalling request: %v", err) + } + + chainMsg, err := chainParser.ParseMsg("", jsonData, http.MethodPost, nil, extensionslib.ExtensionInfo{LatestBlock: 0}) + require.NoError(t, err) + dappId := "dapp" + consumerIp := "123.11" + reqBlock, _ := chainMsg.RequestedBlock() + var seenBlock int64 = 0 + + relayRequestData := lavaprotocol.NewRelayData(ctx, http.MethodPost, "", jsonData, seenBlock, reqBlock, spectypes.APIInterfaceJsonRPC, chainMsg.GetRPCMessage().GetHeaders(), chainlib.GetAddon(chainMsg), common.GetExtensionNames(chainMsg.GetExtensions())) + protocolMessage := chainlib.NewProtocolMessage(chainMsg, nil, relayRequestData, dappId, consumerIp) + consistency := NewConsumerConsistency(specId) + usedProviders := lavasession.NewUsedProviders(nil) + relayProcessor := NewRelayProcessor( + ctx, + 1, + consistency, + relayProcessorMetrics, + relayProcessorMetrics, + relayRetriesManagerInstance, + NewRelayStateMachine( + ctx, + usedProviders, + &ConsumerRelaySenderMock{retValue: nil, tickerValue: 100 * time.Second}, + protocolMessage, + nil, + false, + relayProcessorMetrics, + )) + + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*10) + defer cancel() + canUse := 
usedProviders.TryLockSelection(ctx) + require.NoError(t, ctx.Err()) + require.Nil(t, canUse) + require.Zero(t, usedProviders.CurrentlyUsed()) + require.Zero(t, usedProviders.SessionsLatestBatch()) + + consumerSessionsMap := lavasession.ConsumerSessionsMap{"lava@test": &lavasession.SessionInfo{}, "lava@test2": &lavasession.SessionInfo{}} + relayTaskChannel, err := relayProcessor.GetRelayTaskChannel() + require.NoError(t, err) + taskNumber := 0 + for task := range relayTaskChannel { + switch taskNumber { + case 0: + require.False(t, task.IsDone()) + usedProviders.AddUsed(consumerSessionsMap, nil) + relayProcessor.UpdateBatch(nil) + sendNodeErrorJsonRpc(relayProcessor, "lava2@test", time.Millisecond*1) + case 1: + require.False(t, task.IsDone()) + require.True(t, + lavaslices.ContainsPredicate( + task.relayState.GetProtocolMessage().GetExtensions(), + func(predicate *spectypes.Extension) bool { return predicate.Name == "archive" }), + ) + usedProviders.AddUsed(consumerSessionsMap, nil) + relayProcessor.UpdateBatch(nil) + sendSuccessRespJsonRpc(relayProcessor, "lava4@test", time.Millisecond*1) + case 2: + require.True(t, task.IsDone()) + results, _ := relayProcessor.HasRequiredNodeResults() + require.True(t, results) + returnedResult, err := relayProcessor.ProcessingResult() + require.NoError(t, err) + require.Equal(t, string(returnedResult.Reply.Data), `{"jsonrpc":"2.0","id":1,"result":{"result":"success"}}`) + require.Equal(t, http.StatusOK, returnedResult.StatusCode) + fmt.Println(relayProcessor.GetProtocolMessage().GetExtensions()) + return // end test. 
+ } + taskNumber++ + } + }) +} diff --git a/protocol/rpcconsumer/custom_transport.go b/protocol/rpcconsumer/custom_transport.go new file mode 100644 index 0000000000..415fea1f85 --- /dev/null +++ b/protocol/rpcconsumer/custom_transport.go @@ -0,0 +1,58 @@ +package rpcconsumer + +import ( + "net/http" + "sync" + "sync/atomic" + + "github.com/lavanet/lava/v4/utils" +) + +type CustomLavaTransport struct { + transport http.RoundTripper + lock sync.RWMutex + secondaryTransport http.RoundTripper + consecutiveFails atomic.Uint64 // TODO: export to metrics +} + +func NewCustomLavaTransport(httpTransport http.RoundTripper, secondaryTransport http.RoundTripper) *CustomLavaTransport { + return &CustomLavaTransport{transport: httpTransport, secondaryTransport: secondaryTransport} +} + +func (c *CustomLavaTransport) SetSecondaryTransport(secondaryTransport http.RoundTripper) { + c.lock.Lock() + defer c.lock.Unlock() + utils.LavaFormatDebug("Setting secondary transport for CustomLavaTransport") + c.secondaryTransport = secondaryTransport +} + +// used to switch the primary and secondary transports, in case the primary one fails too much +func (c *CustomLavaTransport) TogglePrimarySecondaryTransport() { + c.lock.Lock() + defer c.lock.Unlock() + primaryTransport := c.transport + secondaryTransport := c.secondaryTransport + c.secondaryTransport = primaryTransport + c.transport = secondaryTransport +} + +func (c *CustomLavaTransport) RoundTrip(req *http.Request) (*http.Response, error) { + // Custom logic before the request + c.lock.RLock() + primaryTransport := c.transport + secondaryTransport := c.secondaryTransport + c.lock.RUnlock() + // Delegate to the underlying RoundTripper (usually http.Transport) + resp, err := primaryTransport.RoundTrip(req) + // Custom logic after the request + if err != nil { + c.consecutiveFails.Add(1) + // If the primary transport fails, use the secondary transport + if secondaryTransport != nil { + resp, err = secondaryTransport.RoundTrip(req) + } + } 
else { + c.consecutiveFails.Store(0) + } + return resp, err +} diff --git a/protocol/rpcconsumer/relay_processor.go b/protocol/rpcconsumer/relay_processor.go index 589c054fcc..d68520a8a8 100644 --- a/protocol/rpcconsumer/relay_processor.go +++ b/protocol/rpcconsumer/relay_processor.go @@ -86,7 +86,8 @@ func NewRelayProcessor( selection: relayStateMachine.GetSelection(), usedProviders: relayStateMachine.GetUsedProviders(), } - relayProcessor.RelayStateMachine.SetRelayProcessor(relayProcessor) + relayProcessor.RelayStateMachine.SetResultsChecker(relayProcessor) + relayProcessor.RelayStateMachine.SetRelayRetriesManager(relayRetriesManager) return relayProcessor } diff --git a/protocol/rpcconsumer/relay_processor_test.go b/protocol/rpcconsumer/relay_processor_test.go index a814a4a9f6..1c7c60205c 100644 --- a/protocol/rpcconsumer/relay_processor_test.go +++ b/protocol/rpcconsumer/relay_processor_test.go @@ -7,7 +7,9 @@ import ( "testing" "time" + "github.com/goccy/go-json" "github.com/lavanet/lava/v4/protocol/chainlib" + "github.com/lavanet/lava/v4/protocol/chainlib/chainproxy/rpcclient" "github.com/lavanet/lava/v4/protocol/chainlib/extensionslib" "github.com/lavanet/lava/v4/protocol/common" "github.com/lavanet/lava/v4/protocol/lavaprotocol" @@ -40,6 +42,32 @@ var ( relayProcessorMetrics = &relayProcessorMetricsMock{} ) +func sendSuccessRespJsonRpc(relayProcessor *RelayProcessor, provider string, delay time.Duration) { + time.Sleep(delay) + id, _ := json.Marshal(1) + resultBody, _ := json.Marshal(map[string]string{"result": "success"}) + res := rpcclient.JsonrpcMessage{ + Version: "2.0", + ID: id, + Result: resultBody, + } + resBytes, _ := json.Marshal(res) + relayProcessor.GetUsedProviders().RemoveUsed(provider, lavasession.NewRouterKey(nil), nil) + response := &relayResponse{ + relayResult: common.RelayResult{ + Request: &pairingtypes.RelayRequest{ + RelaySession: &pairingtypes.RelaySession{}, + RelayData: &pairingtypes.RelayPrivateData{}, + }, + Reply: 
&pairingtypes.RelayReply{Data: resBytes, LatestBlock: 1}, + ProviderInfo: common.ProviderInfo{ProviderAddress: provider}, + StatusCode: http.StatusOK, + }, + err: nil, + } + relayProcessor.SetResponse(response) +} + func sendSuccessResp(relayProcessor *RelayProcessor, provider string, delay time.Duration) { time.Sleep(delay) relayProcessor.GetUsedProviders().RemoveUsed(provider, lavasession.NewRouterKey(nil), nil) @@ -94,6 +122,32 @@ func sendNodeError(relayProcessor *RelayProcessor, provider string, delay time.D relayProcessor.SetResponse(response) } +func sendNodeErrorJsonRpc(relayProcessor *RelayProcessor, provider string, delay time.Duration) { + time.Sleep(delay) + id, _ := json.Marshal(1) + res := rpcclient.JsonrpcMessage{ + Version: "2.0", + ID: id, + Error: &rpcclient.JsonError{Code: 1, Message: "test"}, + } + resBytes, _ := json.Marshal(res) + + relayProcessor.GetUsedProviders().RemoveUsed(provider, lavasession.NewRouterKey(nil), nil) + response := &relayResponse{ + relayResult: common.RelayResult{ + Request: &pairingtypes.RelayRequest{ + RelaySession: &pairingtypes.RelaySession{}, + RelayData: &pairingtypes.RelayPrivateData{}, + }, + Reply: &pairingtypes.RelayReply{Data: resBytes}, + ProviderInfo: common.ProviderInfo{ProviderAddress: provider}, + StatusCode: http.StatusInternalServerError, + }, + err: nil, + } + relayProcessor.SetResponse(response) +} + func TestRelayProcessorHappyFlow(t *testing.T) { t.Run("happy", func(t *testing.T) { ctx := context.Background() @@ -172,7 +226,7 @@ func TestRelayProcessorNodeErrorRetryFlow(t *testing.T) { usedProviders := lavasession.NewUsedProviders(nil) relayProcessor := NewRelayProcessor(ctx, 1, nil, relayProcessorMetrics, relayProcessorMetrics, relayRetriesManagerInstance, NewRelayStateMachine(ctx, usedProviders, &RPCConsumerServer{}, protocolMessage, nil, false, relayProcessorMetrics)) - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*10) + ctx, cancel := 
context.WithTimeout(context.Background(), time.Millisecond*50) defer cancel() canUse := usedProviders.TryLockSelection(ctx) require.NoError(t, ctx.Err()) diff --git a/protocol/rpcconsumer/relay_state.go b/protocol/rpcconsumer/relay_state.go new file mode 100644 index 0000000000..37264979ea --- /dev/null +++ b/protocol/rpcconsumer/relay_state.go @@ -0,0 +1,206 @@ +package rpcconsumer + +import ( + "context" + "strings" + "sync" + "sync/atomic" + + "github.com/lavanet/lava/v4/protocol/chainlib" + "github.com/lavanet/lava/v4/protocol/chainlib/extensionslib" + common "github.com/lavanet/lava/v4/protocol/common" + "github.com/lavanet/lava/v4/utils" + slices "github.com/lavanet/lava/v4/utils/lavaslices" + pairingtypes "github.com/lavanet/lava/v4/x/pairing/types" +) + +type RetryHashCacheInf interface { + CheckHashInCache(hash string) bool + AddHashToCache(hash string) +} + +type RelayParserInf interface { + ParseRelay( + ctx context.Context, + url string, + req string, + connectionType string, + dappID string, + consumerIp string, + metadata []pairingtypes.Metadata, + ) (protocolMessage chainlib.ProtocolMessage, err error) +} + +type ArchiveStatus struct { + isArchive atomic.Bool + isUpgraded atomic.Bool + isHashCached atomic.Bool + isEarliestUsed atomic.Bool +} + +func (as *ArchiveStatus) Copy() *ArchiveStatus { + archiveStatus := &ArchiveStatus{} + archiveStatus.isArchive.Store(as.isArchive.Load()) + archiveStatus.isUpgraded.Store(as.isUpgraded.Load()) + archiveStatus.isHashCached.Store(as.isHashCached.Load()) + archiveStatus.isEarliestUsed.Store(as.isEarliestUsed.Load()) + return archiveStatus +} + +type RelayState struct { + archiveStatus *ArchiveStatus + stateNumber int + protocolMessage chainlib.ProtocolMessage + cache RetryHashCacheInf + relayParser RelayParserInf + ctx context.Context + lock sync.RWMutex +} + +func GetEmptyRelayState(ctx context.Context, protocolMessage chainlib.ProtocolMessage) *RelayState { + archiveStatus := &ArchiveStatus{} + 
archiveStatus.isEarliestUsed.Store(true) + return &RelayState{ + ctx: ctx, + protocolMessage: protocolMessage, + archiveStatus: archiveStatus, + } +} + +func NewRelayState(ctx context.Context, protocolMessage chainlib.ProtocolMessage, stateNumber int, cache RetryHashCacheInf, relayParser RelayParserInf, archiveStatus *ArchiveStatus) *RelayState { + relayRequestData := protocolMessage.RelayPrivateData() + if archiveStatus == nil { + utils.LavaFormatError("misuse detected archiveStatus is nil", nil, utils.Attribute{Key: "protocolMessage.GetApi", Value: protocolMessage.GetApi()}) + archiveStatus = &ArchiveStatus{} + } + rs := &RelayState{ + ctx: ctx, + protocolMessage: protocolMessage, + stateNumber: stateNumber, + cache: cache, + relayParser: relayParser, + archiveStatus: archiveStatus, + } + rs.archiveStatus.isArchive.Store(rs.CheckIsArchive(relayRequestData)) + return rs +} + +func (rs *RelayState) CheckIsArchive(relayRequestData *pairingtypes.RelayPrivateData) bool { + return relayRequestData != nil && slices.Contains(relayRequestData.Extensions, extensionslib.ArchiveExtension) +} + +func (rs *RelayState) GetIsEarliestUsed() bool { + if rs == nil || rs.archiveStatus == nil { + return true + } + return rs.archiveStatus.isEarliestUsed.Load() +} + +func (rs *RelayState) GetIsArchive() bool { + if rs == nil { + return false + } + return rs.archiveStatus.isArchive.Load() +} + +func (rs *RelayState) GetIsUpgraded() bool { + if rs == nil { + return false + } + return rs.archiveStatus.isUpgraded.Load() +} + +func (rs *RelayState) SetIsEarliestUsed() { + if rs == nil || rs.archiveStatus == nil { + return + } + rs.archiveStatus.isEarliestUsed.Store(true) +} + +func (rs *RelayState) SetIsArchive(isArchive bool) { + if rs == nil || rs.archiveStatus == nil { + return + } + rs.archiveStatus.isArchive.Store(isArchive) +} + +func (rs *RelayState) GetStateNumber() int { + if rs == nil { + return 0 + } + return rs.stateNumber +} + +func (rs *RelayState) GetProtocolMessage() 
chainlib.ProtocolMessage { + if rs == nil { + return nil + } + rs.lock.RLock() + defer rs.lock.RUnlock() + return rs.protocolMessage +} + +func (rs *RelayState) SetProtocolMessage(protocolMessage chainlib.ProtocolMessage) { + if rs == nil { + return + } + rs.lock.Lock() + defer rs.lock.Unlock() + rs.protocolMessage = protocolMessage +} + +func (rs *RelayState) upgradeToArchiveIfNeeded(numberOfRetriesLaunched int, numberOfNodeErrors uint64) { + if rs == nil || rs.archiveStatus == nil { + return + } + hashes := rs.GetProtocolMessage().GetRequestedBlocksHashes() + // If we got upgraded and we still got a node error (>= 2) we know upgrade didn't work + if rs.archiveStatus.isUpgraded.Load() && numberOfNodeErrors >= 2 { + // Validate the following. + // 1. That we have applied archive + // 2. That we had more than one node error (meaning the 2nd was a successful archive [node error] 100%) + // Now - + // We know we have applied archive and failed. + // 1. We can remove the archive, return to the original protocol message, + // 2. Set all hashes as irrelevant for future queries. + if !rs.archiveStatus.isHashCached.Load() { + for _, hash := range hashes { + rs.cache.AddHashToCache(hash) + } + rs.archiveStatus.isHashCached.Store(true) + } + return + } + if !rs.archiveStatus.isArchive.Load() && len(hashes) > 0 { + // Launch archive only on the second retry attempt. + if numberOfRetriesLaunched == 1 { + // Iterate over all hashes found in relay, if we don't have them in the cache we can try retry on archive. + // If we are familiar with all, we don't want to allow archive. + for _, hash := range hashes { + if !rs.cache.CheckHashInCache(hash) { + // If we didn't find the hash in the cache we can try archive relay. + protocolMessage := rs.GetProtocolMessage() + relayRequestData := protocolMessage.RelayPrivateData() + // We need to set archive. + // Create a new relay private data containing the extension. 
+ userData := protocolMessage.GetUserData() + // add all existing extensions including archive split by "," so the override will work + existingExtensionsPlusArchive := strings.Join(append(relayRequestData.Extensions, extensionslib.ArchiveExtension), ",") + metaDataForArchive := []pairingtypes.Metadata{{Name: common.EXTENSION_OVERRIDE_HEADER_NAME, Value: existingExtensionsPlusArchive}} + newProtocolMessage, err := rs.relayParser.ParseRelay(rs.ctx, relayRequestData.ApiUrl, string(relayRequestData.Data), relayRequestData.ConnectionType, userData.DappId, userData.ConsumerIp, metaDataForArchive) + if err != nil { + utils.LavaFormatError("Failed converting to archive message in shouldRetry", err, utils.LogAttr("relayRequestData", relayRequestData), utils.LogAttr("metadata", metaDataForArchive)) + } else { + // Creating an archive protocol message, and set it to current protocol message + rs.SetProtocolMessage(newProtocolMessage) + // for future batches. + rs.archiveStatus.isUpgraded.Store(true) + rs.archiveStatus.isArchive.Store(true) + } + break + } + } + // We had node error, and we have a hash parsed. 
+ } + } +} diff --git a/protocol/rpcconsumer/rpcconsumer.go b/protocol/rpcconsumer/rpcconsumer.go index bfd554925e..445c9058dd 100644 --- a/protocol/rpcconsumer/rpcconsumer.go +++ b/protocol/rpcconsumer/rpcconsumer.go @@ -11,11 +11,14 @@ import ( "sync" "time" + rpchttp "github.com/cometbft/cometbft/rpc/client/http" + jsonrpcclient "github.com/cometbft/cometbft/rpc/jsonrpc/client" "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/client/config" "github.com/cosmos/cosmos-sdk/client/flags" "github.com/cosmos/cosmos-sdk/client/tx" sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/decred/dcrd/dcrec/secp256k1/v4" "github.com/lavanet/lava/v4/app" "github.com/lavanet/lava/v4/protocol/chainlib" "github.com/lavanet/lava/v4/protocol/common" @@ -46,6 +49,7 @@ const ( refererBackendAddressFlagName = "referer-be-address" refererMarkerFlagName = "referer-marker" reportsSendBEAddress = "reports-be-address" + LavaOverLavaBackupFlagName = "use-lava-over-lava-backup" ) var ( @@ -106,6 +110,7 @@ type AnalyticsServerAddresses struct { RelayServerAddress string ReportsAddressFlag string OptimizerQoSAddress string + OptimizerQoSListen bool } type RPCConsumer struct { consumerStateTracker ConsumerStateTrackerInf @@ -124,6 +129,34 @@ type rpcConsumerStartOptions struct { stateShare bool refererData *chainlib.RefererData staticProvidersList []*lavasession.RPCProviderEndpoint // define static providers as backup to lava providers + geoLocation uint64 +} + +func getConsumerAddressAndKeys(clientCtx client.Context) (sdk.AccAddress, *secp256k1.PrivateKey, error) { + keyName, err := sigs.GetKeyName(clientCtx) + if err != nil { + return nil, nil, fmt.Errorf("failed getting key name from clientCtx: %w", err) + } + + privKey, err := sigs.GetPrivKey(clientCtx, keyName) + if err != nil { + return nil, nil, fmt.Errorf("failed getting private key from key name %s: %w", keyName, err) + } + + clientKey, _ := clientCtx.Keyring.Key(keyName) + pubkey, err := clientKey.GetPubKey() + if 
err != nil { + return nil, nil, fmt.Errorf("failed getting public key from key name %s: %w", keyName, err) + } + + var consumerAddr sdk.AccAddress + err = consumerAddr.Unmarshal(pubkey.Address()) + if err != nil { + return nil, nil, fmt.Errorf("failed unmarshaling public address for key %s (pubkey: %v): %w", + keyName, pubkey.Address(), err) + } + + return consumerAddr, privKey, nil } // spawns a new RPCConsumer server with all it's processes and internals ready for communications @@ -133,19 +166,40 @@ func (rpcc *RPCConsumer) Start(ctx context.Context, options *rpcConsumerStartOpt } options.refererData.ReferrerClient = metrics.NewConsumerReferrerClient(options.refererData.Address) consumerReportsManager := metrics.NewConsumerReportsClient(options.analyticsServerAddresses.ReportsAddressFlag) - consumerMetricsManager := metrics.NewConsumerMetricsManager(metrics.ConsumerMetricsManagerOptions{NetworkAddress: options.analyticsServerAddresses.MetricsListenAddress, AddMethodsApiGauge: options.analyticsServerAddresses.AddApiMethodCallsMetrics}) // start up prometheus metrics - consumerUsageServeManager := metrics.NewConsumerRelayServerClient(options.analyticsServerAddresses.RelayServerAddress) // start up relay server reporting - var consumerOptimizerQoSClient *metrics.ConsumerOptimizerQoSClient - if options.analyticsServerAddresses.OptimizerQoSAddress != "" { - consumerOptimizerQoSClient = metrics.NewConsumerOptimizerQoSClient(options.analyticsServerAddresses.OptimizerQoSAddress, metrics.OptimizerQosServerPushInterval) // start up optimizer qos client - consumerOptimizerQoSClient.StartOptimizersQoSReportsCollecting(ctx, metrics.OptimizerQosServerSamplingInterval) // start up optimizer qos client + + consumerAddr, privKey, err := getConsumerAddressAndKeys(options.clientCtx) + if err != nil { + utils.LavaFormatFatal("failed to get consumer address and keys", err) } + consumerUsageServeManager := 
metrics.NewConsumerRelayServerClient(options.analyticsServerAddresses.RelayServerAddress) // start up relay server reporting + var consumerOptimizerQoSClient *metrics.ConsumerOptimizerQoSClient + if options.analyticsServerAddresses.OptimizerQoSAddress != "" || options.analyticsServerAddresses.OptimizerQoSListen { + consumerOptimizerQoSClient = metrics.NewConsumerOptimizerQoSClient(consumerAddr.String(), options.analyticsServerAddresses.OptimizerQoSAddress, options.geoLocation, metrics.OptimizerQosServerPushInterval) // start up optimizer qos client + consumerOptimizerQoSClient.StartOptimizersQoSReportsCollecting(ctx, metrics.OptimizerQosServerSamplingInterval) + } + consumerMetricsManager := metrics.NewConsumerMetricsManager(metrics.ConsumerMetricsManagerOptions{ + NetworkAddress: options.analyticsServerAddresses.MetricsListenAddress, + AddMethodsApiGauge: options.analyticsServerAddresses.AddApiMethodCallsMetrics, + EnableQoSListener: options.analyticsServerAddresses.OptimizerQoSListen, + ConsumerOptimizerQoSClient: consumerOptimizerQoSClient, + }) // start up prometheus metrics rpcConsumerMetrics, err := metrics.NewRPCConsumerLogs(consumerMetricsManager, consumerUsageServeManager, consumerOptimizerQoSClient) if err != nil { utils.LavaFormatFatal("failed creating RPCConsumer logs", err) } + consumerMetricsManager.SetVersion(upgrade.GetCurrentVersion().ConsumerVersion) + var customLavaTransport *CustomLavaTransport + httpClient, err := jsonrpcclient.DefaultHTTPClient(options.clientCtx.NodeURI) + if err == nil { + customLavaTransport = NewCustomLavaTransport(httpClient.Transport, nil) + httpClient.Transport = customLavaTransport + client, err := rpchttp.NewWithClient(options.clientCtx.NodeURI, "/websocket", httpClient) + if err == nil { + options.clientCtx = options.clientCtx.WithClient(client) + } + } // spawn up ConsumerStateTracker lavaChainFetcher := chainlib.NewLavaChainFetcher(ctx, options.clientCtx) @@ -155,27 +209,10 @@ func (rpcc *RPCConsumer) Start(ctx 
context.Context, options *rpcConsumerStartOpt } rpcc.consumerStateTracker = consumerStateTracker - lavaChainID := options.clientCtx.ChainID - keyName, err := sigs.GetKeyName(options.clientCtx) - if err != nil { - utils.LavaFormatFatal("failed getting key name from clientCtx", err) - } - privKey, err := sigs.GetPrivKey(options.clientCtx, keyName) - if err != nil { - utils.LavaFormatFatal("failed getting private key from key name", err, utils.Attribute{Key: "keyName", Value: keyName}) - } - clientKey, _ := options.clientCtx.Keyring.Key(keyName) + lavaChainFetcher.FetchLatestBlockNum(ctx) - pubkey, err := clientKey.GetPubKey() - if err != nil { - utils.LavaFormatFatal("failed getting public key from key name", err, utils.Attribute{Key: "keyName", Value: keyName}) - } + lavaChainID := options.clientCtx.ChainID - var consumerAddr sdk.AccAddress - err = consumerAddr.Unmarshal(pubkey.Address()) - if err != nil { - utils.LavaFormatFatal("failed unmarshaling public address", err, utils.Attribute{Key: "keyName", Value: keyName}, utils.Attribute{Key: "pubkey", Value: pubkey.Address()}) - } // we want one provider optimizer per chain so we will store them for reuse across rpcEndpoints chainMutexes := map[string]*sync.Mutex{} for _, endpoint := range options.rpcEndpoints { @@ -207,119 +244,26 @@ func (rpcc *RPCConsumer) Start(ctx context.Context, options *rpcConsumerStartOpt for _, rpcEndpoint := range options.rpcEndpoints { go func(rpcEndpoint *lavasession.RPCEndpoint) error { defer wg.Done() - chainParser, err := chainlib.NewChainParser(rpcEndpoint.ApiInterface) - if err != nil { - err = utils.LavaFormatError("failed creating chain parser", err, utils.Attribute{Key: "endpoint", Value: rpcEndpoint}) - errCh <- err - return err - } - chainID := rpcEndpoint.ChainID - // create policyUpdaters per chain - newPolicyUpdater := updaters.NewPolicyUpdater(chainID, consumerStateTracker, consumerAddr.String(), chainParser, *rpcEndpoint) - policyUpdater, ok, err := 
policyUpdaters.LoadOrStore(chainID, newPolicyUpdater) - if err != nil { - errCh <- err - return utils.LavaFormatError("failed loading or storing policy updater", err, utils.LogAttr("endpoint", rpcEndpoint)) - } - if ok { - err := policyUpdater.AddPolicySetter(chainParser, *rpcEndpoint) - if err != nil { - errCh <- err - return utils.LavaFormatError("failed adding policy setter", err) + rpcConsumerServer, err := rpcc.CreateConsumerEndpoint(ctx, rpcEndpoint, errCh, consumerAddr, consumerStateTracker, + policyUpdaters, optimizers, consumerConsistencies, finalizationConsensuses, chainMutexes, + options, privKey, lavaChainID, rpcConsumerMetrics, consumerReportsManager, consumerOptimizerQoSClient, + consumerMetricsManager, relaysMonitorAggregator) + if err == nil { + if customLavaTransport != nil && statetracker.IsLavaNativeSpec(rpcEndpoint.ChainID) && rpcEndpoint.ApiInterface == spectypes.APIInterfaceTendermintRPC { + // we can add lava over lava to the custom transport as a secondary source + go func() { + ticker := time.NewTicker(100 * time.Millisecond) + defer ticker.Stop() + for range ticker.C { + if rpcConsumerServer.IsInitialized() { + customLavaTransport.SetSecondaryTransport(rpcConsumerServer) + return + } + } + }() } } - - err = statetracker.RegisterForSpecUpdatesOrSetStaticSpec(ctx, chainParser, options.cmdFlags.StaticSpecPath, *rpcEndpoint, rpcc.consumerStateTracker) - if err != nil { - err = utils.LavaFormatError("failed registering for spec updates", err, utils.Attribute{Key: "endpoint", Value: rpcEndpoint}) - errCh <- err - return err - } - - _, averageBlockTime, _, _ := chainParser.ChainBlockStats() - var optimizer *provideroptimizer.ProviderOptimizer - var consumerConsistency *ConsumerConsistency - var finalizationConsensus *finalizationconsensus.FinalizationConsensus - getOrCreateChainAssets := func() error { - // this is locked so we don't race optimizers creation - chainMutexes[chainID].Lock() - defer chainMutexes[chainID].Unlock() - var loaded bool - 
var err error - - baseLatency := common.AverageWorldLatency / 2 // we want performance to be half our timeout or better - - // Create / Use existing optimizer - newOptimizer := provideroptimizer.NewProviderOptimizer(options.strategy, averageBlockTime, baseLatency, options.maxConcurrentProviders, consumerOptimizerQoSClient, chainID) - optimizer, loaded, err = optimizers.LoadOrStore(chainID, newOptimizer) - if err != nil { - return utils.LavaFormatError("failed loading optimizer", err, utils.LogAttr("endpoint", rpcEndpoint.Key())) - } - - if !loaded { - // if this is a new optimizer, register it in the consumerOptimizerQoSClient - consumerOptimizerQoSClient.RegisterOptimizer(optimizer, chainID) - } - - // Create / Use existing ConsumerConsistency - newConsumerConsistency := NewConsumerConsistency(chainID) - consumerConsistency, _, err = consumerConsistencies.LoadOrStore(chainID, newConsumerConsistency) - if err != nil { - return utils.LavaFormatError("failed loading consumer consistency", err, utils.LogAttr("endpoint", rpcEndpoint.Key())) - } - - // Create / Use existing FinalizationConsensus - newFinalizationConsensus := finalizationconsensus.NewFinalizationConsensus(rpcEndpoint.ChainID) - finalizationConsensus, loaded, err = finalizationConsensuses.LoadOrStore(chainID, newFinalizationConsensus) - if err != nil { - return utils.LavaFormatError("failed loading finalization consensus", err, utils.LogAttr("endpoint", rpcEndpoint.Key())) - } - if !loaded { // when creating new finalization consensus instance we need to register it to updates - consumerStateTracker.RegisterFinalizationConsensusForUpdates(ctx, finalizationConsensus) - } - return nil - } - err = getOrCreateChainAssets() - if err != nil { - errCh <- err - return err - } - - if finalizationConsensus == nil || optimizer == nil || consumerConsistency == nil { - err = utils.LavaFormatError("failed getting assets, found a nil", nil, utils.Attribute{Key: "endpoint", Value: rpcEndpoint.Key()}) - errCh <- err - 
return err - } - - // Create active subscription provider storage for each unique chain - activeSubscriptionProvidersStorage := lavasession.NewActiveSubscriptionProvidersStorage() - consumerSessionManager := lavasession.NewConsumerSessionManager(rpcEndpoint, optimizer, consumerMetricsManager, consumerReportsManager, consumerAddr.String(), activeSubscriptionProvidersStorage) - // Register For Updates - rpcc.consumerStateTracker.RegisterConsumerSessionManagerForPairingUpdates(ctx, consumerSessionManager, options.staticProvidersList) - - var relaysMonitor *metrics.RelaysMonitor - if options.cmdFlags.RelaysHealthEnableFlag { - relaysMonitor = metrics.NewRelaysMonitor(options.cmdFlags.RelaysHealthIntervalFlag, rpcEndpoint.ChainID, rpcEndpoint.ApiInterface) - relaysMonitorAggregator.RegisterRelaysMonitor(rpcEndpoint.String(), relaysMonitor) - } - - rpcConsumerServer := &RPCConsumerServer{} - - var consumerWsSubscriptionManager *chainlib.ConsumerWSSubscriptionManager - var specMethodType string - if rpcEndpoint.ApiInterface == spectypes.APIInterfaceJsonRPC { - specMethodType = http.MethodPost - } - consumerWsSubscriptionManager = chainlib.NewConsumerWSSubscriptionManager(consumerSessionManager, rpcConsumerServer, options.refererData, specMethodType, chainParser, activeSubscriptionProvidersStorage, consumerMetricsManager) - - utils.LavaFormatInfo("RPCConsumer Listening", utils.Attribute{Key: "endpoints", Value: rpcEndpoint.String()}) - err = rpcConsumerServer.ServeRPCRequests(ctx, rpcEndpoint, rpcc.consumerStateTracker, chainParser, finalizationConsensus, consumerSessionManager, options.requiredResponses, privKey, lavaChainID, options.cache, rpcConsumerMetrics, consumerAddr, consumerConsistency, relaysMonitor, options.cmdFlags, options.stateShare, options.refererData, consumerReportsManager, consumerWsSubscriptionManager) - if err != nil { - err = utils.LavaFormatError("failed serving rpc requests", err, utils.Attribute{Key: "endpoint", Value: rpcEndpoint}) - errCh <- err 
- return err - } - return nil + return err }(rpcEndpoint) } @@ -355,6 +299,141 @@ func (rpcc *RPCConsumer) Start(ctx context.Context, options *rpcConsumerStartOpt return nil } +func (rpcc *RPCConsumer) CreateConsumerEndpoint( + ctx context.Context, + rpcEndpoint *lavasession.RPCEndpoint, + errCh chan error, + consumerAddr sdk.AccAddress, + consumerStateTracker *statetracker.ConsumerStateTracker, + policyUpdaters *common.SafeSyncMap[string, *updaters.PolicyUpdater], + optimizers *common.SafeSyncMap[string, *provideroptimizer.ProviderOptimizer], + consumerConsistencies *common.SafeSyncMap[string, *ConsumerConsistency], + finalizationConsensuses *common.SafeSyncMap[string, *finalizationconsensus.FinalizationConsensus], + chainMutexes map[string]*sync.Mutex, + options *rpcConsumerStartOptions, + privKey *secp256k1.PrivateKey, + lavaChainID string, + rpcConsumerMetrics *metrics.RPCConsumerLogs, + consumerReportsManager *metrics.ConsumerReportsClient, + consumerOptimizerQoSClient *metrics.ConsumerOptimizerQoSClient, + consumerMetricsManager *metrics.ConsumerMetricsManager, + relaysMonitorAggregator *metrics.RelaysMonitorAggregator, +) (*RPCConsumerServer, error) { + chainParser, err := chainlib.NewChainParser(rpcEndpoint.ApiInterface) + if err != nil { + err = utils.LavaFormatError("failed creating chain parser", err, utils.Attribute{Key: "endpoint", Value: rpcEndpoint}) + errCh <- err + return nil, err + } + chainID := rpcEndpoint.ChainID + // create policyUpdaters per chain + newPolicyUpdater := updaters.NewPolicyUpdater(chainID, consumerStateTracker, consumerAddr.String(), chainParser, *rpcEndpoint) + policyUpdater, ok, err := policyUpdaters.LoadOrStore(chainID, newPolicyUpdater) + if err != nil { + errCh <- err + return nil, utils.LavaFormatError("failed loading or storing policy updater", err, utils.LogAttr("endpoint", rpcEndpoint)) + } + if ok { + err := policyUpdater.AddPolicySetter(chainParser, *rpcEndpoint) + if err != nil { + errCh <- err + return nil, 
utils.LavaFormatError("failed adding policy setter", err) + } + } + + err = statetracker.RegisterForSpecUpdatesOrSetStaticSpec(ctx, chainParser, options.cmdFlags.StaticSpecPath, *rpcEndpoint, rpcc.consumerStateTracker) + if err != nil { + err = utils.LavaFormatError("failed registering for spec updates", err, utils.Attribute{Key: "endpoint", Value: rpcEndpoint}) + errCh <- err + return nil, err + } + + _, averageBlockTime, _, _ := chainParser.ChainBlockStats() + var optimizer *provideroptimizer.ProviderOptimizer + var consumerConsistency *ConsumerConsistency + var finalizationConsensus *finalizationconsensus.FinalizationConsensus + getOrCreateChainAssets := func() error { + // this is locked so we don't race optimizers creation + chainMutexes[chainID].Lock() + defer chainMutexes[chainID].Unlock() + var loaded bool + var err error + + baseLatency := common.AverageWorldLatency / 2 // we want performance to be half our timeout or better + + // Create / Use existing optimizer + newOptimizer := provideroptimizer.NewProviderOptimizer(options.strategy, averageBlockTime, baseLatency, options.maxConcurrentProviders, consumerOptimizerQoSClient, chainID) + optimizer, loaded, err = optimizers.LoadOrStore(chainID, newOptimizer) + if err != nil { + return utils.LavaFormatError("failed loading optimizer", err, utils.LogAttr("endpoint", rpcEndpoint.Key())) + } + + if !loaded { + // if this is a new optimizer, register it in the consumerOptimizerQoSClient + consumerOptimizerQoSClient.RegisterOptimizer(optimizer, chainID) + } + + // Create / Use existing ConsumerConsistency + newConsumerConsistency := NewConsumerConsistency(chainID) + consumerConsistency, _, err = consumerConsistencies.LoadOrStore(chainID, newConsumerConsistency) + if err != nil { + return utils.LavaFormatError("failed loading consumer consistency", err, utils.LogAttr("endpoint", rpcEndpoint.Key())) + } + + // Create / Use existing FinalizationConsensus + newFinalizationConsensus := 
finalizationconsensus.NewFinalizationConsensus(rpcEndpoint.ChainID) + finalizationConsensus, loaded, err = finalizationConsensuses.LoadOrStore(chainID, newFinalizationConsensus) + if err != nil { + return utils.LavaFormatError("failed loading finalization consensus", err, utils.LogAttr("endpoint", rpcEndpoint.Key())) + } + if !loaded { // when creating new finalization consensus instance we need to register it to updates + consumerStateTracker.RegisterFinalizationConsensusForUpdates(ctx, finalizationConsensus) + } + return nil + } + err = getOrCreateChainAssets() + if err != nil { + errCh <- err + return nil, err + } + + if finalizationConsensus == nil || optimizer == nil || consumerConsistency == nil { + err = utils.LavaFormatError("failed getting assets, found a nil", nil, utils.Attribute{Key: "endpoint", Value: rpcEndpoint.Key()}) + errCh <- err + return nil, err + } + + // Create active subscription provider storage for each unique chain + activeSubscriptionProvidersStorage := lavasession.NewActiveSubscriptionProvidersStorage() + consumerSessionManager := lavasession.NewConsumerSessionManager(rpcEndpoint, optimizer, consumerMetricsManager, consumerReportsManager, consumerAddr.String(), activeSubscriptionProvidersStorage) + // Register For Updates + rpcc.consumerStateTracker.RegisterConsumerSessionManagerForPairingUpdates(ctx, consumerSessionManager, options.staticProvidersList) + + var relaysMonitor *metrics.RelaysMonitor + if options.cmdFlags.RelaysHealthEnableFlag { + relaysMonitor = metrics.NewRelaysMonitor(options.cmdFlags.RelaysHealthIntervalFlag, rpcEndpoint.ChainID, rpcEndpoint.ApiInterface) + relaysMonitorAggregator.RegisterRelaysMonitor(rpcEndpoint.String(), relaysMonitor) + } + + rpcConsumerServer := &RPCConsumerServer{} + + var consumerWsSubscriptionManager *chainlib.ConsumerWSSubscriptionManager + var specMethodType string + if rpcEndpoint.ApiInterface == spectypes.APIInterfaceJsonRPC { + specMethodType = http.MethodPost + } + 
consumerWsSubscriptionManager = chainlib.NewConsumerWSSubscriptionManager(consumerSessionManager, rpcConsumerServer, options.refererData, specMethodType, chainParser, activeSubscriptionProvidersStorage, consumerMetricsManager) + + utils.LavaFormatInfo("RPCConsumer Listening", utils.Attribute{Key: "endpoints", Value: rpcEndpoint.String()}) + err = rpcConsumerServer.ServeRPCRequests(ctx, rpcEndpoint, rpcc.consumerStateTracker, chainParser, finalizationConsensus, consumerSessionManager, options.requiredResponses, privKey, lavaChainID, options.cache, rpcConsumerMetrics, consumerAddr, consumerConsistency, relaysMonitor, options.cmdFlags, options.stateShare, options.refererData, consumerReportsManager, consumerWsSubscriptionManager) + if err != nil { + err = utils.LavaFormatError("failed serving rpc requests", err, utils.Attribute{Key: "endpoint", Value: rpcEndpoint}) + errCh <- err + return nil, err + } + return rpcConsumerServer, nil +} + func ParseEndpoints(viper_endpoints *viper.Viper, geolocation uint64) (endpoints []*lavasession.RPCEndpoint, err error) { err = viper_endpoints.UnmarshalKey(common.EndpointsConfigName, &endpoints) if err != nil { @@ -557,6 +636,7 @@ rpcconsumer consumer_examples/full_consumer_example.yml --cache-be "127.0.0.1:77 RelayServerAddress: viper.GetString(metrics.RelayServerFlagName), ReportsAddressFlag: viper.GetString(reportsSendBEAddress), OptimizerQoSAddress: viper.GetString(common.OptimizerQosServerAddressFlag), + OptimizerQoSListen: viper.GetBool(common.OptimizerQosListenFlag), } var refererData *chainlib.RefererData @@ -586,6 +666,43 @@ rpcconsumer consumer_examples/full_consumer_example.yml --cache-be "127.0.0.1:77 utils.LavaFormatFatal("offline spec modifications are supported only in single chain bootstrapping", nil, utils.LogAttr("len(rpcEndpoints)", len(rpcEndpoints)), utils.LogAttr("rpcEndpoints", rpcEndpoints)) } + if viper.GetBool(LavaOverLavaBackupFlagName) { + additionalEndpoint := func() *lavasession.RPCEndpoint { + for _, 
endpoint := range rpcEndpoints { + if statetracker.IsLavaNativeSpec(endpoint.ChainID) { + // native spec already exists, no need to add + return nil + } + } + // need to add an endpoint for the native lava chain + if strings.Contains(networkChainId, "mainnet") { + return &lavasession.RPCEndpoint{ + NetworkAddress: chainlib.INTERNAL_ADDRESS, + ChainID: statetracker.MAINNET_SPEC, + ApiInterface: spectypes.APIInterfaceTendermintRPC, + } + } else if strings.Contains(networkChainId, "testnet") { + return &lavasession.RPCEndpoint{ + NetworkAddress: chainlib.INTERNAL_ADDRESS, + ChainID: statetracker.TESTNET_SPEC, + ApiInterface: spectypes.APIInterfaceTendermintRPC, + } + } else if networkChainId == "lava" { + return &lavasession.RPCEndpoint{ + NetworkAddress: chainlib.INTERNAL_ADDRESS, + ChainID: statetracker.TESTNET_SPEC, + ApiInterface: spectypes.APIInterfaceTendermintRPC, + } + } + utils.LavaFormatError("could not find a native lava chain for the current network", nil, utils.LogAttr("networkChainId", networkChainId)) + return nil + }() + if additionalEndpoint != nil { + utils.LavaFormatInfo("Lava over Lava backup is enabled", utils.Attribute{Key: "additionalEndpoint", Value: additionalEndpoint.ChainID}) + rpcEndpoints = append(rpcEndpoints, additionalEndpoint) + } + } + rpcConsumerSharedState := viper.GetBool(common.SharedStateFlag) err = rpcConsumer.Start(ctx, &rpcConsumerStartOptions{ txFactory, @@ -600,6 +717,7 @@ rpcconsumer consumer_examples/full_consumer_example.yml --cache-be "127.0.0.1:77 rpcConsumerSharedState, refererData, staticProviderEndpoints, + geolocation, }) return err }, @@ -646,10 +764,15 @@ rpcconsumer consumer_examples/full_consumer_example.yml --cache-be "127.0.0.1:77 cmdRPCConsumer.Flags().IntVar(&provideroptimizer.OptimizerNumTiers, common.SetProviderOptimizerNumberOfTiersToCreate, 4, "set the number of groups to create, default is 4") // optimizer qos reports 
cmdRPCConsumer.Flags().String(common.OptimizerQosServerAddressFlag, "", "address to send optimizer qos reports to") + cmdRPCConsumer.Flags().Bool(common.OptimizerQosListenFlag, false, "enable listening for optimizer qos reports on metrics endpoint i.e GET -> localhost:7779/provider_optimizer_metrics") cmdRPCConsumer.Flags().DurationVar(&metrics.OptimizerQosServerPushInterval, common.OptimizerQosServerPushIntervalFlag, time.Minute*5, "interval to push optimizer qos reports") cmdRPCConsumer.Flags().DurationVar(&metrics.OptimizerQosServerSamplingInterval, common.OptimizerQosServerSamplingIntervalFlag, time.Second*1, "interval to sample optimizer qos reports") cmdRPCConsumer.Flags().IntVar(&chainlib.WebSocketRateLimit, common.RateLimitWebSocketFlag, chainlib.WebSocketRateLimit, "rate limit (per second) websocket requests per user connection, default is unlimited") + cmdRPCConsumer.Flags().Int64Var(&chainlib.MaximumNumberOfParallelWebsocketConnectionsPerIp, common.LimitParallelWebsocketConnectionsPerIpFlag, chainlib.MaximumNumberOfParallelWebsocketConnectionsPerIp, "limit number of parallel connections to websocket, per ip, default is unlimited (0)") + cmdRPCConsumer.Flags().Int64Var(&chainlib.MaxIdleTimeInSeconds, common.LimitWebsocketIdleTimeFlag, chainlib.MaxIdleTimeInSeconds, "limit the idle time in seconds for a websocket connection, default is 20 minutes ( 20 * 60 )") cmdRPCConsumer.Flags().DurationVar(&chainlib.WebSocketBanDuration, common.BanDurationForWebsocketRateLimitExceededFlag, chainlib.WebSocketBanDuration, "once websocket rate limit is reached, user will be banned for a duration, default no ban") + cmdRPCConsumer.Flags().Bool(LavaOverLavaBackupFlagName, true, "enable lava over lava backup to regular rpc calls") + cmdRPCConsumer.Flags().BoolVar(&chainlib.AllowMissingApisByDefault, common.AllowMissingApisByDefaultFlagName, true, "allows missing apis to be proxied to the provider by default, set false to block missing apis in the spec") 
common.AddRollingLogConfig(cmdRPCConsumer) return cmdRPCConsumer } diff --git a/protocol/rpcconsumer/rpcconsumer_server.go b/protocol/rpcconsumer/rpcconsumer_server.go index 4b3612dd1d..5204d4adce 100644 --- a/protocol/rpcconsumer/rpcconsumer_server.go +++ b/protocol/rpcconsumer/rpcconsumer_server.go @@ -4,9 +4,11 @@ import ( "context" "errors" "fmt" + "net/http" "strconv" "strings" "sync" + "sync/atomic" "time" "github.com/goccy/go-json" @@ -77,6 +79,7 @@ type RPCConsumerServer struct { chainListener chainlib.ChainListener connectedSubscriptionsLock sync.RWMutex relayRetriesManager *lavaprotocol.RelayRetriesManager + initialized atomic.Bool } type relayResponse struct { @@ -166,8 +169,11 @@ func (rpccs *RPCConsumerServer) sendCraftedRelaysWrapper(initialRelays bool) (bo // Only start after everything is initialized - check consumer session manager rpccs.waitForPairing() } - - return rpccs.sendCraftedRelays(MaxRelayRetries, initialRelays) + success, err := rpccs.sendCraftedRelays(MaxRelayRetries, initialRelays) + if success { + rpccs.initialized.Store(true) + } + return success, err } func (rpccs *RPCConsumerServer) waitForPairing() { @@ -246,11 +252,11 @@ func (rpccs *RPCConsumerServer) sendRelayWithRetries(ctx context.Context, retrie usedProvidersResets++ relayProcessor.GetUsedProviders().ClearUnwanted() } - err = rpccs.sendRelayToProvider(ctx, protocolMessage, relayProcessor, nil) + err = rpccs.sendRelayToProvider(ctx, GetEmptyRelayState(ctx, protocolMessage), relayProcessor, nil) if lavasession.PairingListEmptyError.Is(err) { // we don't have pairings anymore, could be related to unwanted providers relayProcessor.GetUsedProviders().ClearUnwanted() - err = rpccs.sendRelayToProvider(ctx, protocolMessage, relayProcessor, nil) + err = rpccs.sendRelayToProvider(ctx, GetEmptyRelayState(ctx, protocolMessage), relayProcessor, nil) } if err != nil { utils.LavaFormatError("[-] failed sending init relay", err, []utils.Attribute{{Key: "chainID", Value: 
rpccs.listenEndpoint.ChainID}, {Key: "APIInterface", Value: rpccs.listenEndpoint.ApiInterface}, {Key: "relayProcessor", Value: relayProcessor}}...) @@ -326,7 +332,7 @@ func (rpccs *RPCConsumerServer) SendRelay( analytics *metrics.RelayMetrics, metadata []pairingtypes.Metadata, ) (relayResult *common.RelayResult, errRet error) { - protocolMessage, err := rpccs.ParseRelay(ctx, url, req, connectionType, dappID, consumerIp, analytics, metadata) + protocolMessage, err := rpccs.ParseRelay(ctx, url, req, connectionType, dappID, consumerIp, metadata) if err != nil { return nil, err } @@ -341,7 +347,6 @@ func (rpccs *RPCConsumerServer) ParseRelay( connectionType string, dappID string, consumerIp string, - analytics *metrics.RelayMetrics, metadata []pairingtypes.Metadata, ) (protocolMessage chainlib.ProtocolMessage, err error) { // gets the relay request data from the ChainListener @@ -438,12 +443,15 @@ func (rpccs *RPCConsumerServer) ProcessRelaySend(ctx context.Context, protocolMe NewRelayStateMachine(ctx, usedProviders, rpccs, protocolMessage, analytics, rpccs.debugRelays, rpccs.rpcConsumerLogs), ) - relayTaskChannel := relayProcessor.GetRelayTaskChannel() + relayTaskChannel, err := relayProcessor.GetRelayTaskChannel() + if err != nil { + return relayProcessor, err + } for task := range relayTaskChannel { if task.IsDone() { return relayProcessor, task.err } - err := rpccs.sendRelayToProvider(ctx, task.protocolMessage, relayProcessor, task.analytics) + err := rpccs.sendRelayToProvider(ctx, task.relayState, relayProcessor, task.analytics) relayProcessor.UpdateBatch(err) } @@ -469,9 +477,111 @@ func (rpccs *RPCConsumerServer) CancelSubscriptionContext(subscriptionKey string } } +func (rpccs *RPCConsumerServer) getEarliestBlockHashRequestedFromCacheReply(cacheReply *pairingtypes.CacheRelayReply) (int64, int64) { + blocksHashesToHeights := cacheReply.GetBlocksHashesToHeights() + earliestRequestedBlock := spectypes.NOT_APPLICABLE + latestRequestedBlock := 
spectypes.NOT_APPLICABLE + + for _, blockHashToHeight := range blocksHashesToHeights { + if blockHashToHeight.Height >= 0 && (earliestRequestedBlock == spectypes.NOT_APPLICABLE || blockHashToHeight.Height < earliestRequestedBlock) { + earliestRequestedBlock = blockHashToHeight.Height + } + if blockHashToHeight.Height >= 0 && (latestRequestedBlock == spectypes.NOT_APPLICABLE || blockHashToHeight.Height > latestRequestedBlock) { + latestRequestedBlock = blockHashToHeight.Height + } + } + return latestRequestedBlock, earliestRequestedBlock +} + +func (rpccs *RPCConsumerServer) resolveRequestedBlock(reqBlock int64, seenBlock int64, latestBlockHashRequested int64, protocolMessage chainlib.ProtocolMessage) int64 { + if reqBlock == spectypes.LATEST_BLOCK && seenBlock != 0 { + // make optimizer select a provider that is likely to have the latest seen block + reqBlock = seenBlock + } + + // Following logic to set the requested block as a new value fetched from the cache reply. + // 1. We managed to get a value from the cache reply. (latestBlockHashRequested >= 0) + // 2. We didn't manage to parse the block and used the default value meaning we didnt have knowledge of the requested block (reqBlock == spectypes.LATEST_BLOCK && protocolMessage.GetUsedDefaultValue()) + // 3. The requested block is smaller than the latest block hash requested from the cache reply (reqBlock >= 0 && reqBlock < latestBlockHashRequested) + // 4. 
The requested block is not applicable meaning block parsing failed completely (reqBlock == spectypes.NOT_APPLICABLE) + if latestBlockHashRequested >= 0 && + ((reqBlock == spectypes.LATEST_BLOCK && protocolMessage.GetUsedDefaultValue()) || + reqBlock >= 0 && reqBlock < latestBlockHashRequested) { + reqBlock = latestBlockHashRequested + } + return reqBlock +} + +func (rpccs *RPCConsumerServer) updateBlocksHashesToHeightsIfNeeded(extensions []*spectypes.Extension, chainMessage chainlib.ChainMessage, blockHashesToHeights []*pairingtypes.BlockHashToHeight, latestBlock int64, finalized bool, relayState *RelayState) ([]*pairingtypes.BlockHashToHeight, bool) { + // This function will add the requested block hash with the height of the block that will force it to be archive on the following conditions: + // 1. The current extension is archive. + // 2. The user requested a single block hash. + // 3. The archive extension rule is set + + // Adding to cache only if we upgraded to archive, meaning normal relay didn't work and archive did. + // It is safe to assume in most cases this hash should be used with archive. + // And if it is not, it will only increase archive load but wont result in user errors. + // After the finalized cache duration it will be reset until next time. 
+ if relayState == nil { + return blockHashesToHeights, finalized + } + if !relayState.GetIsUpgraded() { + if relayState.GetIsEarliestUsed() && relayState.GetIsArchive() && chainMessage.GetUsedDefaultValue() { + finalized = true + } + return blockHashesToHeights, finalized + } + + isArchiveRelay := false + var rule *spectypes.Rule + for _, extension := range extensions { + if extension.Name == extensionslib.ArchiveExtension { + isArchiveRelay = true + rule = extension.Rule + break + } + } + requestedBlocksHashes := chainMessage.GetRequestedBlocksHashes() + isUserRequestedSingleBlocksHashes := len(requestedBlocksHashes) == 1 + + if isArchiveRelay && isUserRequestedSingleBlocksHashes && rule != nil { + ruleBlock := int64(rule.Block) + if ruleBlock >= 0 { + height := latestBlock - ruleBlock - 1 + if height < 0 { + height = 0 + } + blockHashesToHeights = append(blockHashesToHeights, &pairingtypes.BlockHashToHeight{ + Hash: requestedBlocksHashes[0], + Height: height, + }) + // we can assume this result is finalized. 
+ finalized = true + } + } + + return blockHashesToHeights, finalized +} + +func (rpccs *RPCConsumerServer) newBlocksHashesToHeightsSliceFromRequestedBlockHashes(requestedBlockHashes []string) []*pairingtypes.BlockHashToHeight { + var blocksHashesToHeights []*pairingtypes.BlockHashToHeight + for _, blockHash := range requestedBlockHashes { + blocksHashesToHeights = append(blocksHashesToHeights, &pairingtypes.BlockHashToHeight{Hash: blockHash, Height: spectypes.NOT_APPLICABLE}) + } + return blocksHashesToHeights +} + +func (rpccs *RPCConsumerServer) newBlocksHashesToHeightsSliceFromFinalizationConsensus(finalizedBlockHashes map[int64]string) []*pairingtypes.BlockHashToHeight { + var blocksHashesToHeights []*pairingtypes.BlockHashToHeight + for height, blockHash := range finalizedBlockHashes { + blocksHashesToHeights = append(blocksHashesToHeights, &pairingtypes.BlockHashToHeight{Hash: blockHash, Height: height}) + } + return blocksHashesToHeights +} + func (rpccs *RPCConsumerServer) sendRelayToProvider( ctx context.Context, - protocolMessage chainlib.ProtocolMessage, + relayState *RelayState, relayProcessor *RelayProcessor, analytics *metrics.RelayMetrics, ) (errRet error) { @@ -486,6 +596,7 @@ func (rpccs *RPCConsumerServer) sendRelayToProvider( // if necessary send detection tx for hashes consensus mismatch // handle QoS updates // in case connection totally fails, update unresponsive providers in ConsumerSessionManager + protocolMessage := relayState.GetProtocolMessage() userData := protocolMessage.GetUserData() var sharedStateId string // defaults to "", if shared state is disabled then no shared state will be used. 
if rpccs.sharedState { @@ -500,6 +611,8 @@ func (rpccs *RPCConsumerServer) sendRelayToProvider( reqBlock, _ := protocolMessage.RequestedBlock() // try using cache before sending relay + earliestBlockHashRequested := spectypes.NOT_APPLICABLE + latestBlockHashRequested := spectypes.NOT_APPLICABLE var cacheError error if rpccs.cache.CacheActive() { // use cache only if its defined. if !protocolMessage.GetForceCacheRefresh() { // don't use cache if user specified @@ -511,14 +624,15 @@ func (rpccs *RPCConsumerServer) sendRelayToProvider( } else { cacheCtx, cancel := context.WithTimeout(ctx, common.CacheTimeout) cacheReply, cacheError = rpccs.cache.GetEntry(cacheCtx, &pairingtypes.RelayCacheGet{ - RequestHash: hashKey, - RequestedBlock: reqBlock, - ChainId: chainId, - BlockHash: nil, - Finalized: false, - SharedStateId: sharedStateId, - SeenBlock: protocolMessage.RelayPrivateData().SeenBlock, - }) // caching in the portal doesn't care about hashes, and we don't have data on finalization yet + RequestHash: hashKey, + RequestedBlock: reqBlock, + ChainId: chainId, + BlockHash: nil, + Finalized: false, + SharedStateId: sharedStateId, + SeenBlock: protocolMessage.RelayPrivateData().SeenBlock, + BlocksHashesToHeights: rpccs.newBlocksHashesToHeightsSliceFromRequestedBlockHashes(protocolMessage.GetRequestedBlocksHashes()), + }) // caching in the consumer doesn't care about hashes, and we don't have data on finalization yet cancel() reply := cacheReply.GetReply() @@ -553,6 +667,8 @@ func (rpccs *RPCConsumerServer) sendRelayToProvider( }) return nil } + latestBlockHashRequested, earliestBlockHashRequested = rpccs.getEarliestBlockHashRequestedFromCacheReply(cacheReply) + utils.LavaFormatTrace("[Archive Debug] Reading block hashes from cache", utils.LogAttr("latestBlockHashRequested", latestBlockHashRequested), utils.LogAttr("earliestBlockHashRequested", earliestBlockHashRequested)) // cache failed, move on to regular relay if performance.NotConnectedError.Is(cacheError) { 
utils.LavaFormatDebug("cache not connected", utils.LogAttr("error", cacheError)) @@ -564,14 +680,15 @@ func (rpccs *RPCConsumerServer) sendRelayToProvider( } } - if reqBlock == spectypes.LATEST_BLOCK && protocolMessage.RelayPrivateData().SeenBlock != 0 { - // make optimizer select a provider that is likely to have the latest seen block - reqBlock = protocolMessage.RelayPrivateData().SeenBlock - } + addon := chainlib.GetAddon(protocolMessage) + reqBlock = rpccs.resolveRequestedBlock(reqBlock, protocolMessage.RelayPrivateData().SeenBlock, latestBlockHashRequested, protocolMessage) + // check whether we need a new protocol message with the new earliest block hash requested + protocolMessage = rpccs.updateProtocolMessageIfNeededWithNewEarliestData(ctx, relayState, protocolMessage, earliestBlockHashRequested, addon) + // consumerEmergencyTracker always use latest virtual epoch virtualEpoch := rpccs.consumerTxSender.GetLatestVirtualEpoch() - addon := chainlib.GetAddon(protocolMessage) extensions := protocolMessage.GetExtensions() + utils.LavaFormatTrace("[Archive Debug] Extensions to send", utils.LogAttr("extensions", extensions)) usedProviders := relayProcessor.GetUsedProviders() sessions, err := rpccs.consumerSessionManager.GetSessions(ctx, chainlib.GetComputeUnits(protocolMessage), usedProviders, reqBlock, addon, extensions, chainlib.GetStateful(protocolMessage), virtualEpoch) if err != nil { @@ -693,7 +810,6 @@ func (rpccs *RPCConsumerServer) sendRelayToProvider( utils.LogAttr("Provider", providerPublicAddress), ) } - return } @@ -774,6 +890,7 @@ func (rpccs *RPCConsumerServer) sendRelayToProvider( requestedBlock := localRelayResult.Request.RelayData.RequestBlock // get requested block before removing it from the data seenBlock := localRelayResult.Request.RelayData.SeenBlock // get seen block before removing it from the data hashKey, _, hashErr := chainlib.HashCacheRequest(localRelayResult.Request.RelayData, chainId) // get the hash (this changes the data) + 
finalizedBlockHashes := localRelayResult.Reply.FinalizedBlocksHashes go func() { // deal with copying error. @@ -789,22 +906,40 @@ func (rpccs *RPCConsumerServer) sendRelayToProvider( return } + blockHashesToHeights := make([]*pairingtypes.BlockHashToHeight, 0) + + var finalizedBlockHashesObj map[int64]string + err := json.Unmarshal(finalizedBlockHashes, &finalizedBlockHashesObj) + if err != nil { + utils.LavaFormatError("failed unmarshalling finalizedBlockHashes", err, + utils.LogAttr("GUID", ctx), + utils.LogAttr("finalizedBlockHashes", finalizedBlockHashes), + utils.LogAttr("providerAddr", providerPublicAddress), + ) + } else { + blockHashesToHeights = rpccs.newBlocksHashesToHeightsSliceFromFinalizationConsensus(finalizedBlockHashesObj) + } + var finalized bool + blockHashesToHeights, finalized = rpccs.updateBlocksHashesToHeightsIfNeeded(extensions, protocolMessage, blockHashesToHeights, latestBlock, localRelayResult.Finalized, relayState) + utils.LavaFormatTrace("[Archive Debug] Adding HASH TO CACHE", utils.LogAttr("blockHashesToHeights", blockHashesToHeights)) + new_ctx := context.Background() new_ctx, cancel := context.WithTimeout(new_ctx, common.DataReliabilityTimeoutIncrease) defer cancel() _, averageBlockTime, _, _ := rpccs.chainParser.ChainBlockStats() err2 := rpccs.cache.SetEntry(new_ctx, &pairingtypes.RelayCacheSet{ - RequestHash: hashKey, - ChainId: chainId, - RequestedBlock: requestedBlock, - SeenBlock: seenBlock, - BlockHash: nil, // consumer cache doesn't care about block hashes - Response: copyReply, - Finalized: localRelayResult.Finalized, - OptionalMetadata: nil, - SharedStateId: sharedStateId, - AverageBlockTime: int64(averageBlockTime), // by using average block time we can set longer TTL - IsNodeError: isNodeError, + RequestHash: hashKey, + ChainId: chainId, + RequestedBlock: requestedBlock, + SeenBlock: seenBlock, + BlockHash: nil, // consumer cache doesn't care about block hashes + Response: copyReply, + Finalized: finalized, + 
OptionalMetadata: nil, + SharedStateId: sharedStateId, + AverageBlockTime: int64(averageBlockTime), // by using average block time we can set longer TTL + IsNodeError: isNodeError, + BlocksHashesToHeights: blockHashesToHeights, }) if err2 != nil { utils.LavaFormatWarning("error updating cache with new entry", err2) @@ -838,6 +973,10 @@ func (rpccs *RPCConsumerServer) relayInner(ctx context.Context, singleConsumerSe utils.LavaFormatTrace("Sending relay to provider", utils.LogAttr("GUID", ctx), utils.LogAttr("lbUniqueId", singleConsumerSession.EndpointConnection.GetLbUniqueId()), + utils.LogAttr("providerAddress", providerPublicAddress), + utils.LogAttr("requestBlock", relayResult.Request.RelayData.RequestBlock), + utils.LogAttr("seenBlock", relayResult.Request.RelayData.SeenBlock), + utils.LogAttr("extensions", relayResult.Request.RelayData.Extensions), ) connectCtx = metadata.NewOutgoingContext(connectCtx, metadataAdd) defer connectCtxCancel() @@ -1167,7 +1306,7 @@ func (rpccs *RPCConsumerServer) sendDataReliabilityRelayIfApplicable(ctx context rpccs.relayRetriesManager, NewRelayStateMachine(ctx, relayProcessor.usedProviders, rpccs, dataReliabilityProtocolMessage, nil, rpccs.debugRelays, rpccs.rpcConsumerLogs), ) - err := rpccs.sendRelayToProvider(ctx, dataReliabilityProtocolMessage, relayProcessorDataReliability, nil) + err := rpccs.sendRelayToProvider(ctx, GetEmptyRelayState(ctx, dataReliabilityProtocolMessage), relayProcessorDataReliability, nil) if err != nil { return utils.LavaFormatWarning("failed data reliability relay to provider", err, utils.LogAttr("relayProcessorDataReliability", relayProcessorDataReliability)) } @@ -1356,6 +1495,12 @@ func (rpccs *RPCConsumerServer) appendHeadersToRelayResult(ctx context.Context, directiveHeaders := protocolMessage.GetDirectiveHeaders() _, debugRelays := directiveHeaders[common.LAVA_DEBUG_RELAY] if debugRelays { + metadataReply = append(metadataReply, + pairingtypes.Metadata{ + Name: common.REQUESTED_BLOCK_HEADER_NAME, 
+ Value: strconv.FormatInt(protocolMessage.RelayPrivateData().GetRequestBlock(), 10), + }) + routerKey := lavasession.NewRouterKeyFromExtensions(protocolMessage.GetExtensions()) erroredProviders := relayProcessor.GetUsedProviders().GetErroredProviders(routerKey) if len(erroredProviders) > 0 { @@ -1415,3 +1560,59 @@ func (rpccs *RPCConsumerServer) appendHeadersToRelayResult(ctx context.Context, func (rpccs *RPCConsumerServer) IsHealthy() bool { return rpccs.relaysMonitor.IsHealthy() } + +func (rpccs *RPCConsumerServer) IsInitialized() bool { + if rpccs == nil { + return false + } + + return rpccs.initialized.Load() +} + +func (rpccs *RPCConsumerServer) RoundTrip(req *http.Request) (*http.Response, error) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + guid := utils.GenerateUniqueIdentifier() + ctx = utils.WithUniqueIdentifier(ctx, guid) + url, data, connectionType, metadata, err := rpccs.chainParser.ExtractDataFromRequest(req) + if err != nil { + return nil, err + } + relayResult, err := rpccs.SendRelay(ctx, url, data, connectionType, "", "", nil, metadata) + if err != nil { + return nil, err + } + resp, err := rpccs.chainParser.SetResponseFromRelayResult(relayResult) + rpccs.rpcConsumerLogs.SetLoLResponse(err == nil) + return resp, err +} + +func (rpccs *RPCConsumerServer) updateProtocolMessageIfNeededWithNewEarliestData( + ctx context.Context, + relayState *RelayState, + protocolMessage chainlib.ProtocolMessage, + earliestBlockHashRequested int64, + addon string, +) chainlib.ProtocolMessage { + if !relayState.GetIsEarliestUsed() && earliestBlockHashRequested != spectypes.NOT_APPLICABLE { + // We got a earliest block data from cache, we need to create a new protocol message with the new earliest block hash parsed + // and update the extension rules with the new earliest block data as it might be archive. + // Setting earliest used to attempt this only once. 
+ relayState.SetIsEarliestUsed() + relayRequestData := protocolMessage.RelayPrivateData() + userData := protocolMessage.GetUserData() + newProtocolMessage, err := rpccs.ParseRelay(ctx, relayRequestData.ApiUrl, string(relayRequestData.Data), relayRequestData.ConnectionType, userData.DappId, userData.ConsumerIp, nil) + if err != nil { + utils.LavaFormatError("Failed copying protocol message in sendRelayToProvider", err) + return protocolMessage + } + + extensionAdded := newProtocolMessage.UpdateEarliestAndValidateExtensionRules(rpccs.chainParser.ExtensionsParser(), earliestBlockHashRequested, addon, relayRequestData.SeenBlock) + if extensionAdded && relayState.CheckIsArchive(newProtocolMessage.RelayPrivateData()) { + relayState.SetIsArchive(true) + } + relayState.SetProtocolMessage(newProtocolMessage) + return newProtocolMessage + } + return protocolMessage +} diff --git a/protocol/rpcprovider/provider_state_machine.go b/protocol/rpcprovider/provider_state_machine.go index 38a98e3e7d..4fd6f022e5 100644 --- a/protocol/rpcprovider/provider_state_machine.go +++ b/protocol/rpcprovider/provider_state_machine.go @@ -20,13 +20,15 @@ type ProviderStateMachine struct { relayRetriesManager lavaprotocol.RelayRetriesManagerInf chainId string relaySender ProviderRelaySender + numberOfRetries int } -func NewProviderStateMachine(chainId string, relayRetriesManager lavaprotocol.RelayRetriesManagerInf, relaySender ProviderRelaySender) *ProviderStateMachine { +func NewProviderStateMachine(chainId string, relayRetriesManager lavaprotocol.RelayRetriesManagerInf, relaySender ProviderRelaySender, numberOfRetries int) *ProviderStateMachine { return &ProviderStateMachine{ relayRetriesManager: relayRetriesManager, chainId: chainId, relaySender: relaySender, + numberOfRetries: numberOfRetries, } } @@ -41,7 +43,7 @@ func (psm *ProviderStateMachine) SendNodeMessage(ctx context.Context, chainMsg c var replyWrapper *chainlib.RelayReplyWrapper var isNodeError bool - for retryAttempt := 0; 
retryAttempt <= numberOfRetriesAllowedOnNodeErrors; retryAttempt++ { + for retryAttempt := 0; retryAttempt <= psm.numberOfRetries; retryAttempt++ { sendTime := time.Now() replyWrapper, _, _, _, _, err = psm.relaySender.SendNodeMsg(ctx, nil, chainMsg, request.RelayData.Extensions) if err != nil { diff --git a/protocol/rpcprovider/provider_state_machine_test.go b/protocol/rpcprovider/provider_state_machine_test.go index e5791e81a6..7e0cf37b85 100644 --- a/protocol/rpcprovider/provider_state_machine_test.go +++ b/protocol/rpcprovider/provider_state_machine_test.go @@ -25,7 +25,7 @@ func (rs *relaySenderMock) SendNodeMsg(ctx context.Context, ch chan interface{}, func TestStateMachineHappyFlow(t *testing.T) { relaySender := &relaySenderMock{} - stateMachine := NewProviderStateMachine("test", lavaprotocol.NewRelayRetriesManager(), relaySender) + stateMachine := NewProviderStateMachine("test", lavaprotocol.NewRelayRetriesManager(), relaySender, numberOfRetriesAllowedOnNodeErrors) chainMsgMock := chainlib.NewMockChainMessage(gomock.NewController(t)) chainMsgMock. EXPECT(). @@ -50,7 +50,7 @@ func TestStateMachineHappyFlow(t *testing.T) { func TestStateMachineAllFailureFlows(t *testing.T) { relaySender := &relaySenderMock{} - stateMachine := NewProviderStateMachine("test", lavaprotocol.NewRelayRetriesManager(), relaySender) + stateMachine := NewProviderStateMachine("test", lavaprotocol.NewRelayRetriesManager(), relaySender, numberOfRetriesAllowedOnNodeErrors) chainMsgMock := chainlib.NewMockChainMessage(gomock.NewController(t)) returnFalse := false chainMsgMock. 
@@ -87,7 +87,7 @@ func TestStateMachineAllFailureFlows(t *testing.T) { func TestStateMachineFailureAndRecoveryFlow(t *testing.T) { relaySender := &relaySenderMock{} - stateMachine := NewProviderStateMachine("test", lavaprotocol.NewRelayRetriesManager(), relaySender) + stateMachine := NewProviderStateMachine("test", lavaprotocol.NewRelayRetriesManager(), relaySender, numberOfRetriesAllowedOnNodeErrors) chainMsgMock := chainlib.NewMockChainMessage(gomock.NewController(t)) returnFalse := false chainMsgMock. diff --git a/protocol/rpcprovider/rpcprovider.go b/protocol/rpcprovider/rpcprovider.go index 89a99d5f98..2bdb3d837c 100644 --- a/protocol/rpcprovider/rpcprovider.go +++ b/protocol/rpcprovider/rpcprovider.go @@ -30,6 +30,7 @@ import ( "github.com/lavanet/lava/v4/protocol/statetracker/updaters" "github.com/lavanet/lava/v4/protocol/upgrade" "github.com/lavanet/lava/v4/utils" + "github.com/lavanet/lava/v4/utils/lavaslices" "github.com/lavanet/lava/v4/utils/rand" "github.com/lavanet/lava/v4/utils/sigs" epochstorage "github.com/lavanet/lava/v4/x/epochstorage/types" @@ -218,7 +219,7 @@ func (rpcp *RPCProvider) Start(options *rpcProviderStartOptions) (err error) { utils.LavaFormatInfo("RPCProvider pubkey: " + rpcp.addr.String()) - rpcp.createAndRegisterFreezeUpdatersByOptions(ctx, options.clientCtx, rpcp.addr.String()) + rpcp.createAndRegisterFreezeUpdatersByOptions(ctx, providerStateTracker.StateQuery.StateQuery, rpcp.addr.String()) utils.LavaFormatInfo("RPCProvider setting up endpoints", utils.Attribute{Key: "count", Value: strconv.Itoa(len(options.rpcProviderEndpoints))}) @@ -281,9 +282,8 @@ func (rpcp *RPCProvider) Start(options *rpcProviderStartOptions) (err error) { return nil } -func (rpcp *RPCProvider) createAndRegisterFreezeUpdatersByOptions(ctx context.Context, clientCtx client.Context, publicAddress string) { - queryClient := pairingtypes.NewQueryClient(clientCtx) - freezeJailUpdater := updaters.NewProviderFreezeJailUpdater(queryClient, publicAddress, 
rpcp.providerMetricsManager) +func (rpcp *RPCProvider) createAndRegisterFreezeUpdatersByOptions(ctx context.Context, stateQuery *updaters.StateQuery, publicAddress string) { + freezeJailUpdater := updaters.NewProviderFreezeJailUpdater(stateQuery, publicAddress, rpcp.providerMetricsManager) rpcp.providerStateTracker.RegisterForEpochUpdates(ctx, freezeJailUpdater) } @@ -355,11 +355,20 @@ func GetAllAddonsAndExtensionsFromNodeUrlSlice(nodeUrls []common.NodeUrl) *Provi return policy } +func GetAllNodeUrlsInternalPaths(nodeUrls []common.NodeUrl) []string { + paths := []string{} + for _, nodeUrl := range nodeUrls { + paths = append(paths, nodeUrl.InternalPath) + } + return paths +} + func (rpcp *RPCProvider) SetupEndpoint(ctx context.Context, rpcProviderEndpoint *lavasession.RPCProviderEndpoint, specValidator *SpecValidator) error { err := rpcProviderEndpoint.Validate() if err != nil { return utils.LavaFormatError("[PANIC] panic severity critical error, aborting support for chain api due to invalid node url definition, continuing with others", err, utils.Attribute{Key: "endpoint", Value: rpcProviderEndpoint.String()}) } + chainID := rpcProviderEndpoint.ChainID apiInterface := rpcProviderEndpoint.ApiInterface providerSessionManager := lavasession.NewProviderSessionManager(rpcProviderEndpoint, rpcp.blockMemorySize) @@ -375,6 +384,18 @@ func (rpcp *RPCProvider) SetupEndpoint(ctx context.Context, rpcProviderEndpoint return utils.LavaFormatError("[PANIC] failed to RegisterForSpecUpdates, panic severity critical error, aborting support for chain api due to invalid chain parser, continuing with others", err, utils.Attribute{Key: "endpoint", Value: rpcProviderEndpoint.String()}) } + // warn if not all internal paths are configured + configuredInternalPaths := GetAllNodeUrlsInternalPaths(rpcProviderEndpoint.NodeUrls) + chainInternalPaths := chainParser.GetAllInternalPaths() + overConfiguredInternalPaths := lavaslices.Difference(configuredInternalPaths, chainInternalPaths) + if 
len(overConfiguredInternalPaths) > 0 { + utils.LavaFormatWarning("Some configured internal paths are not in the chain's spec", nil, + utils.LogAttr("chainID", chainID), + utils.LogAttr("apiInterface", apiInterface), + utils.LogAttr("internalPaths", strings.Join(overConfiguredInternalPaths, ",")), + ) + } + // after registering for spec updates our chain parser contains the spec and we can add our addons and extensions to allow our provider to function properly providerPolicy := GetAllAddonsAndExtensionsFromNodeUrlSlice(rpcProviderEndpoint.NodeUrls) utils.LavaFormatDebug("supported services for provider", @@ -505,7 +526,7 @@ func (rpcp *RPCProvider) SetupEndpoint(ctx context.Context, rpcProviderEndpoint utils.LavaFormatTrace("Creating provider node subscription manager", utils.LogAttr("rpcProviderEndpoint", rpcProviderEndpoint)) providerNodeSubscriptionManager = chainlib.NewProviderNodeSubscriptionManager(chainRouter, chainParser, rpcProviderServer, rpcp.privKey) } - rpcProviderServer.ServeRPCRequests(ctx, rpcProviderEndpoint, chainParser, rpcp.rewardServer, providerSessionManager, reliabilityManager, rpcp.privKey, rpcp.cache, chainRouter, rpcp.providerStateTracker, rpcp.addr, rpcp.lavaChainID, DEFAULT_ALLOWED_MISSING_CU, providerMetrics, relaysMonitor, providerNodeSubscriptionManager, rpcp.staticProvider, loadManager) + rpcProviderServer.ServeRPCRequests(ctx, rpcProviderEndpoint, chainParser, rpcp.rewardServer, providerSessionManager, reliabilityManager, rpcp.privKey, rpcp.cache, chainRouter, rpcp.providerStateTracker, rpcp.addr, rpcp.lavaChainID, DEFAULT_ALLOWED_MISSING_CU, providerMetrics, relaysMonitor, providerNodeSubscriptionManager, rpcp.staticProvider, loadManager, numberOfRetriesAllowedOnNodeErrors) // set up grpc listener var listener *ProviderListener func() { @@ -808,10 +829,10 @@ rpcprovider 127.0.0.1:3333 OSMOSIS tendermintrpc "wss://www.node-path.com:80,htt cmdRPCProvider.Flags().Duration(common.RelayHealthIntervalFlag, RelayHealthIntervalFlagDefault, 
"interval between relay health checks") cmdRPCProvider.Flags().String(HealthCheckURLPathFlagName, HealthCheckURLPathFlagDefault, "the url path for the provider's grpc health check") cmdRPCProvider.Flags().DurationVar(&updaters.TimeOutForFetchingLavaBlocks, common.TimeOutForFetchingLavaBlocksFlag, time.Second*5, "setting the timeout for fetching lava blocks") - cmdRPCProvider.Flags().BoolVar(&chainlib.IgnoreSubscriptionNotConfiguredError, chainlib.IgnoreSubscriptionNotConfiguredErrorFlag, chainlib.IgnoreSubscriptionNotConfiguredError, "ignore webSocket node url not configured error, when subscription is enabled in spec") cmdRPCProvider.Flags().IntVar(&numberOfRetriesAllowedOnNodeErrors, common.SetRelayCountOnNodeErrorFlag, 2, "set the number of retries attempt on node errors") cmdRPCProvider.Flags().String(common.UseStaticSpecFlag, "", "load offline spec provided path to spec file, used to test specs before they are proposed on chain, example for spec with inheritance: --use-static-spec ./cookbook/specs/ibc.json,./cookbook/specs/tendermint.json,./cookbook/specs/cosmossdk.json,./cookbook/specs/ethermint.json,./cookbook/specs/ethereum.json,./cookbook/specs/evmos.json") cmdRPCProvider.Flags().Uint64(common.RateLimitRequestPerSecondFlag, 0, "Measuring the load relative to this number for feedback - per second - per chain - default unlimited. 
Given Y simultaneous relay calls, a value of X and will measure Y/X load rate.") + cmdRPCProvider.Flags().BoolVar(&chainlib.AllowMissingApisByDefault, common.AllowMissingApisByDefaultFlagName, true, "allows missing apis to be proxied to the node by default, set false to block missing apis in the spec, might result in degraded performance if spec is misconfigured") common.AddRollingLogConfig(cmdRPCProvider) return cmdRPCProvider } diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index 4ca070c3b3..ab0e15559d 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -114,6 +114,7 @@ func (rpcps *RPCProviderServer) ServeRPCRequests( providerNodeSubscriptionManager *chainlib.ProviderNodeSubscriptionManager, staticProvider bool, providerLoadManager *ProviderLoadManager, + numberOfRetries int, ) { rpcps.cache = cache rpcps.chainRouter = chainRouter @@ -135,7 +136,7 @@ func (rpcps *RPCProviderServer) ServeRPCRequests( rpcps.metrics = providerMetrics rpcps.relaysMonitor = relaysMonitor rpcps.providerNodeSubscriptionManager = providerNodeSubscriptionManager - rpcps.providerStateMachine = NewProviderStateMachine(rpcProviderEndpoint.ChainID, lavaprotocol.NewRelayRetriesManager(), chainRouter) + rpcps.providerStateMachine = NewProviderStateMachine(rpcProviderEndpoint.ChainID, lavaprotocol.NewRelayRetriesManager(), chainRouter, numberOfRetries) rpcps.providerLoadManager = providerLoadManager rpcps.initRelaysMonitor(ctx) @@ -328,7 +329,13 @@ func (rpcps *RPCProviderServer) initRelay(ctx context.Context, request *pairingt } // we only need the chainMessage for a static provider if rpcps.StaticProvider { - return nil, nil, chainMessage, nil + // extract consumer address from signature + extractedConsumerAddress, err := rpcps.ExtractConsumerAddress(ctx, request.RelaySession) + if err != nil { + return nil, nil, nil, err + } + + return nil, extractedConsumerAddress, chainMessage, nil 
} relayCU := chainMessage.GetApi().ComputeUnits virtualEpoch := rpcps.stateTracker.GetVirtualEpoch(uint64(request.RelaySession.Epoch)) @@ -352,7 +359,7 @@ func (rpcps *RPCProviderServer) ValidateAddonsExtensions(addon string, extension if apiCollection.CollectionData.AddOn != addon { return utils.LavaFormatWarning("invalid addon in relay, parsed addon is not the same as requested", nil, utils.Attribute{Key: "requested addon", Value: addon[0]}, utils.Attribute{Key: "parsed addon", Value: chainMessage.GetApiCollection().CollectionData.AddOn}) } - if !rpcps.chainRouter.ExtensionsSupported(extensions) { + if !rpcps.chainRouter.ExtensionsSupported(apiCollection.CollectionData.InternalPath, extensions) { return utils.LavaFormatWarning("requested extensions are unsupported in chainRouter", nil, utils.Attribute{Key: "requested extensions", Value: extensions}) } return nil @@ -601,7 +608,7 @@ func (rpcps *RPCProviderServer) ExtractConsumerAddress(ctx context.Context, rela } else { extractedConsumerAddress, err = sigs.ExtractSignerAddress(relaySession) if err != nil { - return nil, utils.LavaFormatWarning("extract signer address from relay", err, utils.Attribute{Key: "GUID", Value: ctx}) + return nil, utils.LavaFormatWarning("failed to extract signer address from relay session", err, utils.LogAttr("GUID", ctx)) } } return extractedConsumerAddress, nil @@ -795,7 +802,7 @@ func (rpcps *RPCProviderServer) TryRelay(ctx context.Context, request *pairingty } } else if len(request.RelayData.Extensions) > 0 { // if cached, Add Archive trailer if requested by the consumer. 
- grpc.SetTrailer(ctx, metadata.Pairs(chainlib.RPCProviderNodeExtension, string(lavasession.NewRouterKey(request.RelayData.Extensions)))) + grpc.SetTrailer(ctx, metadata.Pairs(chainlib.RPCProviderNodeExtension, lavasession.NewRouterKey(request.RelayData.Extensions).String())) } if dataReliabilityEnabled { diff --git a/protocol/rpcprovider/rpcprovider_server_test.go b/protocol/rpcprovider/rpcprovider_server_test.go index a18228b9ec..912a368eb2 100644 --- a/protocol/rpcprovider/rpcprovider_server_test.go +++ b/protocol/rpcprovider/rpcprovider_server_test.go @@ -129,7 +129,7 @@ func TestHandleConsistency(t *testing.T) { requestBlock: spectypes.LATEST_BLOCK, specId: "LAV1", err: nil, - timeout: 20 * time.Millisecond, // 150 is one way travel time + timeout: 25 * time.Millisecond, // 150 is one way travel time chainTrackerBlocks: []int64{100, 101}, changeTime: 100 * time.Second, sleep: true, diff --git a/protocol/statetracker/consumer_state_tracker.go b/protocol/statetracker/consumer_state_tracker.go index 8ab2bd046e..ffa21cd6a7 100644 --- a/protocol/statetracker/consumer_state_tracker.go +++ b/protocol/statetracker/consumer_state_tracker.go @@ -25,7 +25,7 @@ type ConsumerTxSenderInf interface { // ConsumerStateTracker CSTis a class for tracking consumer data from the lava blockchain, such as epoch changes. 
// it allows also to query specific data form the blockchain and acts as a single place to send transactions type ConsumerStateTracker struct { - stateQuery *updaters.ConsumerStateQuery + StateQuery *updaters.ConsumerStateQuery ConsumerTxSenderInf *StateTracker ConsumerEmergencyTrackerInf @@ -34,7 +34,8 @@ type ConsumerStateTracker struct { func NewConsumerStateTracker(ctx context.Context, txFactory tx.Factory, clientCtx client.Context, chainFetcher chaintracker.ChainFetcher, metrics *metrics.ConsumerMetricsManager, disableConflictTransactions bool) (ret *ConsumerStateTracker, err error) { emergencyTracker, blockNotFoundCallback := NewEmergencyTracker(metrics) - stateTrackerBase, err := NewStateTracker(ctx, txFactory, clientCtx, chainFetcher, blockNotFoundCallback) + stateQuery := updaters.NewConsumerStateQuery(ctx, clientCtx) + stateTrackerBase, err := NewStateTracker(ctx, txFactory, stateQuery.StateQuery, chainFetcher, blockNotFoundCallback) if err != nil { return nil, err } @@ -44,7 +45,7 @@ func NewConsumerStateTracker(ctx context.Context, txFactory tx.Factory, clientCt } cst := &ConsumerStateTracker{ StateTracker: stateTrackerBase, - stateQuery: updaters.NewConsumerStateQuery(ctx, clientCtx), + StateQuery: stateQuery, ConsumerTxSenderInf: txSender, ConsumerEmergencyTrackerInf: emergencyTracker, disableConflictTransactions: disableConflictTransactions, @@ -56,7 +57,7 @@ func NewConsumerStateTracker(ctx context.Context, txFactory tx.Factory, clientCt func (cst *ConsumerStateTracker) RegisterConsumerSessionManagerForPairingUpdates(ctx context.Context, consumerSessionManager *lavasession.ConsumerSessionManager, staticProvidersList []*lavasession.RPCProviderEndpoint) { // register this CSM to get the updated pairing list when a new epoch starts - pairingUpdater := updaters.NewPairingUpdater(cst.stateQuery, consumerSessionManager.RPCEndpoint().ChainID) + pairingUpdater := updaters.NewPairingUpdater(cst.StateQuery, consumerSessionManager.RPCEndpoint().ChainID) 
pairingUpdaterRaw := cst.StateTracker.RegisterForUpdates(ctx, pairingUpdater) pairingUpdater, ok := pairingUpdaterRaw.(*updaters.PairingUpdater) if !ok { @@ -81,7 +82,7 @@ func (cst *ConsumerStateTracker) RegisterConsumerSessionManagerForPairingUpdates } func (cst *ConsumerStateTracker) RegisterForPairingUpdates(ctx context.Context, pairingUpdatable updaters.PairingUpdatable, specId string) { - pairingUpdater := updaters.NewPairingUpdater(cst.stateQuery, specId) + pairingUpdater := updaters.NewPairingUpdater(cst.StateQuery, specId) pairingUpdaterRaw := cst.StateTracker.RegisterForUpdates(ctx, pairingUpdater) pairingUpdater, ok := pairingUpdaterRaw.(*updaters.PairingUpdater) if !ok { @@ -94,7 +95,7 @@ func (cst *ConsumerStateTracker) RegisterForPairingUpdates(ctx context.Context, } func (cst *ConsumerStateTracker) RegisterFinalizationConsensusForUpdates(ctx context.Context, finalizationConsensus *finalizationconsensus.FinalizationConsensus) { - finalizationConsensusUpdater := updaters.NewFinalizationConsensusUpdater(cst.stateQuery, finalizationConsensus.SpecId) + finalizationConsensusUpdater := updaters.NewFinalizationConsensusUpdater(cst.StateQuery, finalizationConsensus.SpecId) finalizationConsensusUpdaterRaw := cst.StateTracker.RegisterForUpdates(ctx, finalizationConsensusUpdater) finalizationConsensusUpdater, ok := finalizationConsensusUpdaterRaw.(*updaters.FinalizationConsensusUpdater) if !ok { @@ -120,7 +121,7 @@ func (cst *ConsumerStateTracker) TxConflictDetection(ctx context.Context, finali func (cst *ConsumerStateTracker) RegisterForSpecUpdates(ctx context.Context, specUpdatable updaters.SpecUpdatable, endpoint lavasession.RPCEndpoint) error { // register for spec updates sets spec and updates when a spec has been modified - specUpdater := updaters.NewSpecUpdater(endpoint.ChainID, cst.stateQuery, cst.EventTracker) + specUpdater := updaters.NewSpecUpdater(endpoint.ChainID, cst.StateQuery, cst.EventTracker) specUpdaterRaw := 
cst.StateTracker.RegisterForUpdates(ctx, specUpdater) specUpdater, ok := specUpdaterRaw.(*updaters.SpecUpdater) if !ok { @@ -130,11 +131,11 @@ func (cst *ConsumerStateTracker) RegisterForSpecUpdates(ctx context.Context, spe } func (cst *ConsumerStateTracker) GetConsumerPolicy(ctx context.Context, consumerAddress, chainID string) (*plantypes.Policy, error) { - return cst.stateQuery.GetEffectivePolicy(ctx, consumerAddress, chainID) + return cst.StateQuery.GetEffectivePolicy(ctx, consumerAddress, chainID) } func (cst *ConsumerStateTracker) RegisterForVersionUpdates(ctx context.Context, version *protocoltypes.Version, versionValidator updaters.VersionValidationInf) { - versionUpdater := updaters.NewVersionUpdater(cst.stateQuery, cst.EventTracker, version, versionValidator) + versionUpdater := updaters.NewVersionUpdater(cst.StateQuery, cst.EventTracker, version, versionValidator) versionUpdaterRaw := cst.StateTracker.RegisterForUpdates(ctx, versionUpdater) versionUpdater, ok := versionUpdaterRaw.(*updaters.VersionUpdater) if !ok { @@ -145,7 +146,7 @@ func (cst *ConsumerStateTracker) RegisterForVersionUpdates(ctx context.Context, func (cst *ConsumerStateTracker) RegisterForDowntimeParamsUpdates(ctx context.Context, downtimeParamsUpdatable updaters.DowntimeParamsUpdatable) error { // register for downtimeParams updates sets downtimeParams and updates when downtimeParams has been changed - downtimeParamsUpdater := updaters.NewDowntimeParamsUpdater(cst.stateQuery, cst.EventTracker) + downtimeParamsUpdater := updaters.NewDowntimeParamsUpdater(cst.StateQuery, cst.EventTracker) downtimeParamsUpdaterRaw := cst.StateTracker.RegisterForUpdates(ctx, downtimeParamsUpdater) downtimeParamsUpdater, ok := downtimeParamsUpdaterRaw.(*updaters.DowntimeParamsUpdater) if !ok { @@ -156,5 +157,5 @@ func (cst *ConsumerStateTracker) RegisterForDowntimeParamsUpdates(ctx context.Co } func (cst *ConsumerStateTracker) GetProtocolVersion(ctx context.Context) (*updaters.ProtocolVersionResponse, 
error) { - return cst.stateQuery.GetProtocolVersion(ctx) + return cst.StateQuery.GetProtocolVersion(ctx) } diff --git a/protocol/statetracker/events.go b/protocol/statetracker/events.go index 9b41f74842..1aa68f248a 100644 --- a/protocol/statetracker/events.go +++ b/protocol/statetracker/events.go @@ -65,11 +65,8 @@ func eventsLookup(ctx context.Context, clientCtx client.Context, blocks, fromBlo defer ticker.Stop() readEventsFromBlock := func(blockFrom int64, blockTo int64, hash string) { for block := blockFrom; block < blockTo; block++ { - brp, err := updaters.TryIntoTendermintRPC(clientCtx.Client) - if err != nil { - utils.LavaFormatFatal("invalid blockResults provider", err) - } - blockResults, err := brp.BlockResults(ctx, &block) + queryInst := updaters.NewStateQueryAccessInst(clientCtx) + blockResults, err := queryInst.BlockResults(ctx, &block) if err != nil { utils.LavaFormatError("invalid blockResults status", err) return @@ -275,14 +272,11 @@ func paymentsLookup(ctx context.Context, clientCtx client.Context, blockStart, b continue } utils.LavaFormatInfo("fetching block", utils.LogAttr("block", block)) - brp, err := updaters.TryIntoTendermintRPC(clientCtx.Client) - if err != nil { - utils.LavaFormatFatal("invalid blockResults provider", err) - } + queryInst := updaters.NewStateQueryAccessInst(clientCtx) var blockResults *coretypes.ResultBlockResults for retry := 0; retry < 3; retry++ { ctxWithTimeout, cancelContextWithTimeout := context.WithTimeout(ctx, time.Second*30) - blockResults, err = brp.BlockResults(ctxWithTimeout, &block) + blockResults, err = queryInst.BlockResults(ctxWithTimeout, &block) cancelContextWithTimeout() if err != nil { utils.LavaFormatWarning("@@@@ failed fetching block results will retry", err, utils.LogAttr("block_number", block)) @@ -660,10 +654,7 @@ func countTransactionsPerDay(ctx context.Context, clientCtx client.Context, bloc utils.LogAttr("starting_block", latestHeight-numberOfBlocksInADay), ) - tmClient, err := 
updaters.TryIntoTendermintRPC(clientCtx.Client) - if err != nil { - utils.LavaFormatFatal("invalid blockResults provider", err) - } + queryInst := updaters.NewStateQueryAccessInst(clientCtx) // i is days // j are blocks in that day // starting from current day and going backwards @@ -697,7 +688,7 @@ func countTransactionsPerDay(ctx context.Context, clientCtx client.Context, bloc defer wg.Done() ctxWithTimeout, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() - blockResults, err := tmClient.BlockResults(ctxWithTimeout, &k) + blockResults, err := queryInst.BlockResults(ctxWithTimeout, &k) if err != nil { utils.LavaFormatError("invalid blockResults status", err) return diff --git a/protocol/statetracker/provider_state_tracker.go b/protocol/statetracker/provider_state_tracker.go index 2dc0bbddcb..d6f5423b72 100644 --- a/protocol/statetracker/provider_state_tracker.go +++ b/protocol/statetracker/provider_state_tracker.go @@ -19,7 +19,7 @@ import ( // ProviderStateTracker PST is a class for tracking provider data from the lava blockchain, such as epoch changes. 
// it allows also to query specific data form the blockchain and acts as a single place to send transactions type ProviderStateTracker struct { - stateQuery *updaters.ProviderStateQuery + StateQuery *updaters.ProviderStateQuery txSender *ProviderTxSender *StateTracker *EmergencyTracker @@ -27,7 +27,8 @@ type ProviderStateTracker struct { func NewProviderStateTracker(ctx context.Context, txFactory tx.Factory, clientCtx client.Context, chainFetcher chaintracker.ChainFetcher, metrics *metrics.ProviderMetricsManager) (ret *ProviderStateTracker, err error) { emergencyTracker, blockNotFoundCallback := NewEmergencyTracker(metrics) - stateTrackerBase, err := NewStateTracker(ctx, txFactory, clientCtx, chainFetcher, blockNotFoundCallback) + stateQuery := updaters.NewProviderStateQuery(ctx, updaters.NewStateQueryAccessInst(clientCtx)) + stateTrackerBase, err := NewStateTracker(ctx, txFactory, stateQuery.StateQuery, chainFetcher, blockNotFoundCallback) if err != nil { return nil, err } @@ -37,7 +38,7 @@ func NewProviderStateTracker(ctx context.Context, txFactory tx.Factory, clientCt } pst := &ProviderStateTracker{ StateTracker: stateTrackerBase, - stateQuery: updaters.NewProviderStateQuery(ctx, clientCtx), + StateQuery: stateQuery, txSender: txSender, EmergencyTracker: emergencyTracker, } @@ -49,7 +50,7 @@ func NewProviderStateTracker(ctx context.Context, txFactory tx.Factory, clientCt } func (pst *ProviderStateTracker) RegisterForEpochUpdates(ctx context.Context, epochUpdatable updaters.EpochUpdatable) { - epochUpdater := updaters.NewEpochUpdater(&pst.stateQuery.EpochStateQuery) + epochUpdater := updaters.NewEpochUpdater(&pst.StateQuery.EpochStateQuery) epochUpdaterRaw := pst.StateTracker.RegisterForUpdates(ctx, epochUpdater) epochUpdater, ok := epochUpdaterRaw.(*updaters.EpochUpdater) if !ok { @@ -60,7 +61,7 @@ func (pst *ProviderStateTracker) RegisterForEpochUpdates(ctx context.Context, ep func (pst *ProviderStateTracker) RegisterForSpecUpdates(ctx context.Context, 
specUpdatable updaters.SpecUpdatable, endpoint lavasession.RPCEndpoint) error { // register for spec updates sets spec and updates when a spec has been modified - specUpdater := updaters.NewSpecUpdater(endpoint.ChainID, pst.stateQuery, pst.EventTracker) + specUpdater := updaters.NewSpecUpdater(endpoint.ChainID, pst.StateQuery, pst.EventTracker) specUpdaterRaw := pst.StateTracker.RegisterForUpdates(ctx, specUpdater) specUpdater, ok := specUpdaterRaw.(*updaters.SpecUpdater) if !ok { @@ -71,7 +72,7 @@ func (pst *ProviderStateTracker) RegisterForSpecUpdates(ctx context.Context, spe func (pst *ProviderStateTracker) RegisterForSpecVerifications(ctx context.Context, specVerifier updaters.SpecVerifier, chainId string) error { // register for spec verifications sets spec and verifies when a spec has been modified - specUpdater := updaters.NewSpecUpdater(chainId, pst.stateQuery, pst.EventTracker) + specUpdater := updaters.NewSpecUpdater(chainId, pst.StateQuery, pst.EventTracker) specUpdaterRaw := pst.StateTracker.RegisterForUpdates(ctx, specUpdater) specUpdater, ok := specUpdaterRaw.(*updaters.SpecUpdater) if !ok { @@ -81,7 +82,7 @@ func (pst *ProviderStateTracker) RegisterForSpecVerifications(ctx context.Contex } func (pst *ProviderStateTracker) RegisterForVersionUpdates(ctx context.Context, version *protocoltypes.Version, versionValidator updaters.VersionValidationInf) { - versionUpdater := updaters.NewVersionUpdater(pst.stateQuery, pst.EventTracker, version, versionValidator) + versionUpdater := updaters.NewVersionUpdater(pst.StateQuery, pst.EventTracker, version, versionValidator) versionUpdaterRaw := pst.StateTracker.RegisterForUpdates(ctx, versionUpdater) versionUpdater, ok := versionUpdaterRaw.(*updaters.VersionUpdater) if !ok { @@ -114,7 +115,7 @@ func (pst *ProviderStateTracker) RegisterPaymentUpdatableForPayments(ctx context func (pst *ProviderStateTracker) RegisterForDowntimeParamsUpdates(ctx context.Context, downtimeParamsUpdatable 
updaters.DowntimeParamsUpdatable) error { // register for downtimeParams updates sets downtimeParams and updates when downtimeParams has been changed - downtimeParamsUpdater := updaters.NewDowntimeParamsUpdater(pst.stateQuery, pst.EventTracker) + downtimeParamsUpdater := updaters.NewDowntimeParamsUpdater(pst.StateQuery, pst.EventTracker) downtimeParamsUpdaterRaw := pst.StateTracker.RegisterForUpdates(ctx, downtimeParamsUpdater) downtimeParamsUpdater, ok := downtimeParamsUpdaterRaw.(*updaters.DowntimeParamsUpdater) if !ok { @@ -141,31 +142,31 @@ func (pst *ProviderStateTracker) LatestBlock() int64 { } func (pst *ProviderStateTracker) GetMaxCuForUser(ctx context.Context, consumerAddress, chainID string, epoch uint64) (maxCu uint64, err error) { - return pst.stateQuery.GetMaxCuForUser(ctx, consumerAddress, chainID, epoch) + return pst.StateQuery.GetMaxCuForUser(ctx, consumerAddress, chainID, epoch) } func (pst *ProviderStateTracker) VerifyPairing(ctx context.Context, consumerAddress, providerAddress string, epoch uint64, chainID string) (valid bool, total int64, projectId string, err error) { - return pst.stateQuery.VerifyPairing(ctx, consumerAddress, providerAddress, epoch, chainID) + return pst.StateQuery.VerifyPairing(ctx, consumerAddress, providerAddress, epoch, chainID) } func (pst *ProviderStateTracker) GetEpochSize(ctx context.Context) (uint64, error) { - return pst.stateQuery.GetEpochSize(ctx) + return pst.StateQuery.GetEpochSize(ctx) } func (pst *ProviderStateTracker) EarliestBlockInMemory(ctx context.Context) (uint64, error) { - return pst.stateQuery.EarliestBlockInMemory(ctx) + return pst.StateQuery.EarliestBlockInMemory(ctx) } func (pst *ProviderStateTracker) GetRecommendedEpochNumToCollectPayment(ctx context.Context) (uint64, error) { - return pst.stateQuery.GetRecommendedEpochNumToCollectPayment(ctx) + return pst.StateQuery.GetRecommendedEpochNumToCollectPayment(ctx) } func (pst *ProviderStateTracker) 
GetEpochSizeMultipliedByRecommendedEpochNumToCollectPayment(ctx context.Context) (uint64, error) { - return pst.stateQuery.GetEpochSizeMultipliedByRecommendedEpochNumToCollectPayment(ctx) + return pst.StateQuery.GetEpochSizeMultipliedByRecommendedEpochNumToCollectPayment(ctx) } func (pst *ProviderStateTracker) GetProtocolVersion(ctx context.Context) (*updaters.ProtocolVersionResponse, error) { - return pst.stateQuery.GetProtocolVersion(ctx) + return pst.StateQuery.GetProtocolVersion(ctx) } func (pst *ProviderStateTracker) GetAverageBlockTime() time.Duration { diff --git a/protocol/statetracker/state_tracker.go b/protocol/statetracker/state_tracker.go index c50639cb34..d08535d24f 100644 --- a/protocol/statetracker/state_tracker.go +++ b/protocol/statetracker/state_tracker.go @@ -5,7 +5,6 @@ import ( "sync" "time" - "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/client/tx" "github.com/lavanet/lava/v4/protocol/chainlib" "github.com/lavanet/lava/v4/protocol/chaintracker" @@ -19,12 +18,14 @@ import ( const ( BlocksToSaveLavaChainTracker = 1 // we only need the latest block TendermintConsensusParamsQuery = "consensus_params" + MAINNET_SPEC = "LAVA" + TESTNET_SPEC = "LAV1" ) var ( lavaSpecName = "" // TODO: add a governance param change that indicates what spec id belongs to lava. - lavaSpecOptions = []string{"LAV1", "LAVA"} + LavaSpecOptions = []string{TESTNET_SPEC, MAINNET_SPEC} ) // ConsumerStateTracker CSTis a class for tracking consumer data from the lava blockchain, such as epoch changes. @@ -48,20 +49,20 @@ type SpecUpdaterInf interface { } // Either register for spec updates or set spec for offline spec, used in both consumer and provider process -func RegisterForSpecUpdatesOrSetStaticSpec(ctx context.Context, chainParser chainlib.ChainParser, specPath string, rpcEndpoint lavasession.RPCEndpoint, specUpdaterInf SpecUpdaterInf) (err error) { - if specPath != "" { - // offline spec mode. 
- parsedOfflineSpec, loadError := specutils.GetSpecsFromPath(specPath, rpcEndpoint.ChainID, nil, nil) - if loadError != nil { - err = utils.LavaFormatError("failed loading offline spec", err, utils.LogAttr("spec_path", specPath), utils.LogAttr("spec_id", rpcEndpoint.ChainID)) - } - utils.LavaFormatInfo("Loaded offline spec successfully", utils.LogAttr("spec_path", specPath), utils.LogAttr("chain_id", parsedOfflineSpec.Index)) - chainParser.SetSpec(parsedOfflineSpec) - } else { - // register for spec updates - err = specUpdaterInf.RegisterForSpecUpdates(ctx, chainParser, rpcEndpoint) +func RegisterForSpecUpdatesOrSetStaticSpec(ctx context.Context, chainParser chainlib.ChainParser, specPath string, rpcEndpoint lavasession.RPCEndpoint, specUpdaterInf SpecUpdaterInf) error { + if specPath == "" { + return specUpdaterInf.RegisterForSpecUpdates(ctx, chainParser, rpcEndpoint) } - return + + // offline spec mode. + parsedOfflineSpec, err := specutils.GetSpecsFromPath(specPath, rpcEndpoint.ChainID, nil, nil) + if err != nil { + return utils.LavaFormatError("failed loading offline spec", err, utils.LogAttr("spec_path", specPath), utils.LogAttr("spec_id", rpcEndpoint.ChainID)) + } + utils.LavaFormatInfo("Loaded offline spec successfully", utils.LogAttr("spec_path", specPath), utils.LogAttr("chain_id", parsedOfflineSpec.Index)) + chainParser.SetSpec(parsedOfflineSpec) + + return nil } func GetLavaSpecWithRetry(ctx context.Context, specQueryClient spectypes.QueryClient) (*spectypes.QueryGetSpecResponse, error) { @@ -69,7 +70,7 @@ func GetLavaSpecWithRetry(ctx context.Context, specQueryClient spectypes.QueryCl var err error for i := 0; i < updaters.BlockResultRetry; i++ { if lavaSpecName == "" { // spec name is not initialized, try fetching specs. 
- for _, specId := range lavaSpecOptions { + for _, specId := range LavaSpecOptions { specResponse, err = specQueryClient.Spec(ctx, &spectypes.QueryGetSpecRequest{ ChainID: specId, }) @@ -93,9 +94,9 @@ func GetLavaSpecWithRetry(ctx context.Context, specQueryClient spectypes.QueryCl return specResponse, err } -func NewStateTracker(ctx context.Context, txFactory tx.Factory, clientCtx client.Context, chainFetcher chaintracker.ChainFetcher, blockNotFoundCallback func(latestBlockTime time.Time)) (ret *StateTracker, err error) { +func NewStateTracker(ctx context.Context, txFactory tx.Factory, stateQuery *updaters.StateQuery, chainFetcher chaintracker.ChainFetcher, blockNotFoundCallback func(latestBlockTime time.Time)) (ret *StateTracker, err error) { // validate chainId - status, err := clientCtx.Client.Status(ctx) + status, err := stateQuery.Status(ctx) if err != nil { return nil, utils.LavaFormatError("failed getting status", err) } @@ -103,7 +104,7 @@ func NewStateTracker(ctx context.Context, txFactory tx.Factory, clientCtx client return nil, utils.LavaFormatError("Chain ID mismatch", nil, utils.Attribute{Key: "--chain-id", Value: txFactory.ChainID()}, utils.Attribute{Key: "Node chainID", Value: status.NodeInfo.Network}) } - eventTracker := &updaters.EventTracker{ClientCtx: clientCtx} + eventTracker := &updaters.EventTracker{StateQuery: stateQuery} for i := 0; i < updaters.BlockResultRetry; i++ { err = eventTracker.UpdateBlockResults(0) if err == nil { @@ -114,7 +115,7 @@ func NewStateTracker(ctx context.Context, txFactory tx.Factory, clientCtx client if err != nil { return nil, utils.LavaFormatError("failed getting blockResults after retries", err) } - specQueryClient := spectypes.NewQueryClient(clientCtx) + specQueryClient := stateQuery.GetSpecQueryClient() specResponse, err := GetLavaSpecWithRetry(ctx, specQueryClient) if err != nil { utils.LavaFormatFatal("failed querying lava spec for state tracker", err) @@ -196,3 +197,12 @@ func (st *StateTracker) 
RegisterForUpdates(ctx context.Context, updater Updater) func (st *StateTracker) GetEventTracker() *updaters.EventTracker { return st.EventTracker } + +func IsLavaNativeSpec(checked string) bool { + for _, nativeLavaChain := range LavaSpecOptions { + if checked == nativeLavaChain { + return true + } + } + return false +} diff --git a/protocol/statetracker/updaters/event_tracker.go b/protocol/statetracker/updaters/event_tracker.go index 6f442a83af..ec93865abe 100644 --- a/protocol/statetracker/updaters/event_tracker.go +++ b/protocol/statetracker/updaters/event_tracker.go @@ -2,14 +2,12 @@ package updaters import ( "context" - "fmt" "sync" "time" "golang.org/x/exp/slices" ctypes "github.com/cometbft/cometbft/rpc/core/types" - "github.com/cosmos/cosmos-sdk/client" "github.com/lavanet/lava/v4/protocol/rpcprovider/reliabilitymanager" "github.com/lavanet/lava/v4/protocol/rpcprovider/rewardserver" "github.com/lavanet/lava/v4/utils" @@ -25,8 +23,8 @@ const ( var TimeOutForFetchingLavaBlocks = time.Second * 5 type EventTracker struct { - lock sync.RWMutex - ClientCtx client.Context + lock sync.RWMutex + *StateQuery blockResults *ctypes.ResultBlockResults latestUpdatedBlock int64 } @@ -38,7 +36,7 @@ func (et *EventTracker) UpdateBlockResults(latestBlock int64) (err error) { var res *ctypes.ResultStatus for i := 0; i < 3; i++ { timeoutCtx, cancel := context.WithTimeout(ctx, TimeOutForFetchingLavaBlocks) - res, err = et.ClientCtx.Client.Status(timeoutCtx) + res, err = et.StateQuery.Status(timeoutCtx) cancel() if err == nil { break @@ -50,14 +48,10 @@ func (et *EventTracker) UpdateBlockResults(latestBlock int64) (err error) { latestBlock = res.SyncInfo.LatestBlockHeight } - brp, err := TryIntoTendermintRPC(et.ClientCtx.Client) - if err != nil { - return utils.LavaFormatError("failed converting client.TendermintRPC to tendermintRPC", err) - } var blockResults *ctypes.ResultBlockResults for i := 0; i < BlockResultRetry; i++ { timeoutCtx, cancel := context.WithTimeout(ctx, 
TimeOutForFetchingLavaBlocks) - blockResults, err = brp.BlockResults(timeoutCtx, &latestBlock) + blockResults, err = et.StateQuery.BlockResults(timeoutCtx, &latestBlock) cancel() if err == nil { break @@ -216,11 +210,3 @@ type tendermintRPC interface { height *int64, ) (*ctypes.ResultConsensusParams, error) } - -func TryIntoTendermintRPC(cl client.TendermintRPC) (tendermintRPC, error) { - brp, ok := cl.(tendermintRPC) - if !ok { - return nil, fmt.Errorf("client does not implement tendermintRPC: %T", cl) - } - return brp, nil -} diff --git a/protocol/statetracker/updaters/provider_freeze_jail_updater.go b/protocol/statetracker/updaters/provider_freeze_jail_updater.go index 69f24ad1ab..0be8a93e8c 100644 --- a/protocol/statetracker/updaters/provider_freeze_jail_updater.go +++ b/protocol/statetracker/updaters/provider_freeze_jail_updater.go @@ -6,15 +6,15 @@ import ( "github.com/lavanet/lava/v4/utils" pairingtypes "github.com/lavanet/lava/v4/x/pairing/types" - "google.golang.org/grpc" + grpc "google.golang.org/grpc" ) const ( CallbackKeyForFreezeUpdate = "freeze-update" ) -type ProviderPairingStatusStateQueryInf interface { - Provider(ctx context.Context, in *pairingtypes.QueryProviderRequest, opts ...grpc.CallOption) (*pairingtypes.QueryProviderResponse, error) +type ProviderQueryGetter interface { + GetPairingQueryClient() pairingtypes.QueryClient } type ProviderMetricsManagerInf interface { @@ -30,27 +30,31 @@ const ( FROZEN ) +type ProviderPairingStatusStateQueryInf interface { + Provider(ctx context.Context, in *pairingtypes.QueryProviderRequest, opts ...grpc.CallOption) (*pairingtypes.QueryProviderResponse, error) +} + type ProviderFreezeJailUpdater struct { - pairingQueryClient ProviderPairingStatusStateQueryInf - metricsManager ProviderMetricsManagerInf - publicAddress string + querier ProviderPairingStatusStateQueryInf + metricsManager ProviderMetricsManagerInf + publicAddress string } func NewProviderFreezeJailUpdater( - pairingQueryClient 
ProviderPairingStatusStateQueryInf, + querier ProviderPairingStatusStateQueryInf, publicAddress string, metricsManager ProviderMetricsManagerInf, ) *ProviderFreezeJailUpdater { return &ProviderFreezeJailUpdater{ - pairingQueryClient: pairingQueryClient, - publicAddress: publicAddress, - metricsManager: metricsManager, + querier: querier, + publicAddress: publicAddress, + metricsManager: metricsManager, } } func (pfu *ProviderFreezeJailUpdater) UpdateEpoch(epoch uint64) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - response, err := pfu.pairingQueryClient.Provider(ctx, &pairingtypes.QueryProviderRequest{Address: pfu.publicAddress}) + response, err := pfu.querier.Provider(ctx, &pairingtypes.QueryProviderRequest{Address: pfu.publicAddress}) cancel() if err != nil { diff --git a/protocol/statetracker/updaters/provider_freeze_jail_updater_mocks.go b/protocol/statetracker/updaters/provider_freeze_jail_updater_mocks.go index 24b0738393..5b506dd58f 100644 --- a/protocol/statetracker/updaters/provider_freeze_jail_updater_mocks.go +++ b/protocol/statetracker/updaters/provider_freeze_jail_updater_mocks.go @@ -41,6 +41,10 @@ func (m *MockProviderPairingStatusStateQueryInf) EXPECT() *MockProviderPairingSt return m.recorder } +func (m *MockProviderPairingStatusStateQueryInf) GetPairingQueryClient() ProviderPairingStatusStateQueryInf { + return m +} + // Provider mocks base method. 
func (m *MockProviderPairingStatusStateQueryInf) Provider(ctx context.Context, in *types.QueryProviderRequest, opts ...grpc.CallOption) (*types.QueryProviderResponse, error) { m.ctrl.T.Helper() diff --git a/protocol/statetracker/updaters/state_query.go b/protocol/statetracker/updaters/state_query.go index 51cb45ec61..0877f171c1 100644 --- a/protocol/statetracker/updaters/state_query.go +++ b/protocol/statetracker/updaters/state_query.go @@ -9,6 +9,7 @@ import ( downtimev1 "github.com/lavanet/lava/v4/x/downtime/v1" "github.com/cosmos/cosmos-sdk/client" + grpc1 "github.com/cosmos/gogoproto/grpc" "github.com/dgraph-io/ristretto" reliabilitymanager "github.com/lavanet/lava/v4/protocol/rpcprovider/reliabilitymanager" "github.com/lavanet/lava/v4/utils" @@ -37,22 +38,40 @@ type ProtocolVersionResponse struct { BlockNumber string } +type StateQueryAccessInf interface { + grpc1.ClientConn + tendermintRPC + client.TendermintRPC +} + +type StateQueryAccessInst struct { + grpc1.ClientConn + tendermintRPC + client.TendermintRPC +} + +func NewStateQueryAccessInst(clientCtx client.Context) *StateQueryAccessInst { + tenderRpc, ok := clientCtx.Client.(tendermintRPC) + if !ok { + utils.LavaFormatFatal("failed casting tendermint rpc from client context", nil) + } + return &StateQueryAccessInst{ClientConn: clientCtx, tendermintRPC: tenderRpc, TendermintRPC: clientCtx.Client} +} + type StateQuery struct { - SpecQueryClient spectypes.QueryClient - PairingQueryClient pairingtypes.QueryClient - EpochStorageQueryClient epochstoragetypes.QueryClient - ProtocolClient protocoltypes.QueryClient - DowntimeClient downtimev1.QueryClient + specQueryClient spectypes.QueryClient + pairingQueryClient pairingtypes.QueryClient + epochStorageQueryClient epochstoragetypes.QueryClient + protocolClient protocoltypes.QueryClient + downtimeClient downtimev1.QueryClient ResponsesCache *ristretto.Cache + tendermintRPC + client.TendermintRPC } -func NewStateQuery(ctx context.Context, clientCtx client.Context) 
*StateQuery { +func NewStateQuery(ctx context.Context, accessInf StateQueryAccessInf) *StateQuery { sq := &StateQuery{} - sq.SpecQueryClient = spectypes.NewQueryClient(clientCtx) - sq.PairingQueryClient = pairingtypes.NewQueryClient(clientCtx) - sq.EpochStorageQueryClient = epochstoragetypes.NewQueryClient(clientCtx) - sq.ProtocolClient = protocoltypes.NewQueryClient(clientCtx) - sq.DowntimeClient = downtimev1.NewQueryClient(clientCtx) + sq.UpdateAccess(accessInf) cache, err := ristretto.NewCache(&ristretto.Config{NumCounters: CacheNumCounters, MaxCost: CacheMaxCost, BufferItems: 64}) if err != nil { utils.LavaFormatFatal("failed setting up cache for queries", err) @@ -61,9 +80,27 @@ func NewStateQuery(ctx context.Context, clientCtx client.Context) *StateQuery { return sq } +func (sq *StateQuery) UpdateAccess(accessInf StateQueryAccessInf) { + sq.specQueryClient = spectypes.NewQueryClient(accessInf) + sq.pairingQueryClient = pairingtypes.NewQueryClient(accessInf) + sq.epochStorageQueryClient = epochstoragetypes.NewQueryClient(accessInf) + sq.protocolClient = protocoltypes.NewQueryClient(accessInf) + sq.downtimeClient = downtimev1.NewQueryClient(accessInf) + sq.tendermintRPC = accessInf + sq.TendermintRPC = accessInf +} + +func (sq *StateQuery) Provider(ctx context.Context, in *pairingtypes.QueryProviderRequest, opts ...grpc.CallOption) (*pairingtypes.QueryProviderResponse, error) { + return sq.pairingQueryClient.Provider(ctx, in, opts...) 
+} + +func (sq *StateQuery) GetSpecQueryClient() spectypes.QueryClient { + return sq.specQueryClient +} + func (csq *StateQuery) GetProtocolVersion(ctx context.Context) (*ProtocolVersionResponse, error) { header := metadata.MD{} - param, err := csq.ProtocolClient.Params(ctx, &protocoltypes.QueryParamsRequest{}, grpc.Header(&header)) + param, err := csq.protocolClient.Params(ctx, &protocoltypes.QueryParamsRequest{}, grpc.Header(&header)) if err != nil { return nil, err } @@ -76,7 +113,7 @@ func (csq *StateQuery) GetProtocolVersion(ctx context.Context) (*ProtocolVersion } func (csq *StateQuery) GetSpec(ctx context.Context, chainID string) (*spectypes.Spec, error) { - spec, err := csq.SpecQueryClient.Spec(ctx, &spectypes.QueryGetSpecRequest{ + spec, err := csq.specQueryClient.Spec(ctx, &spectypes.QueryGetSpecRequest{ ChainID: chainID, }) if err != nil { @@ -86,7 +123,7 @@ func (csq *StateQuery) GetSpec(ctx context.Context, chainID string) (*spectypes. } func (csq *StateQuery) GetDowntimeParams(ctx context.Context) (*downtimev1.Params, error) { - res, err := csq.DowntimeClient.QueryParams(ctx, &downtimev1.QueryParamsRequest{}) + res, err := csq.downtimeClient.QueryParams(ctx, &downtimev1.QueryParamsRequest{}) if err != nil { return nil, err } @@ -94,13 +131,13 @@ func (csq *StateQuery) GetDowntimeParams(ctx context.Context) (*downtimev1.Param } type ConsumerStateQuery struct { - StateQuery - clientCtx client.Context + *StateQuery + fromAddress string lastChainID string } func NewConsumerStateQuery(ctx context.Context, clientCtx client.Context) *ConsumerStateQuery { - csq := &ConsumerStateQuery{StateQuery: *NewStateQuery(ctx, clientCtx), clientCtx: clientCtx, lastChainID: ""} + csq := &ConsumerStateQuery{StateQuery: NewStateQuery(ctx, NewStateQueryAccessInst(clientCtx)), fromAddress: clientCtx.FromAddress.String(), lastChainID: ""} return csq } @@ -114,7 +151,7 @@ func (csq *ConsumerStateQuery) GetEffectivePolicy(ctx context.Context, consumerA } } - resp, err := 
csq.PairingQueryClient.EffectivePolicy(ctx, &pairingtypes.QueryEffectivePolicyRequest{ + resp, err := csq.pairingQueryClient.EffectivePolicy(ctx, &pairingtypes.QueryEffectivePolicyRequest{ Consumer: consumerAddress, SpecID: specID, }) @@ -141,9 +178,9 @@ func (csq *ConsumerStateQuery) GetPairing(ctx context.Context, chainID string, l } } - pairingResp, err := csq.PairingQueryClient.GetPairing(ctx, &pairingtypes.QueryGetPairingRequest{ + pairingResp, err := csq.pairingQueryClient.GetPairing(ctx, &pairingtypes.QueryGetPairingRequest{ ChainID: chainID, - Client: csq.clientCtx.FromAddress.String(), + Client: csq.fromAddress, }) if err != nil { return nil, 0, 0, utils.LavaFormatError("Failed in get pairing query", err, utils.Attribute{}) @@ -154,7 +191,7 @@ func (csq *ConsumerStateQuery) GetPairing(ctx context.Context, chainID string, l utils.LavaFormatWarning("Chain returned empty provider list, check node connection and consumer subscription status, or no providers provide this chain", nil, utils.LogAttr("chainId", chainID), utils.LogAttr("epoch", pairingResp.CurrentEpoch), - utils.LogAttr("consumer_address", csq.clientCtx.FromAddress.String()), + utils.LogAttr("consumer_address", csq.fromAddress), ) } return pairingResp.Providers, pairingResp.CurrentEpoch, pairingResp.BlockOfNextPairing, nil @@ -175,8 +212,8 @@ func (csq *ConsumerStateQuery) GetMaxCUForUser(ctx context.Context, chainID stri } if userEntryRes == nil { - address := csq.clientCtx.FromAddress.String() - userEntryRes, err = csq.PairingQueryClient.UserEntry(ctx, &pairingtypes.QueryUserEntryRequest{ChainID: chainID, Address: address, Block: epoch}) + address := csq.fromAddress + userEntryRes, err = csq.pairingQueryClient.UserEntry(ctx, &pairingtypes.QueryUserEntryRequest{ChainID: chainID, Address: address, Block: epoch}) if err != nil { return 0, utils.LavaFormatError("failed querying StakeEntry for consumer", err, utils.Attribute{Key: "chainID", Value: chainID}, utils.Attribute{Key: "address", Value: 
address}, utils.Attribute{Key: "block", Value: epoch}) } @@ -196,7 +233,7 @@ type EpochStateQuery struct { } func (esq *EpochStateQuery) CurrentEpochStart(ctx context.Context) (uint64, error) { - epochDetails, err := esq.EpochStorageQueryClient.EpochDetails(ctx, &epochstoragetypes.QueryGetEpochDetailsRequest{}) + epochDetails, err := esq.epochStorageQueryClient.EpochDetails(ctx, &epochstoragetypes.QueryGetEpochDetailsRequest{}) if err != nil { return 0, utils.LavaFormatError("Failed Querying EpochDetails", err) } @@ -209,15 +246,14 @@ func NewEpochStateQuery(stateQuery *StateQuery) *EpochStateQuery { } type ProviderStateQuery struct { - StateQuery + *StateQuery EpochStateQuery - clientCtx client.Context } -func NewProviderStateQuery(ctx context.Context, clientCtx client.Context) *ProviderStateQuery { - sq := NewStateQuery(ctx, clientCtx) +func NewProviderStateQuery(ctx context.Context, stateQueryAccess StateQueryAccessInf) *ProviderStateQuery { + sq := NewStateQuery(ctx, stateQueryAccess) esq := NewEpochStateQuery(sq) - csq := &ProviderStateQuery{StateQuery: *sq, EpochStateQuery: *esq, clientCtx: clientCtx} + csq := &ProviderStateQuery{StateQuery: sq, EpochStateQuery: *esq} return csq } @@ -233,7 +269,7 @@ func (psq *ProviderStateQuery) GetMaxCuForUser(ctx context.Context, consumerAddr } } if userEntryRes == nil { - userEntryRes, err = psq.PairingQueryClient.UserEntry(ctx, &pairingtypes.QueryUserEntryRequest{ChainID: chainID, Address: consumerAddress, Block: epoch}) + userEntryRes, err = psq.pairingQueryClient.UserEntry(ctx, &pairingtypes.QueryUserEntryRequest{ChainID: chainID, Address: consumerAddress, Block: epoch}) if err != nil { return 0, utils.LavaFormatError("StakeEntry querying for consumer failed", err, utils.Attribute{Key: "chainID", Value: chainID}, utils.Attribute{Key: "address", Value: consumerAddress}, utils.Attribute{Key: "block", Value: epoch}) } @@ -248,10 +284,7 @@ func (psq *ProviderStateQuery) entryKey(consumerAddress, chainID string, epoch u } 
func (psq *ProviderStateQuery) VoteEvents(ctx context.Context, latestBlock int64) (votes []*reliabilitymanager.VoteParams, err error) { - brp, err := TryIntoTendermintRPC(psq.clientCtx.Client) - if err != nil { - return nil, utils.LavaFormatError("failed to get block result provider", err) - } + brp := psq.StateQuery.tendermintRPC blockResults, err := brp.BlockResults(ctx, &latestBlock) if err != nil { return nil, err @@ -311,7 +344,7 @@ func (psq *ProviderStateQuery) VerifyPairing(ctx context.Context, consumerAddres } } if verifyResponse == nil { - verifyResponse, err = psq.PairingQueryClient.VerifyPairing(context.Background(), &pairingtypes.QueryVerifyPairingRequest{ + verifyResponse, err = psq.pairingQueryClient.VerifyPairing(context.Background(), &pairingtypes.QueryVerifyPairingRequest{ ChainID: chainID, Client: consumerAddress, Provider: providerAddress, @@ -334,7 +367,7 @@ func (psq *ProviderStateQuery) VerifyPairing(ctx context.Context, consumerAddres } func (psq *ProviderStateQuery) GetEpochSize(ctx context.Context) (uint64, error) { - res, err := psq.EpochStorageQueryClient.Params(ctx, &epochstoragetypes.QueryParamsRequest{}) + res, err := psq.epochStorageQueryClient.Params(ctx, &epochstoragetypes.QueryParamsRequest{}) if err != nil { return 0, err } @@ -342,7 +375,7 @@ func (psq *ProviderStateQuery) GetEpochSize(ctx context.Context) (uint64, error) } func (psq *ProviderStateQuery) EarliestBlockInMemory(ctx context.Context) (uint64, error) { - res, err := psq.EpochStorageQueryClient.EpochDetails(ctx, &epochstoragetypes.QueryGetEpochDetailsRequest{}) + res, err := psq.epochStorageQueryClient.EpochDetails(ctx, &epochstoragetypes.QueryGetEpochDetailsRequest{}) if err != nil { return 0, err } @@ -350,7 +383,7 @@ func (psq *ProviderStateQuery) EarliestBlockInMemory(ctx context.Context) (uint6 } func (psq *ProviderStateQuery) GetRecommendedEpochNumToCollectPayment(ctx context.Context) (uint64, error) { - res, err := psq.PairingQueryClient.Params(ctx, 
&pairingtypes.QueryParamsRequest{}) + res, err := psq.pairingQueryClient.Params(ctx, &pairingtypes.QueryParamsRequest{}) if err != nil { return 0, err } diff --git a/scripts/init_chain_commands.sh b/scripts/init_chain_commands.sh index e9ccefeeb5..75faa7345d 100755 --- a/scripts/init_chain_commands.sh +++ b/scripts/init_chain_commands.sh @@ -52,6 +52,7 @@ PROVIDERSTAKE="500000000000ulava" PROVIDER1_LISTENER="127.0.0.1:2221" PROVIDER2_LISTENER="127.0.0.1:2222" PROVIDER3_LISTENER="127.0.0.1:2223" +# PROVIDER4_LISTENER="127.0.0.1:2224" sleep 4 @@ -64,10 +65,10 @@ wait_count_blocks 2 echo; echo "#### Voting on plans del proposal ####" lavad tx gov vote $(latest_vote) yes -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -echo; echo "#### Sending proposal for plans del ####" +echo; echo "#### Buy DefaultPlan subscription for user1 ####" lavad tx subscription buy DefaultPlan $(lavad keys show user1 -a) --enable-auto-renewal -y --from user1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE # wait_count_blocks 2 -# lavad tx project set-policy $(lavad keys show user1 -a)-admin ./cookbook/projects/policy_all_chains_with_addon.yml -y --from user1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +# lavad tx project set-policy $(lavad keys show user1 -a)-admin ./cookbook/projects/policy_all_chains_with_extension.yml -y --from user1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE # MANTLE CHAINS="ETH1,SEP1,HOL1,OSMOSIS,FTM250,CELO,LAV1,OSMOSIST,ALFAJORES,ARB1,ARBN,APT1,STRK,JUN1,COSMOSHUB,POLYGON1,EVMOS,OPTM,BASES,CANTO,SUIT,SOLANA,BSC,AXELAR,AVAX,FVM,NEAR,SQDSUBGRAPH,AGR,AGRT,KOIIT,AVAXT,CELESTIATM" @@ -82,6 +83,9 @@ lavad tx pairing bulk-stake-provider $BASE_CHAINS $PROVIDERSTAKE "$PROVIDER2_LIS echo; echo "#### Staking provider 3 ####" lavad tx pairing bulk-stake-provider $BASE_CHAINS $PROVIDERSTAKE "$PROVIDER3_LISTENER,1" 1 $(operator_address) -y --delegate-commission 50 --from servicer3 --provider-moniker "servicer3" 
--gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +# echo; echo "#### Staking provider 4 ####" +# lavad tx pairing bulk-stake-provider $BASE_CHAINS $PROVIDERSTAKE "$PROVIDER4_LISTENER,1" 1 $(operator_address) -y --delegate-commission 50 --from servicer4 --provider-moniker "servicer4" --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE + echo; echo "#### Waiting 1 block ####" wait_count_blocks 1 diff --git a/scripts/pre_setups/init_celestia_only_with_node.sh b/scripts/pre_setups/init_celestia_only_with_node.sh new file mode 100755 index 0000000000..3bea92f70a --- /dev/null +++ b/scripts/pre_setups/init_celestia_only_with_node.sh @@ -0,0 +1,61 @@ +#!/bin/bash +__dir=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +source "$__dir"/../useful_commands.sh +. "${__dir}"/../vars/variables.sh + +LOGS_DIR=${__dir}/../../testutil/debugging/logs +mkdir -p $LOGS_DIR +rm $LOGS_DIR/*.log + +killall screen +screen -wipe + +echo "[Test Setup] installing all binaries" +make install-all + +echo "[Test Setup] setting up a new lava node" +screen -d -m -S node bash -c "./scripts/start_env_dev.sh" +screen -ls +echo "[Test Setup] sleeping 20 seconds for node to finish setup (if its not enough increase timeout)" +sleep 5 +wait_for_lava_node_to_start + +GASPRICE="0.00002ulava" +lavad tx gov submit-legacy-proposal spec-add 
./cookbook/specs/ibc.json,./cookbook/specs/cosmoswasm.json,./cookbook/specs/tendermint.json,./cookbook/specs/cosmossdk.json,./cookbook/specs/cosmossdk_45.json,./cookbook/specs/cosmossdk_full.json,./cookbook/specs/ethermint.json,./cookbook/specs/ethereum.json,./cookbook/specs/cosmoshub.json,./cookbook/specs/lava.json,./cookbook/specs/osmosis.json,./cookbook/specs/fantom.json,./cookbook/specs/celo.json,./cookbook/specs/optimism.json,./cookbook/specs/arbitrum.json,./cookbook/specs/starknet.json,./cookbook/specs/aptos.json,./cookbook/specs/juno.json,./cookbook/specs/polygon.json,./cookbook/specs/evmos.json,./cookbook/specs/base.json,./cookbook/specs/canto.json,./cookbook/specs/sui.json,./cookbook/specs/solana.json,./cookbook/specs/bsc.json,./cookbook/specs/axelar.json,./cookbook/specs/avalanche.json,./cookbook/specs/fvm.json,./cookbook/specs/celestia.json --lava-dev-test -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE & +wait_next_block +wait_next_block +lavad tx gov vote 1 yes -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +sleep 4 + +# Plans proposal +lavad tx gov submit-legacy-proposal plans-add ./cookbook/plans/test_plans/default.json,./cookbook/plans/test_plans/temporary-add.json -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +wait_next_block +wait_next_block +lavad tx gov vote 2 yes -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE + +sleep 4 + +CLIENTSTAKE="500000000000ulava" +PROVIDERSTAKE="500000000000ulava" + +PROVIDER1_LISTENER="127.0.0.1:2220" + +lavad tx subscription buy DefaultPlan $(lavad keys show user1 -a) -y --from user1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +wait_next_block +lavad tx pairing stake-provider "AXELAR" $PROVIDERSTAKE "$PROVIDER1_LISTENER,1" 1 $(operator_address) -y --from servicer1 --provider-moniker "dummyMoniker" --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE + +sleep_until_next_epoch + +screen -d 
-m -S provider1 bash -c "source ~/.bashrc; lavap rpcprovider \ +$PROVIDER1_LISTENER CELESTIATM rest '$CELESTIA_REST' \ +$PROVIDER1_LISTENER CELESTIATM tendermintrpc '$CELESTIA_RPC,$CELESTIA_RPC' \ +$PROVIDER1_LISTENER CELESTIATM grpc '$CELESTIA_GRPC' \ +$PROVIDER1_LISTENER CELESTIATM jsonrpc '$CELESTIA_JSONRPC' \ +$EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 --chain-id lava --metrics-listen-address ":7776" 2>&1 | tee $LOGS_DIR/PROVIDER1.log" && sleep 0.25 + +screen -d -m -S consumers bash -c "source ~/.bashrc; lavap rpcconsumer \ +127.0.0.1:3360 CELESTIATM rest 127.0.0.1:3361 CELESTIATM tendermintrpc 127.0.0.1:3362 CELESTIATM grpc \ +$EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --from user1 --chain-id lava --allow-insecure-provider-dialing --metrics-listen-address ":7779" 2>&1 | tee $LOGS_DIR/CONSUMERS.log" && sleep 0.25 + +echo "--- setting up screens done ---" +screen -ls \ No newline at end of file diff --git a/scripts/pre_setups/init_eth_archive_mix.sh b/scripts/pre_setups/init_eth_archive_mix.sh index 2d9291a578..134dfcb21e 100755 --- a/scripts/pre_setups/init_eth_archive_mix.sh +++ b/scripts/pre_setups/init_eth_archive_mix.sh @@ -67,7 +67,7 @@ screen -d -m -S provider$i bash -c "source ~/.bashrc; lavap rpcprovider \ $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer$i --chain-id lava 2>&1 | tee $LOGS_DIR/PROVIDER$i.log" && sleep 0.25 screen -d -m -S portals bash -c "source ~/.bashrc; lavap rpcconsumer consumer_examples/ethereum_example.yml\ -$EXTRA_PORTAL_FLAGS --cache-be "127.0.0.1:7778" --geolocation 1 --debug-relays --log_level debug --from user1 --chain-id lava --allow-insecure-provider-dialing 2>&1 | tee $LOGS_DIR/PORTAL.log" && sleep 0.25 +$EXTRA_PORTAL_FLAGS --cache-be "127.0.0.1:7778" --geolocation 1 --debug-relays --log_level debug --from user1 --chain-id lava --allow-insecure-provider-dialing 2>&1 | tee $LOGS_DIR/CONSUMER.log" && sleep 0.25 echo "--- setting up screens done ---" screen -ls 
diff --git a/scripts/pre_setups/init_lava_only_with_node.sh b/scripts/pre_setups/init_lava_only_with_node.sh index d99ddc4094..61f814b263 100755 --- a/scripts/pre_setups/init_lava_only_with_node.sh +++ b/scripts/pre_setups/init_lava_only_with_node.sh @@ -57,7 +57,7 @@ wait_next_block screen -d -m -S consumers bash -c "source ~/.bashrc; lavap rpcconsumer \ 127.0.0.1:3360 LAV1 rest 127.0.0.1:3361 LAV1 tendermintrpc 127.0.0.1:3362 LAV1 grpc \ -$EXTRA_PORTAL_FLAGS --geolocation 1 --log_level trace --from user1 --chain-id lava --add-api-method-metrics --allow-insecure-provider-dialing --metrics-listen-address ":7779" 2>&1 | tee $LOGS_DIR/CONSUMERS.log" && sleep 0.25 +$EXTRA_PORTAL_FLAGS --geolocation 1 --optimizer-qos-listen --log_level trace --from user1 --chain-id lava --add-api-method-metrics --limit-parallel-websocket-connections-per-ip 1 --allow-insecure-provider-dialing --metrics-listen-address ":7779" 2>&1 | tee $LOGS_DIR/CONSUMERS.log" && sleep 0.25 echo "--- setting up screens done ---" screen -ls \ No newline at end of file diff --git a/scripts/setup_providers.sh b/scripts/setup_providers.sh index a3b5fa9b0d..127640da41 100755 --- a/scripts/setup_providers.sh +++ b/scripts/setup_providers.sh @@ -100,9 +100,9 @@ $EXTRA_PROVIDER_FLAGS --geolocation "$GEOLOCATION" --log_level debug --from serv # $PROVIDER3_LISTENER MANTLE jsonrpc '$MANTLE_JRPC' \ echo; echo "#### Starting consumer ####" -# Setup Portal +# Setup Consumer screen -d -m -S portals bash -c "source ~/.bashrc; lavap rpcconsumer consumer_examples/full_consumer_example.yml\ -$EXTRA_PORTAL_FLAGS --cache-be "127.0.0.1:7778" --geolocation "$GEOLOCATION" --debug-relays --log_level debug --from user1 --chain-id lava --allow-insecure-provider-dialing --strategy distributed 2>&1 | tee $LOGS_DIR/PORTAL.log" && sleep 0.25 +$EXTRA_PORTAL_FLAGS --cache-be "127.0.0.1:7778" --geolocation "$GEOLOCATION" --debug-relays --log_level debug --from user1 --chain-id lava --allow-insecure-provider-dialing --strategy 
distributed 2>&1 | tee $LOGS_DIR/CONSUMER.log" && sleep 0.25 # 127.0.0.1:3385 MANTLE jsonrpc \ echo "--- setting up screens done ---" diff --git a/scripts/test/httpServer.py b/scripts/test/httpServer.py index 94358ac9d6..ddbe4e77a4 100644 --- a/scripts/test/httpServer.py +++ b/scripts/test/httpServer.py @@ -1,6 +1,8 @@ from http.server import BaseHTTPRequestHandler, HTTPServer import sys +payload_ret = "OK" + class RequestHandler(BaseHTTPRequestHandler): def do_GET(self): self.print_request() @@ -26,10 +28,11 @@ def print_request(self): print(f"Body:\n{body.decode('utf-8')}") # Send a response back to the client + response = payload_ret.encode('utf-8') self.send_response(200) - self.send_header("Content-type", "text/html") + self.send_header("Content-type", "application/json") self.end_headers() - self.wfile.write(b"OK") + self.wfile.write(response) def run_server(port=8000): server_address = ('', port) @@ -40,6 +43,8 @@ def run_server(port=8000): if __name__ == '__main__': if len(sys.argv) > 1: port = int(sys.argv[1]) + if len(sys.argv) > 2: + payload_ret = sys.argv[2] run_server(port) else: run_server() \ No newline at end of file diff --git a/scripts/test/jail_provider_test.sh b/scripts/test/jail_provider_test.sh index 878754b542..f190e70376 100755 --- a/scripts/test/jail_provider_test.sh +++ b/scripts/test/jail_provider_test.sh @@ -74,11 +74,11 @@ $PROVIDER4_LISTENER LAV1 tendermintrpc '$LAVA_RPC,$LAVA_RPC' \ $PROVIDER4_LISTENER LAV1 grpc '$LAVA_GRPC' \ $EXTRA_PROVIDER_FLAGS --chain-id=lava --metrics-listen-address ":7780" --geolocation 1 --log_level debug --from servicer4 2>&1 | tee $LOGS_DIR/PROVIDER4.log" -# Setup Portal +# Setup Consumer screen -d -m -S portals bash -c "source ~/.bashrc; lava-protocol rpcconsumer \ 127.0.0.1:3333 ETH1 jsonrpc \ 127.0.0.1:3360 LAV1 rest 127.0.0.1:3361 LAV1 tendermintrpc 127.0.0.1:3362 LAV1 grpc \ -$EXTRA_PORTAL_FLAGS --metrics-listen-address ":7779" --geolocation 1 --log_level debug --from user1 --chain-id lava 
--allow-insecure-provider-dialing 2>&1 | tee $LOGS_DIR/PORTAL.log" +$EXTRA_PORTAL_FLAGS --metrics-listen-address ":7779" --geolocation 1 --log_level debug --from user1 --chain-id lava --allow-insecure-provider-dialing 2>&1 | tee $LOGS_DIR/CONSUMER.log" # need to wait 8 epochs for the provider to be jail eligible diff --git a/scripts/test/vote_test.sh b/scripts/test/vote_test.sh new file mode 100755 index 0000000000..79f5a22377 --- /dev/null +++ b/scripts/test/vote_test.sh @@ -0,0 +1,31 @@ +#!/bin/bash +__dir=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +source $__dir/../useful_commands.sh +. ${__dir}/vars/variables.sh +# Making sure old screens are not running +echo "current vote number $(latest_vote)" +killall screen +screen -wipe +GASPRICE="0.00002ulava" + +delegate_amount=1000000000000ulava +delegate_amount_big=49000000000000ulava +operator=$(lavad q staking validators --output json | jq -r ".validators[0].operator_address") +echo "operator: $operator" +lavad tx staking delegate $operator $delegate_amount --from bob --chain-id lava --gas-prices $GASPRICE --gas-adjustment 1.5 --gas auto -y +lavad tx staking delegate $operator $delegate_amount --from user1 --chain-id lava --gas-prices $GASPRICE --gas-adjustment 1.5 --gas auto -y +lavad tx staking delegate $operator $delegate_amount --from user2 --chain-id lava --gas-prices $GASPRICE --gas-adjustment 1.5 --gas auto -y +lavad tx staking delegate $operator $delegate_amount_big --from user3 --chain-id lava --gas-prices $GASPRICE --gas-adjustment 1.5 --gas auto -y +lavad tx staking delegate $operator $delegate_amount_big --from user4 --chain-id lava --gas-prices $GASPRICE --gas-adjustment 1.5 --gas auto -y +wait_count_blocks 1 +lavad tx gov submit-legacy-proposal plans-add ./cookbook/plans/test_plans/default.json -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +echo; echo "#### Waiting 2 blocks ####" +wait_count_blocks 2 +# voting abstain with 50% of the voting power, 
yes with 2% of the voting power no with 1% of the voting power +lavad tx gov vote $(latest_vote) abstain -y --from user3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx gov vote $(latest_vote) yes -y --from user2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx gov vote $(latest_vote) yes -y --from user1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx gov vote $(latest_vote) no -y --from bob --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE + +echo "latest vote: $(latest_vote)" +lavad q gov proposal $(latest_vote) \ No newline at end of file diff --git a/scripts/test_spec_full.sh b/scripts/test_spec_full.sh index 9971f9959c..cd504f47ec 100755 --- a/scripts/test_spec_full.sh +++ b/scripts/test_spec_full.sh @@ -206,7 +206,7 @@ done echo "[+]generated consumer config: $output_consumer_yaml" cat $output_consumer_yaml if [ "$dry" = false ]; then - screen -d -m -S consumers bash -c "source ~/.bashrc; lavap rpcconsumer testutil/debugging/logs/consumer.yml $EXTRA_PORTAL_FLAGS --geolocation 1 --debug-relays --log_level debug --from user1 --chain-id lava --allow-insecure-provider-dialing --metrics-listen-address ":7779" 2>&1 | tee $LOGS_DIR/PORTAL.log" + screen -d -m -S consumers bash -c "source ~/.bashrc; lavap rpcconsumer testutil/debugging/logs/consumer.yml $EXTRA_PORTAL_FLAGS --geolocation 1 --debug-relays --log_level debug --from user1 --chain-id lava --allow-insecure-provider-dialing --metrics-listen-address ":7779" 2>&1 | tee $LOGS_DIR/CONSUMER.log" echo "[+] letting providers start and running health check then running command with flags: $test_consumer_command_args" sleep 10 diff --git a/testutil/e2e/allowedErrorList.go b/testutil/e2e/allowedErrorList.go index 192e3eac1b..11dc03bfbf 100644 --- a/testutil/e2e/allowedErrorList.go +++ b/testutil/e2e/allowedErrorList.go @@ -17,6 +17,7 @@ var allowedErrorsDuringEmergencyMode = map[string]string{ "Connection refused": "Connection to tendermint port 
sometimes can happen as we shut down the node and we try to fetch info during emergency mode", "connection reset by peer": "Connection to tendermint port sometimes can happen as we shut down the node and we try to fetch info during emergency mode", "Failed Querying EpochDetails": "Connection to tendermint port sometimes can happen as we shut down the node and we try to fetch info during emergency mode", + "http://[IP_ADDRESS]:26657": "This error is allowed because it can happen when EOF error happens when we shut down the node in emergency mode", } var allowedErrorsPaymentE2E = map[string]string{ diff --git a/testutil/e2e/proxy/proxy.go b/testutil/e2e/proxy/proxy.go index 9eb9dde326..e5c73e3826 100644 --- a/testutil/e2e/proxy/proxy.go +++ b/testutil/e2e/proxy/proxy.go @@ -195,9 +195,46 @@ func startProxyProcess(process proxyProcess) { break } // Print the message to the console - log.Printf("Received: %s\n", msg) - // Write message back to browser - if err = conn.WriteMessage(msgType, msg); err != nil { + log.Printf("WS Received: %s\n", msg) + + var respmsg rpcclient.JsonrpcMessage + err = json.Unmarshal(msg, &respmsg) + if err != nil { + println(err.Error()) + continue + } + + replyMessage, err := rpcInterfaceMessages.ConvertJsonRPCMsg(&respmsg) + if err != nil { + println(err.Error()) + continue + } + + jStruct := &jsonStruct{} + err = json.Unmarshal(msg, jStruct) + if err != nil { + println(err.Error()) + continue + } + jStruct.ID = 0 + rawBodySNoID, _ := json.Marshal(jStruct) + + if val, ok := process.mock.requests[string(rawBodySNoID)]; ok && process.cache { + orderedJSON := idInsertedResponse(val, replyMessage) + println(dotsStr+process.port+dotsStr+process.id+" ::: Cached Response ::: ", orderedJSON) + cacheCount += 1 + + // Change Response + if fakeResponse { + val = fakeResult(val, "0xe000000000000000000") + // val = "{\"jsonrpc\":\"2.0\",\"id\":1,\"result\":\"0xe000000000000000000\"}" + println(process.port+" ::: Fake Response ::: ", val) + fakeCount += 1 
+ } + time.Sleep(500 * time.Millisecond) + conn.WriteMessage(msgType, []byte(orderedJSON)) + } else if err = conn.WriteMessage(msgType, msg); err != nil { + // Write message back to browser log.Println("Write error:", err) break } @@ -250,7 +287,7 @@ func fakeResult(val, fake string) string { return strings.Join(parts, ",") } -func idInstertedResponse(val string, replyMessage *rpcInterfaceMessages.JsonrpcMessage) string { +func idInsertedResponse(val string, replyMessage *rpcInterfaceMessages.JsonrpcMessage) string { // Extract ID from raw message respId, idErr := rpcInterfaceMessages.IdFromRawMessage(replyMessage.ID) if idErr != nil { @@ -320,7 +357,7 @@ func (p proxyProcess) LavaTestProxy(responseWriter http.ResponseWriter, request jStruct.ID = 0 rawBodySNoID, _ := json.Marshal(jStruct) if val, ok := mock.requests[string(rawBodySNoID)]; ok && p.cache { - orderedJSON := idInstertedResponse(val, replyMessage) + orderedJSON := idInsertedResponse(val, replyMessage) println(dotsStr+p.port+dotsStr+p.id+" ::: Cached Response ::: ", orderedJSON) cacheCount += 1 diff --git a/utils/lavalog.go b/utils/lavalog.go index 5e7908d185..aae7e82827 100644 --- a/utils/lavalog.go +++ b/utils/lavalog.go @@ -30,6 +30,7 @@ const ( LAVA_LOG_ERROR LAVA_LOG_FATAL LAVA_LOG_PANIC + LAVA_LOG_PRODUCTION NoColor = true ) @@ -226,6 +227,18 @@ func LavaFormatLog(description string, err error, attributes []Attribute, severi zerologlog.Logger = zerologlog.Output(zerolog.ConsoleWriter{Out: os.Stderr, NoColor: NoColor, TimeFormat: time.Stamp}).Level(defaultGlobalLogLevel) } + // depending on the build flag, this log function will log either a warning or an error. + // the purpose of this function is to fail E2E tests and not allow unexpected behavior to reach main. + // while in production some errors may occur as consumers / providers might set up their processes in the wrong way. + // in test environment we don't expect to have these errors and if they occur we would like to fail the test. 
+ if severity == LAVA_LOG_PRODUCTION { + if ExtendedLogLevel == "production" { + severity = LAVA_LOG_WARN + } else { + severity = LAVA_LOG_ERROR + } + } + var logEvent *zerolog.Event var rollingLoggerEvent *zerolog.Event switch severity { @@ -301,16 +314,9 @@ func LavaFormatFatal(description string, err error, attributes ...Attribute) { LavaFormatLog(description, err, attributes, LAVA_LOG_FATAL) } -// depending on the build flag, this log function will log either a warning or an error. -// the purpose of this function is to fail E2E tests and not allow unexpected behavior to reach main. -// while in production some errors may occur as consumers / providers might set up their processes in the wrong way. -// in test environment we dont expect to have these errors and if they occur we would like to fail the test. +// see documentation in LavaFormatLog function func LavaFormatProduction(description string, err error, attributes ...Attribute) error { - if ExtendedLogLevel == "production" { - return LavaFormatWarning(description, err, attributes...) - } else { - return LavaFormatError(description, err, attributes...) - } + return LavaFormatLog(description, err, attributes, LAVA_LOG_PRODUCTION) } func LavaFormatError(description string, err error, attributes ...Attribute) error { diff --git a/utils/lavaslices/slices.go b/utils/lavaslices/slices.go index abad07d4f1..4eb0c64b8e 100644 --- a/utils/lavaslices/slices.go +++ b/utils/lavaslices/slices.go @@ -131,6 +131,15 @@ func Contains[T comparable](slice []T, elem T) bool { return false } +func ContainsPredicate[T comparable](slice []T, predicate func(elem T) bool) bool { + for _, e := range slice { + if predicate(e) { + return true + } + } + return false +} + // Remove removes the first instance (if exists) of elem from the slice, and // returns the new slice and indication if removal took place. 
func Remove[T comparable](slice []T, elem T) ([]T, bool) { @@ -237,6 +246,28 @@ func UnionByFunc[T ComparableByFunc](arrays ...[]T) []T { return res } +func Difference[T comparable](slice1, slice2 []T) []T { + // This function returns the difference between two slices + // (i.e., the elements that are in slice1 but not in slice2) + + // Create a map to store elements of the second slice for quick lookup + elementMap := make(map[T]bool) + for _, elem := range slice2 { + elementMap[elem] = true + } + + // Create a slice to hold the difference + diff := make([]T, 0) + for _, elem := range slice1 { + // If the element in slice1 is not in slice2, add it to the result + if !elementMap[elem] { + diff = append(diff, elem) + } + } + + return diff +} + func Map[T, V any](slice []T, filter func(T) V) []V { values := make([]V, len(slice)) for i := range slice { diff --git a/utils/lavaslices/slices_test.go b/utils/lavaslices/slices_test.go index 4a5880a8a2..8ae4f1bb39 100644 --- a/utils/lavaslices/slices_test.go +++ b/utils/lavaslices/slices_test.go @@ -2,6 +2,7 @@ package lavaslices import ( "math" + "reflect" "testing" "time" @@ -510,3 +511,56 @@ func TestSliceSplitter(t *testing.T) { } } } + +func TestDifference(t *testing.T) { + tests := []struct { + name string + slice1 []int + slice2 []int + expected []int + }{ + { + name: "Basic difference", + slice1: []int{1, 2, 3, 4}, + slice2: []int{3, 4, 5, 6}, + expected: []int{1, 2}, + }, + { + name: "No difference", + slice1: []int{1, 2, 3}, + slice2: []int{1, 2, 3}, + expected: []int{}, + }, + { + name: "All elements different", + slice1: []int{1, 2, 3}, + slice2: []int{4, 5, 6}, + expected: []int{1, 2, 3}, + }, + { + name: "Empty first slice", + slice1: []int{}, + slice2: []int{1, 2, 3}, + expected: []int{}, + }, + { + name: "Empty second slice", + slice1: []int{1, 2, 3}, + slice2: []int{}, + expected: []int{1, 2, 3}, + }, + { + name: "Mixed elements", + slice1: []int{1, 2, 2, 3, 4}, + slice2: []int{2, 4}, + expected: []int{1, 
3}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := Difference(tt.slice1, tt.slice2) + require.True(t, reflect.DeepEqual(result, tt.expected)) + }) + } +} diff --git a/utils/maps/maps.go b/utils/maps/maps.go index e6702b46fb..5486c70672 100644 --- a/utils/maps/maps.go +++ b/utils/maps/maps.go @@ -37,3 +37,19 @@ func GetMaxKey[T constraints.Ordered, V any](m map[T]V) T { } return maxKey } + +func KeysSlice[T comparable, V any](in map[T]V) []T { + keys := []T{} + for k := range in { + keys = append(keys, k) + } + return keys +} + +func ValuesSlice[T comparable, V any](in map[T]V) []V { + values := []V{} + for _, v := range in { + values = append(values, v) + } + return values +} diff --git a/utils/sigs/sigs.go b/utils/sigs/sigs.go index a50052502f..a21232a396 100644 --- a/utils/sigs/sigs.go +++ b/utils/sigs/sigs.go @@ -112,21 +112,6 @@ func EncodeUint64(val uint64) []byte { return encodedVal } -// Join() is faster than bytes.Join because it does what -// bytes.Join() does without appending (empty) separators -func Join(s [][]byte) []byte { - n := 0 - for _, v := range s { - n += len(v) - } - - b, i := make([]byte, n), 0 - for _, v := range s { - i += copy(b[i:], v) - } - return b -} - func GetKeyName(clientCtx client.Context) (string, error) { _, name, _, err := client.GetFromFields(clientCtx, clientCtx.Keyring, clientCtx.From) if err != nil { diff --git a/x/conflict/module_simulation.go b/x/conflict/module_simulation.go deleted file mode 100644 index 43fea0d140..0000000000 --- a/x/conflict/module_simulation.go +++ /dev/null @@ -1,118 +0,0 @@ -package conflict - -import ( - "math/rand" - - "github.com/cosmos/cosmos-sdk/testutil/sims" - types2 "github.com/cosmos/cosmos-sdk/x/auth/types" - - "github.com/cosmos/cosmos-sdk/baseapp" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/module" - simtypes "github.com/cosmos/cosmos-sdk/types/simulation" - "github.com/cosmos/cosmos-sdk/x/simulation" - 
"github.com/lavanet/lava/v4/testutil/sample" - conflictsimulation "github.com/lavanet/lava/v4/x/conflict/simulation" - "github.com/lavanet/lava/v4/x/conflict/types" -) - -// avoid unused import issue -var ( - _ = sample.AccAddress - _ = conflictsimulation.FindAccount - _ = sims.StakePerAccount - _ = simulation.MsgEntryKind - _ = baseapp.Paramspace -) - -const ( - opWeightMsgDetection = "op_weight_msg_create_chain" - // TODO: Determine the simulation weight value - defaultWeightMsgDetection int = 100 - - opWeightMsgConflictVoteCommit = "op_weight_msg_conflict_vote_commit" - // TODO: Determine the simulation weight value - defaultWeightMsgConflictVoteCommit int = 100 - - opWeightMsgConflictVoteReveal = "op_weight_msg_conflict_vote_reveal" - // TODO: Determine the simulation weight value - defaultWeightMsgConflictVoteReveal int = 100 - - // this line is used by starport scaffolding # simapp/module/const -) - -// GenerateGenesisState creates a randomized GenState of the module -func (AppModule) GenerateGenesisState(simState *module.SimulationState) { - accs := make([]string, len(simState.Accounts)) - for i, acc := range simState.Accounts { - accs[i] = acc.Address.String() - } - conflictGenesis := types.GenesisState{ - // this line is used by starport scaffolding # simapp/module/genesisState - } - simState.GenState[types.ModuleName] = simState.Cdc.MustMarshalJSON(&conflictGenesis) -} - -// TODO: Add weighted proposals -func (AppModule) ProposalMsgs(_ module.SimulationState) []simtypes.WeightedProposalMsg { - return []simtypes.WeightedProposalMsg{ - simulation.NewWeightedProposalMsg("op_weight_msg_update_params", 100, func(r *rand.Rand, ctx sdk.Context, accs []simtypes.Account) sdk.Msg { - return &types2.MsgUpdateParams{} - }), - } -} - -//// RandomizedParams creates randomized param changes for the simulator -// func (am AppModule) RandomizedParams(_ *rand.Rand) []simtypes.ParamChange { -// conflictParams := types.DefaultParams() -// return []simtypes.ParamChange{ -// 
simulation.NewSimParamChange(types.ModuleName, string(types.KeyMajorityPercent), func(r *rand.Rand) string { -// return string(types.Amino.MustMarshalJSON(conflictParams.MajorityPercent)) -// }), -// } -// } - -// RegisterStoreDecoder registers a decoder -func (am AppModule) RegisterStoreDecoder(_ sdk.StoreDecoderRegistry) {} - -// WeightedOperations returns the all the gov module operations with their respective weights. -func (am AppModule) WeightedOperations(simState module.SimulationState) []simtypes.WeightedOperation { - operations := make([]simtypes.WeightedOperation, 0) - - var weightMsgDetection int - simState.AppParams.GetOrGenerate(simState.Cdc, opWeightMsgDetection, &weightMsgDetection, nil, - func(_ *rand.Rand) { - weightMsgDetection = defaultWeightMsgDetection - }, - ) - operations = append(operations, simulation.NewWeightedOperation( - weightMsgDetection, - conflictsimulation.SimulateMsgDetection(am.accountKeeper, am.bankKeeper, am.keeper), - )) - - var weightMsgConflictVoteCommit int - simState.AppParams.GetOrGenerate(simState.Cdc, opWeightMsgConflictVoteCommit, &weightMsgConflictVoteCommit, nil, - func(_ *rand.Rand) { - weightMsgConflictVoteCommit = defaultWeightMsgConflictVoteCommit - }, - ) - operations = append(operations, simulation.NewWeightedOperation( - weightMsgConflictVoteCommit, - conflictsimulation.SimulateMsgConflictVoteCommit(am.accountKeeper, am.bankKeeper, am.keeper), - )) - - var weightMsgConflictVoteReveal int - simState.AppParams.GetOrGenerate(simState.Cdc, opWeightMsgConflictVoteReveal, &weightMsgConflictVoteReveal, nil, - func(_ *rand.Rand) { - weightMsgConflictVoteReveal = defaultWeightMsgConflictVoteReveal - }, - ) - operations = append(operations, simulation.NewWeightedOperation( - weightMsgConflictVoteReveal, - conflictsimulation.SimulateMsgConflictVoteReveal(am.accountKeeper, am.bankKeeper, am.keeper), - )) - - // this line is used by starport scaffolding # simapp/module/operation - - return operations -} diff --git 
a/x/conflict/simulation/conflict_vote_commit.go b/x/conflict/simulation/conflict_vote_commit.go deleted file mode 100644 index e9bb19a51f..0000000000 --- a/x/conflict/simulation/conflict_vote_commit.go +++ /dev/null @@ -1,29 +0,0 @@ -package simulation - -import ( - "math/rand" - - "github.com/cosmos/cosmos-sdk/baseapp" - sdk "github.com/cosmos/cosmos-sdk/types" - simtypes "github.com/cosmos/cosmos-sdk/types/simulation" - "github.com/lavanet/lava/v4/x/conflict/keeper" - "github.com/lavanet/lava/v4/x/conflict/types" -) - -func SimulateMsgConflictVoteCommit( - ak types.AccountKeeper, - bk types.BankKeeper, - k keeper.Keeper, -) simtypes.Operation { - return func(r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string, - ) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { - simAccount, _ := simtypes.RandomAcc(r, accs) - msg := &types.MsgConflictVoteCommit{ - Creator: simAccount.Address.String(), - } - - // TODO: Handling the ConflictVoteCommit simulation - - return simtypes.NoOpMsg(types.ModuleName, msg.Type(), "ConflictVoteCommit simulation not implemented"), nil, nil - } -} diff --git a/x/conflict/simulation/conflict_vote_reveal.go b/x/conflict/simulation/conflict_vote_reveal.go deleted file mode 100644 index f77bd6ce77..0000000000 --- a/x/conflict/simulation/conflict_vote_reveal.go +++ /dev/null @@ -1,29 +0,0 @@ -package simulation - -import ( - "math/rand" - - "github.com/cosmos/cosmos-sdk/baseapp" - sdk "github.com/cosmos/cosmos-sdk/types" - simtypes "github.com/cosmos/cosmos-sdk/types/simulation" - "github.com/lavanet/lava/v4/x/conflict/keeper" - "github.com/lavanet/lava/v4/x/conflict/types" -) - -func SimulateMsgConflictVoteReveal( - ak types.AccountKeeper, - bk types.BankKeeper, - k keeper.Keeper, -) simtypes.Operation { - return func(r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string, - ) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { - simAccount, _ := 
simtypes.RandomAcc(r, accs) - msg := &types.MsgConflictVoteReveal{ - Creator: simAccount.Address.String(), - } - - // TODO: Handling the ConflictVoteReveal simulation - - return simtypes.NoOpMsg(types.ModuleName, msg.Type(), "ConflictVoteReveal simulation not implemented"), nil, nil - } -} diff --git a/x/conflict/simulation/detection.go b/x/conflict/simulation/detection.go deleted file mode 100644 index 6444cd204c..0000000000 --- a/x/conflict/simulation/detection.go +++ /dev/null @@ -1,29 +0,0 @@ -package simulation - -import ( - "math/rand" - - "github.com/cosmos/cosmos-sdk/baseapp" - sdk "github.com/cosmos/cosmos-sdk/types" - simtypes "github.com/cosmos/cosmos-sdk/types/simulation" - "github.com/lavanet/lava/v4/x/conflict/keeper" - "github.com/lavanet/lava/v4/x/conflict/types" -) - -func SimulateMsgDetection( - ak types.AccountKeeper, - bk types.BankKeeper, - k keeper.Keeper, -) simtypes.Operation { - return func(r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string, - ) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { - simAccount, _ := simtypes.RandomAcc(r, accs) - msg := &types.MsgDetection{ - Creator: simAccount.Address.String(), - } - - // TODO: Handling the Detection simulation - - return simtypes.NoOpMsg(types.ModuleName, msg.Type(), "Detection simulation not implemented"), nil, nil - } -} diff --git a/x/conflict/simulation/simap.go b/x/conflict/simulation/simap.go deleted file mode 100644 index 92c437c0d1..0000000000 --- a/x/conflict/simulation/simap.go +++ /dev/null @@ -1,15 +0,0 @@ -package simulation - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - simtypes "github.com/cosmos/cosmos-sdk/types/simulation" -) - -// FindAccount find a specific address from an account list -func FindAccount(accs []simtypes.Account, address string) (simtypes.Account, bool) { - creator, err := sdk.AccAddressFromBech32(address) - if err != nil { - panic(err) - } - return simtypes.FindAccount(accs, creator) -} diff 
--git a/x/conflict/types/relay_finalization.go b/x/conflict/types/relay_finalization.go index b5e9f896e4..5174807ab3 100644 --- a/x/conflict/types/relay_finalization.go +++ b/x/conflict/types/relay_finalization.go @@ -1,6 +1,7 @@ package types import ( + "bytes" "encoding/json" fmt "fmt" @@ -55,7 +56,7 @@ func (rf RelayFinalization) DataToSign() []byte { sdkAccAddress, relaySessionHash, } - return sigs.Join(msgParts) + return bytes.Join(msgParts, nil) } func (rfm RelayFinalization) HashRounds() int { diff --git a/x/downtime/v1/genesis_test.go b/x/downtime/v1/genesis_test.go index 0e3a2ca76e..066f5bb37f 100644 --- a/x/downtime/v1/genesis_test.go +++ b/x/downtime/v1/genesis_test.go @@ -58,8 +58,8 @@ func TestGenesis_Validate(t *testing.T) { } for name, tc := range tests { - tc := tc t.Run(name, func(t *testing.T) { + t.Parallel() err := tc.Genesis.Validate() if tc.ExpError == "" { require.NoError(t, err) diff --git a/x/dualstaking/migrations/v4/delegator_reward.pb.go b/x/dualstaking/migrations/v4/delegator_reward.pb.go deleted file mode 100644 index f469e4ee1d..0000000000 --- a/x/dualstaking/migrations/v4/delegator_reward.pb.go +++ /dev/null @@ -1,481 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: lavanet/lava/dualstaking/delegator_reward.proto - -package types - -import ( - fmt "fmt" - types "github.com/cosmos/cosmos-sdk/types" - _ "github.com/cosmos/gogoproto/gogoproto" - proto "github.com/cosmos/gogoproto/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type DelegatorRewardv4 struct { - Delegator string `protobuf:"bytes,1,opt,name=delegator,proto3" json:"delegator,omitempty"` - Provider string `protobuf:"bytes,2,opt,name=provider,proto3" json:"provider,omitempty"` - ChainId string `protobuf:"bytes,3,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - Amount types.Coin `protobuf:"bytes,4,opt,name=amount,proto3" json:"amount"` -} - -func (m *DelegatorRewardv4) Reset() { *m = DelegatorRewardv4{} } -func (m *DelegatorRewardv4) String() string { return proto.CompactTextString(m) } -func (*DelegatorRewardv4) ProtoMessage() {} -func (*DelegatorRewardv4) Descriptor() ([]byte, []int) { - return fileDescriptor_c8b6da054bf40d1f, []int{0} -} -func (m *DelegatorRewardv4) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DelegatorRewardv4) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DelegatorReward.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DelegatorRewardv4) XXX_Merge(src proto.Message) { - xxx_messageInfo_DelegatorReward.Merge(m, src) -} -func (m *DelegatorRewardv4) XXX_Size() int { - return m.Size() -} -func (m *DelegatorRewardv4) XXX_DiscardUnknown() { - xxx_messageInfo_DelegatorReward.DiscardUnknown(m) -} - -var xxx_messageInfo_DelegatorReward proto.InternalMessageInfo - -func (m *DelegatorRewardv4) GetDelegator() string { - if m != nil { - return m.Delegator - } - return "" -} - -func (m *DelegatorRewardv4) GetProvider() string { - if m != nil { - return m.Provider - } - return "" -} - -func (m *DelegatorRewardv4) GetChainId() string { - if m != nil { - return m.ChainId - } - return "" -} - -func (m *DelegatorRewardv4) GetAmount() types.Coin { - if m != nil { - return m.Amount - } - return types.Coin{} -} - -func 
init() { - proto.RegisterType((*DelegatorRewardv4)(nil), "lavanet.lava.dualstaking.DelegatorRewardv4") -} - -func init() { - proto.RegisterFile("lavanet/lava/dualstaking/delegator_reward.proto", fileDescriptor_c8b6da054bf40d1f) -} - -var fileDescriptor_c8b6da054bf40d1f = []byte{ - // 281 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x50, 0xb1, 0x4e, 0xf3, 0x30, - 0x18, 0x8c, 0xff, 0xbf, 0x2a, 0xad, 0x19, 0x90, 0x22, 0x86, 0x34, 0x42, 0xa6, 0x62, 0xaa, 0x84, - 0x64, 0xab, 0x30, 0xb0, 0x17, 0x18, 0x58, 0x33, 0xb2, 0x54, 0x4e, 0x6c, 0xa5, 0x16, 0x89, 0xbf, - 0xc8, 0x71, 0x02, 0xbc, 0x05, 0x6f, 0xc0, 0xeb, 0x74, 0xec, 0xc8, 0x84, 0x50, 0xf2, 0x22, 0x28, - 0x4e, 0x28, 0x74, 0xfa, 0xfc, 0xf9, 0xee, 0x74, 0xf7, 0x1d, 0x66, 0x19, 0xaf, 0xb9, 0x96, 0xd6, - 0x4d, 0x26, 0x2a, 0x9e, 0x95, 0x96, 0x3f, 0x29, 0x9d, 0x32, 0x21, 0x33, 0x99, 0x72, 0x0b, 0x66, - 0x6d, 0xe4, 0x33, 0x37, 0x82, 0x16, 0x06, 0x2c, 0xf8, 0xc1, 0x20, 0xa0, 0xdd, 0xa4, 0x7f, 0x04, - 0xe1, 0x69, 0x0a, 0x29, 0x38, 0x12, 0xeb, 0x5e, 0x3d, 0x3f, 0x24, 0x09, 0x94, 0x39, 0x94, 0x2c, - 0xe6, 0xa5, 0x64, 0xf5, 0x32, 0x96, 0x96, 0x2f, 0x59, 0x02, 0x4a, 0xf7, 0xf8, 0xc5, 0x3b, 0xc2, - 0x27, 0x77, 0x3f, 0x56, 0x91, 0x73, 0xf2, 0xcf, 0xf0, 0x74, 0xef, 0x1e, 0xa0, 0x39, 0x5a, 0x4c, - 0xa3, 0xdf, 0x0f, 0x3f, 0xc4, 0x93, 0xc2, 0x40, 0xad, 0x84, 0x34, 0xc1, 0x3f, 0x07, 0xee, 0x77, - 0x7f, 0x86, 0x27, 0xc9, 0x86, 0x2b, 0xbd, 0x56, 0x22, 0xf8, 0xef, 0xb0, 0x23, 0xb7, 0x3f, 0x08, - 0xff, 0x06, 0x8f, 0x79, 0x0e, 0x95, 0xb6, 0xc1, 0x68, 0x8e, 0x16, 0xc7, 0x57, 0x33, 0xda, 0x27, - 0xa3, 0x5d, 0x32, 0x3a, 0x24, 0xa3, 0xb7, 0xa0, 0xf4, 0x6a, 0xb4, 0xfd, 0x3c, 0xf7, 0xa2, 0x81, - 0xbe, 0xba, 0xdf, 0x36, 0x04, 0xed, 0x1a, 0x82, 0xbe, 0x1a, 0x82, 0xde, 0x5a, 0xe2, 0xed, 0x5a, - 0xe2, 0x7d, 0xb4, 0xc4, 0x7b, 0xbc, 0x4c, 0x95, 0xdd, 0x54, 0x31, 0x4d, 0x20, 0x3f, 0xec, 0xf1, - 0xe5, 0xa0, 0x49, 0xfb, 0x5a, 0xc8, 0x32, 0x1e, 0xbb, 0x7b, 0xaf, 0xbf, 0x03, 0x00, 0x00, 0xff, - 
0xff, 0x2c, 0x0a, 0x50, 0x1d, 0x72, 0x01, 0x00, 0x00, -} - -func (m *DelegatorRewardv4) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DelegatorRewardv4) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DelegatorRewardv4) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Amount.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDelegatorReward(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - if len(m.ChainId) > 0 { - i -= len(m.ChainId) - copy(dAtA[i:], m.ChainId) - i = encodeVarintDelegatorReward(dAtA, i, uint64(len(m.ChainId))) - i-- - dAtA[i] = 0x1a - } - if len(m.Provider) > 0 { - i -= len(m.Provider) - copy(dAtA[i:], m.Provider) - i = encodeVarintDelegatorReward(dAtA, i, uint64(len(m.Provider))) - i-- - dAtA[i] = 0x12 - } - if len(m.Delegator) > 0 { - i -= len(m.Delegator) - copy(dAtA[i:], m.Delegator) - i = encodeVarintDelegatorReward(dAtA, i, uint64(len(m.Delegator))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintDelegatorReward(dAtA []byte, offset int, v uint64) int { - offset -= sovDelegatorReward(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *DelegatorRewardv4) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Delegator) - if l > 0 { - n += 1 + l + sovDelegatorReward(uint64(l)) - } - l = len(m.Provider) - if l > 0 { - n += 1 + l + sovDelegatorReward(uint64(l)) - } - l = len(m.ChainId) - if l > 0 { - n += 1 + l + sovDelegatorReward(uint64(l)) - } - l = m.Amount.Size() - n += 1 + l + sovDelegatorReward(uint64(l)) - return n -} - -func 
sovDelegatorReward(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozDelegatorReward(x uint64) (n int) { - return sovDelegatorReward(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *DelegatorRewardv4) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDelegatorReward - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DelegatorReward: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DelegatorReward: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Delegator", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDelegatorReward - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthDelegatorReward - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthDelegatorReward - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Delegator = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDelegatorReward - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } 
- } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthDelegatorReward - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthDelegatorReward - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Provider = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDelegatorReward - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthDelegatorReward - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthDelegatorReward - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ChainId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDelegatorReward - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDelegatorReward - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDelegatorReward - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Amount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipDelegatorReward(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDelegatorReward - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - 
iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipDelegatorReward(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDelegatorReward - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDelegatorReward - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDelegatorReward - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthDelegatorReward - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupDelegatorReward - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthDelegatorReward - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthDelegatorReward = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowDelegatorReward = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupDelegatorReward = fmt.Errorf("proto: unexpected end of group") -) diff --git a/x/dualstaking/module_simulation.go b/x/dualstaking/module_simulation.go deleted file mode 100644 index beaddac229..0000000000 --- a/x/dualstaking/module_simulation.go +++ /dev/null @@ -1,124 +0,0 @@ 
-package dualstaking - -import ( - "math/rand" - - "github.com/cosmos/cosmos-sdk/baseapp" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/module" - simtypes "github.com/cosmos/cosmos-sdk/types/simulation" - "github.com/cosmos/cosmos-sdk/x/simulation" - "github.com/lavanet/lava/v4/testutil/sample" - dualstakingsimulation "github.com/lavanet/lava/v4/x/dualstaking/simulation" - "github.com/lavanet/lava/v4/x/dualstaking/types" -) - -// avoid unused import issue -var ( - _ = sample.AccAddress - _ = dualstakingsimulation.FindAccount - _ = simulation.MsgEntryKind - _ = baseapp.Paramspace - _ = rand.Rand{} -) - -const ( - opWeightMsgDelegate = "op_weight_msg_delegate" - // TODO: Determine the simulation weight value - defaultWeightMsgDelegate int = 100 - - opWeightMsgRedelegate = "op_weight_msg_redelegate" - // TODO: Determine the simulation weight value - defaultWeightMsgRedelegate int = 100 - - opWeightMsgUnbond = "op_weight_msg_unbond" - // TODO: Determine the simulation weight value - defaultWeightMsgUnbond int = 100 - - opWeightMsgClaimRewards = "op_weight_msg_claim_rewards" - // TODO: Determine the simulation weight value - defaultWeightMsgClaimRewards int = 100 - - // this line is used by starport scaffolding # simapp/module/const -) - -// GenerateGenesisState creates a randomized GenState of the module. -func (AppModule) GenerateGenesisState(simState *module.SimulationState) { - accs := make([]string, len(simState.Accounts)) - for i, acc := range simState.Accounts { - accs[i] = acc.Address.String() - } - dualstakingGenesis := types.GenesisState{ - Params: types.DefaultParams(), - // this line is used by starport scaffolding # simapp/module/genesisState - } - simState.GenState[types.ModuleName] = simState.Cdc.MustMarshalJSON(&dualstakingGenesis) -} - -// RegisterStoreDecoder registers a decoder. 
-func (am AppModule) RegisterStoreDecoder(_ sdk.StoreDecoderRegistry) {} - -// ProposalContents doesn't return any content functions for governance proposals. -func (AppModule) ProposalContents(_ module.SimulationState) []simtypes.WeightedProposalMsg { - return nil -} - -// WeightedOperations returns the all the gov module operations with their respective weights. -func (am AppModule) WeightedOperations(simState module.SimulationState) []simtypes.WeightedOperation { - operations := make([]simtypes.WeightedOperation, 0) - - var weightMsgDelegate int - simState.AppParams.GetOrGenerate(simState.Cdc, opWeightMsgDelegate, &weightMsgDelegate, nil, - func(_ *rand.Rand) { - weightMsgDelegate = defaultWeightMsgDelegate - }, - ) - operations = append(operations, simulation.NewWeightedOperation( - weightMsgDelegate, - dualstakingsimulation.SimulateMsgDelegate(am.accountKeeper, am.bankKeeper, am.keeper), - )) - - var weightMsgRedelegate int - simState.AppParams.GetOrGenerate(simState.Cdc, opWeightMsgRedelegate, &weightMsgRedelegate, nil, - func(_ *rand.Rand) { - weightMsgRedelegate = defaultWeightMsgRedelegate - }, - ) - operations = append(operations, simulation.NewWeightedOperation( - weightMsgRedelegate, - dualstakingsimulation.SimulateMsgRedelegate(am.accountKeeper, am.bankKeeper, am.keeper), - )) - - var weightMsgUnbond int - simState.AppParams.GetOrGenerate(simState.Cdc, opWeightMsgUnbond, &weightMsgUnbond, nil, - func(_ *rand.Rand) { - weightMsgUnbond = defaultWeightMsgUnbond - }, - ) - operations = append(operations, simulation.NewWeightedOperation( - weightMsgUnbond, - dualstakingsimulation.SimulateMsgUnbond(am.accountKeeper, am.bankKeeper, am.keeper), - )) - - var weightMsgClaimRewards int - simState.AppParams.GetOrGenerate(simState.Cdc, opWeightMsgClaimRewards, &weightMsgClaimRewards, nil, - func(_ *rand.Rand) { - weightMsgClaimRewards = defaultWeightMsgClaimRewards - }, - ) - operations = append(operations, simulation.NewWeightedOperation( - weightMsgClaimRewards, 
- dualstakingsimulation.SimulateMsgClaimRewards(am.accountKeeper, am.bankKeeper, am.keeper), - )) - - // this line is used by starport scaffolding # simapp/module/operation - - return operations -} - -// ProposalMsgs returns msgs used for governance proposals for simulations. -func (am AppModule) ProposalMsgs(simState module.SimulationState) []simtypes.WeightedProposalMsg { - return []simtypes.WeightedProposalMsg{ - // this line is used by starport scaffolding # simapp/module/OpMsg - } -} diff --git a/x/dualstaking/simulation/claim_rewards.go b/x/dualstaking/simulation/claim_rewards.go deleted file mode 100644 index bfbe077299..0000000000 --- a/x/dualstaking/simulation/claim_rewards.go +++ /dev/null @@ -1,29 +0,0 @@ -package simulation - -import ( - "math/rand" - - "github.com/cosmos/cosmos-sdk/baseapp" - sdk "github.com/cosmos/cosmos-sdk/types" - simtypes "github.com/cosmos/cosmos-sdk/types/simulation" - "github.com/lavanet/lava/v4/x/dualstaking/keeper" - "github.com/lavanet/lava/v4/x/dualstaking/types" -) - -func SimulateMsgClaimRewards( - ak types.AccountKeeper, - bk types.BankKeeper, - k keeper.Keeper, -) simtypes.Operation { - return func(r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string, - ) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { - simAccount, _ := simtypes.RandomAcc(r, accs) - msg := &types.MsgClaimRewards{ - Creator: simAccount.Address.String(), - } - - // TODO: Handling the ClaimRewards simulation - - return simtypes.NoOpMsg(types.ModuleName, msg.Type(), "ClaimRewards simulation not implemented"), nil, nil - } -} diff --git a/x/dualstaking/simulation/delegate.go b/x/dualstaking/simulation/delegate.go deleted file mode 100644 index bf12e20b40..0000000000 --- a/x/dualstaking/simulation/delegate.go +++ /dev/null @@ -1,65 +0,0 @@ -package simulation - -import ( - "math/rand" - - "github.com/cosmos/cosmos-sdk/baseapp" - sdk "github.com/cosmos/cosmos-sdk/types" - simtypes 
"github.com/cosmos/cosmos-sdk/types/simulation" - "github.com/lavanet/lava/v4/x/dualstaking/keeper" - "github.com/lavanet/lava/v4/x/dualstaking/types" -) - -func SimulateMsgDelegate( - ak types.AccountKeeper, - bk types.BankKeeper, - k keeper.Keeper, -) simtypes.Operation { - return func(r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string, - ) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { - simAccount, _ := simtypes.RandomAcc(r, accs) - msg := &types.MsgDelegate{ - Creator: simAccount.Address.String(), - } - - // TODO: Handling the Delegate simulation - - return simtypes.NoOpMsg(types.ModuleName, msg.Type(), "Delegate simulation not implemented"), nil, nil - } -} - -func SimulateMsgRedelegate( - ak types.AccountKeeper, - bk types.BankKeeper, - k keeper.Keeper, -) simtypes.Operation { - return func(r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string, - ) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { - simAccount, _ := simtypes.RandomAcc(r, accs) - msg := &types.MsgRedelegate{ - Creator: simAccount.Address.String(), - } - - // TODO: Handling the Redelegate simulation - - return simtypes.NoOpMsg(types.ModuleName, msg.Type(), "Redelegate simulation not implemented"), nil, nil - } -} - -func SimulateMsgUnbond( - ak types.AccountKeeper, - bk types.BankKeeper, - k keeper.Keeper, -) simtypes.Operation { - return func(r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string, - ) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { - simAccount, _ := simtypes.RandomAcc(r, accs) - msg := &types.MsgUnbond{ - Creator: simAccount.Address.String(), - } - - // TODO: Handling the Unbond simulation - - return simtypes.NoOpMsg(types.ModuleName, msg.Type(), "Unbond simulation not implemented"), nil, nil - } -} diff --git a/x/dualstaking/simulation/helpers.go b/x/dualstaking/simulation/helpers.go deleted file mode 100644 index 
92c437c0d1..0000000000 --- a/x/dualstaking/simulation/helpers.go +++ /dev/null @@ -1,15 +0,0 @@ -package simulation - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - simtypes "github.com/cosmos/cosmos-sdk/types/simulation" -) - -// FindAccount find a specific address from an account list -func FindAccount(accs []simtypes.Account, address string) (simtypes.Account, bool) { - creator, err := sdk.AccAddressFromBech32(address) - if err != nil { - panic(err) - } - return simtypes.FindAccount(accs, creator) -} diff --git a/x/epochstorage/client/cli/query_epoch_details_test.go b/x/epochstorage/client/cli/query_epoch_details_test.go index fd55bafaa4..775d0ded21 100644 --- a/x/epochstorage/client/cli/query_epoch_details_test.go +++ b/x/epochstorage/client/cli/query_epoch_details_test.go @@ -49,8 +49,8 @@ func TestShowEpochDetails(t *testing.T) { obj: obj, }, } { - tc := tc t.Run(tc.desc, func(t *testing.T) { + t.Parallel() var args []string args = append(args, tc.args...) out, err := clitestutil.ExecTestCLICmd(ctx, cli.CmdShowEpochDetails(), args) diff --git a/x/epochstorage/keeper/migrations.go b/x/epochstorage/keeper/migrations.go index b6871ef26f..f00f382cd8 100644 --- a/x/epochstorage/keeper/migrations.go +++ b/x/epochstorage/keeper/migrations.go @@ -12,7 +12,6 @@ import ( "github.com/lavanet/lava/v4/utils" "github.com/lavanet/lava/v4/x/epochstorage/types" v3 "github.com/lavanet/lava/v4/x/epochstorage/types/migrations/v3" - v6 "github.com/lavanet/lava/v4/x/epochstorage/types/migrations/v6" ) type Migrator struct { @@ -23,53 +22,6 @@ func NewMigrator(keeper Keeper) Migrator { return Migrator{keeper: keeper} } -// Migrate5to6 goes over all existing stake entries and populates the new vault address field with the stake entry address -func (m Migrator) Migrate5to6(ctx sdk.Context) error { - utils.LavaFormatDebug("migrate: epochstorage to include provider and vault addresses") - - store := prefix.NewStore(ctx.KVStore(m.keeper.storeKey), 
types.KeyPrefix(v3.StakeStorageKeyPrefix)) - iterator := sdk.KVStorePrefixIterator(store, []byte{}) - - defer iterator.Close() - - for ; iterator.Valid(); iterator.Next() { - var stakeStorageV6 v6.StakeStorage - m.keeper.cdc.MustUnmarshal(iterator.Value(), &stakeStorageV6) - - for i := range stakeStorageV6.StakeEntries { - stakeStorageV6.StakeEntries[i].Vault = stakeStorageV6.StakeEntries[i].Address - } - - store.Set(iterator.Key(), m.keeper.cdc.MustMarshal(&stakeStorageV6)) - } - - return nil -} - -// Migrate6to7 goes over all existing stake entries and populates the new description field with current moniker -func (m Migrator) Migrate6to7(ctx sdk.Context) error { - utils.LavaFormatDebug("migrate: epochstorage to include detailed description") - - store := prefix.NewStore(ctx.KVStore(m.keeper.storeKey), types.KeyPrefix(v3.StakeStorageKeyPrefix)) - iterator := sdk.KVStorePrefixIterator(store, []byte{}) - - defer iterator.Close() - - for ; iterator.Valid(); iterator.Next() { - var stakeStorageV7 types.StakeStorage - m.keeper.cdc.MustUnmarshal(iterator.Value(), &stakeStorageV7) - - for i := range stakeStorageV7.StakeEntries { - stakeStorageV7.StakeEntries[i].Description.Moniker = stakeStorageV7.StakeEntries[i].Moniker - stakeStorageV7.StakeEntries[i].Moniker = "" - } - - store.Set(iterator.Key(), m.keeper.cdc.MustMarshal(&stakeStorageV7)) - } - - return nil -} - // Migrate7to8 transfers all the stake entries from the old stake storage to the new stake entries store // StakeStorage is set to the stake entries store // StakeStorageCurrent is set to the stake entries current store diff --git a/x/epochstorage/module.go b/x/epochstorage/module.go index 6e6cba0642..900c9e0762 100644 --- a/x/epochstorage/module.go +++ b/x/epochstorage/module.go @@ -130,18 +130,6 @@ func (am AppModule) RegisterServices(cfg module.Configurator) { migrator := keeper.NewMigrator(am.keeper) - // register v5 -> v6 migration - if err := cfg.RegisterMigration(types.ModuleName, 5, 
migrator.Migrate5to6); err != nil { - // panic:ok: at start up, migration cannot proceed anyhow - panic(fmt.Errorf("%s: failed to register migration to v6: %w", types.ModuleName, err)) - } - - // register v6 -> v7 migration - if err := cfg.RegisterMigration(types.ModuleName, 6, migrator.Migrate6to7); err != nil { - // panic:ok: at start up, migration cannot proceed anyhow - panic(fmt.Errorf("%s: failed to register migration to v7: %w", types.ModuleName, err)) - } - // register v7 -> v8 migration if err := cfg.RegisterMigration(types.ModuleName, 7, migrator.Migrate7to8); err != nil { // panic:ok: at start up, migration cannot proceed anyhow diff --git a/x/epochstorage/module_simulation.go b/x/epochstorage/module_simulation.go deleted file mode 100644 index 59cc07cb51..0000000000 --- a/x/epochstorage/module_simulation.go +++ /dev/null @@ -1,84 +0,0 @@ -package epochstorage - -import ( - "math/rand" - - "github.com/cosmos/cosmos-sdk/testutil/sims" - types2 "github.com/cosmos/cosmos-sdk/x/auth/types" - - "github.com/cosmos/cosmos-sdk/baseapp" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/module" - simtypes "github.com/cosmos/cosmos-sdk/types/simulation" - "github.com/cosmos/cosmos-sdk/x/simulation" - "github.com/lavanet/lava/v4/testutil/sample" - epochstoragesimulation "github.com/lavanet/lava/v4/x/epochstorage/simulation" - "github.com/lavanet/lava/v4/x/epochstorage/types" -) - -// avoid unused import issue -var ( - _ = sample.AccAddress - _ = epochstoragesimulation.FindAccount - _ = sims.StakePerAccount - _ = simulation.MsgEntryKind - _ = baseapp.Paramspace -) - -const ( -// this line is used by starport scaffolding # simapp/module/const -) - -// GenerateGenesisState creates a randomized GenState of the module -func (AppModule) GenerateGenesisState(simState *module.SimulationState) { - accs := make([]string, len(simState.Accounts)) - for i, acc := range simState.Accounts { - accs[i] = acc.Address.String() - } - epochstorageGenesis 
:= types.GenesisState{ - // this line is used by starport scaffolding # simapp/module/genesisState - } - simState.GenState[types.ModuleName] = simState.Cdc.MustMarshalJSON(&epochstorageGenesis) -} - -// ProposalContents doesn't return any content functions for governance proposals -func (AppModule) ProposalContents(_ module.SimulationState) []simtypes.WeightedProposalMsg { - return nil -} - -// TODO: Add weighted proposals -func (AppModule) ProposalMsgs(_ module.SimulationState) []simtypes.WeightedProposalMsg { - return []simtypes.WeightedProposalMsg{ - simulation.NewWeightedProposalMsg("op_weight_msg_update_params", 100, func(r *rand.Rand, ctx sdk.Context, accs []simtypes.Account) sdk.Msg { - return &types2.MsgUpdateParams{} - }), - } -} - -//// RandomizedParams creates randomized param changes for the simulator -// func (am AppModule) RandomizedParams(_ *rand.Rand) []simtypes.ParamChange { -// epochstorageParams := types.DefaultParams() -// return []simtypes.ParamChange{ -// simulation.NewSimParamChange(types.ModuleName, string(types.KeyUnstakeHoldBlocks), func(r *rand.Rand) string { -// return string(types.Amino.MustMarshalJSON(epochstorageParams.UnstakeHoldBlocks)) -// }), -// simulation.NewSimParamChange(types.ModuleName, string(types.KeyEpochBlocks), func(r *rand.Rand) string { -// return string(types.Amino.MustMarshalJSON(epochstorageParams.EpochBlocks)) -// }), -// simulation.NewSimParamChange(types.ModuleName, string(types.KeyEpochsToSave), func(r *rand.Rand) string { -// return string(types.Amino.MustMarshalJSON(epochstorageParams.EpochsToSave)) -// }), -// } -// } - -// RegisterStoreDecoder registers a decoder -func (am AppModule) RegisterStoreDecoder(_ sdk.StoreDecoderRegistry) {} - -// WeightedOperations returns the all the gov module operations with their respective weights. 
-func (am AppModule) WeightedOperations(simState module.SimulationState) []simtypes.WeightedOperation { - operations := make([]simtypes.WeightedOperation, 0) - - // this line is used by starport scaffolding # simapp/module/operation - - return operations -} diff --git a/x/epochstorage/simulation/simap.go b/x/epochstorage/simulation/simap.go deleted file mode 100644 index 92c437c0d1..0000000000 --- a/x/epochstorage/simulation/simap.go +++ /dev/null @@ -1,15 +0,0 @@ -package simulation - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - simtypes "github.com/cosmos/cosmos-sdk/types/simulation" -) - -// FindAccount find a specific address from an account list -func FindAccount(accs []simtypes.Account, address string) (simtypes.Account, bool) { - creator, err := sdk.AccAddressFromBech32(address) - if err != nil { - panic(err) - } - return simtypes.FindAccount(accs, creator) -} diff --git a/x/epochstorage/types/migrations/v6/endpoint.pb.go b/x/epochstorage/types/migrations/v6/endpoint.pb.go deleted file mode 100644 index ed0ac20326..0000000000 --- a/x/epochstorage/types/migrations/v6/endpoint.pb.go +++ /dev/null @@ -1,522 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: lavanet/lava/epochstorage/endpoint.proto - -package types - -import ( - fmt "fmt" - proto "github.com/cosmos/gogoproto/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type Endpoint struct { - IPPORT string `protobuf:"bytes,1,opt,name=iPPORT,proto3" json:"iPPORT,omitempty"` - Geolocation int32 `protobuf:"varint,3,opt,name=geolocation,proto3" json:"geolocation,omitempty"` - Addons []string `protobuf:"bytes,4,rep,name=addons,proto3" json:"addons,omitempty"` - ApiInterfaces []string `protobuf:"bytes,5,rep,name=api_interfaces,json=apiInterfaces,proto3" json:"api_interfaces,omitempty"` - Extensions []string `protobuf:"bytes,6,rep,name=extensions,proto3" json:"extensions,omitempty"` -} - -func (m *Endpoint) Reset() { *m = Endpoint{} } -func (m *Endpoint) String() string { return proto.CompactTextString(m) } -func (*Endpoint) ProtoMessage() {} -func (*Endpoint) Descriptor() ([]byte, []int) { - return fileDescriptor_acb18a6b0d300ae9, []int{0} -} -func (m *Endpoint) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Endpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Endpoint.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Endpoint) XXX_Merge(src proto.Message) { - xxx_messageInfo_Endpoint.Merge(m, src) -} -func (m *Endpoint) XXX_Size() int { - return m.Size() -} -func (m *Endpoint) XXX_DiscardUnknown() { - xxx_messageInfo_Endpoint.DiscardUnknown(m) -} - -var xxx_messageInfo_Endpoint proto.InternalMessageInfo - -func (m *Endpoint) GetIPPORT() string { - if m != nil { - return m.IPPORT - } - return "" -} - -func (m *Endpoint) GetGeolocation() int32 { - if m != nil { - return m.Geolocation - } - return 0 -} - -func (m *Endpoint) GetAddons() []string { - if m != nil { - return m.Addons - } - return nil -} - -func (m *Endpoint) GetApiInterfaces() []string { - if m != nil { - return m.ApiInterfaces - } - return nil -} - -func (m *Endpoint) GetExtensions() 
[]string { - if m != nil { - return m.Extensions - } - return nil -} - -func init() { - proto.RegisterType((*Endpoint)(nil), "lavanet.lava.epochstorage.EndpointV6") -} - -func init() { - proto.RegisterFile("lavanet/lava/epochstorage/endpoint.proto", fileDescriptor_acb18a6b0d300ae9) -} - -var fileDescriptor_acb18a6b0d300ae9 = []byte{ - // 250 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xc8, 0x49, 0x2c, 0x4b, - 0xcc, 0x4b, 0x2d, 0xd1, 0x07, 0xd1, 0xfa, 0xa9, 0x05, 0xf9, 0xc9, 0x19, 0xc5, 0x25, 0xf9, 0x45, - 0x89, 0xe9, 0xa9, 0xfa, 0xa9, 0x79, 0x29, 0x05, 0xf9, 0x99, 0x79, 0x25, 0x7a, 0x05, 0x45, 0xf9, - 0x25, 0xf9, 0x42, 0x92, 0x50, 0x95, 0x7a, 0x20, 0x5a, 0x0f, 0x59, 0xa5, 0xd2, 0x4a, 0x46, 0x2e, - 0x0e, 0x57, 0xa8, 0x6a, 0x21, 0x31, 0x2e, 0xb6, 0xcc, 0x80, 0x00, 0xff, 0xa0, 0x10, 0x09, 0x46, - 0x05, 0x46, 0x0d, 0xce, 0x20, 0x28, 0x4f, 0x48, 0x81, 0x8b, 0x3b, 0x3d, 0x35, 0x3f, 0x27, 0x3f, - 0x39, 0xb1, 0x24, 0x33, 0x3f, 0x4f, 0x82, 0x59, 0x81, 0x51, 0x83, 0x35, 0x08, 0x59, 0x08, 0xa4, - 0x33, 0x31, 0x25, 0x25, 0x3f, 0xaf, 0x58, 0x82, 0x45, 0x81, 0x19, 0xa4, 0x13, 0xc2, 0x13, 0x52, - 0xe5, 0xe2, 0x4b, 0x2c, 0xc8, 0x8c, 0xcf, 0xcc, 0x2b, 0x49, 0x2d, 0x4a, 0x4b, 0x4c, 0x4e, 0x2d, - 0x96, 0x60, 0x05, 0xcb, 0xf3, 0x26, 0x16, 0x64, 0x7a, 0xc2, 0x05, 0x85, 0xe4, 0xb8, 0xb8, 0x52, - 0x2b, 0x4a, 0x52, 0xf3, 0x8a, 0x33, 0x41, 0x46, 0xb0, 0x81, 0x95, 0x20, 0x89, 0x78, 0xb1, 0x70, - 0x30, 0x09, 0x30, 0x3b, 0xb9, 0x9d, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, - 0x72, 0x8c, 0x13, 0x1e, 0xcb, 0x31, 0x5c, 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1, 0x1c, 0x43, 0x94, - 0x4e, 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x3e, 0x4a, 0xa8, 0x54, 0xa0, - 0x86, 0x4b, 0x49, 0x65, 0x41, 0x6a, 0x71, 0x12, 0x1b, 0x38, 0x54, 0x8c, 0x01, 0x01, 0x00, 0x00, - 0xff, 0xff, 0xf3, 0xac, 0x09, 0xa5, 0x41, 0x01, 0x00, 0x00, -} - -func (m *Endpoint) Marshal() (dAtA []byte, err error) { - size := m.Size() - 
dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Endpoint) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Endpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Extensions) > 0 { - for iNdEx := len(m.Extensions) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Extensions[iNdEx]) - copy(dAtA[i:], m.Extensions[iNdEx]) - i = encodeVarintEndpoint(dAtA, i, uint64(len(m.Extensions[iNdEx]))) - i-- - dAtA[i] = 0x32 - } - } - if len(m.ApiInterfaces) > 0 { - for iNdEx := len(m.ApiInterfaces) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ApiInterfaces[iNdEx]) - copy(dAtA[i:], m.ApiInterfaces[iNdEx]) - i = encodeVarintEndpoint(dAtA, i, uint64(len(m.ApiInterfaces[iNdEx]))) - i-- - dAtA[i] = 0x2a - } - } - if len(m.Addons) > 0 { - for iNdEx := len(m.Addons) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Addons[iNdEx]) - copy(dAtA[i:], m.Addons[iNdEx]) - i = encodeVarintEndpoint(dAtA, i, uint64(len(m.Addons[iNdEx]))) - i-- - dAtA[i] = 0x22 - } - } - if m.Geolocation != 0 { - i = encodeVarintEndpoint(dAtA, i, uint64(m.Geolocation)) - i-- - dAtA[i] = 0x18 - } - if len(m.IPPORT) > 0 { - i -= len(m.IPPORT) - copy(dAtA[i:], m.IPPORT) - i = encodeVarintEndpoint(dAtA, i, uint64(len(m.IPPORT))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintEndpoint(dAtA []byte, offset int, v uint64) int { - offset -= sovEndpoint(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Endpoint) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.IPPORT) - if l > 0 { - n += 1 + l + sovEndpoint(uint64(l)) - } - if m.Geolocation != 0 { - n += 1 + sovEndpoint(uint64(m.Geolocation)) - } - if len(m.Addons) > 0 { - for _, s := range m.Addons { - l = 
len(s) - n += 1 + l + sovEndpoint(uint64(l)) - } - } - if len(m.ApiInterfaces) > 0 { - for _, s := range m.ApiInterfaces { - l = len(s) - n += 1 + l + sovEndpoint(uint64(l)) - } - } - if len(m.Extensions) > 0 { - for _, s := range m.Extensions { - l = len(s) - n += 1 + l + sovEndpoint(uint64(l)) - } - } - return n -} - -func sovEndpoint(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozEndpoint(x uint64) (n int) { - return sovEndpoint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Endpoint) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEndpoint - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Endpoint: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Endpoint: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IPPORT", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEndpoint - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEndpoint - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthEndpoint - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IPPORT = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Geolocation", wireType) - } - m.Geolocation = 0 
- for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEndpoint - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Geolocation |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addons", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEndpoint - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEndpoint - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthEndpoint - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Addons = append(m.Addons, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ApiInterfaces", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEndpoint - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEndpoint - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthEndpoint - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ApiInterfaces = append(m.ApiInterfaces, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Extensions", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEndpoint - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - 
stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEndpoint - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthEndpoint - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Extensions = append(m.Extensions, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipEndpoint(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthEndpoint - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipEndpoint(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEndpoint - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEndpoint - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEndpoint - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthEndpoint - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupEndpoint - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - 
return 0, ErrInvalidLengthEndpoint - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthEndpoint = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowEndpoint = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupEndpoint = fmt.Errorf("proto: unexpected end of group") -) \ No newline at end of file diff --git a/x/epochstorage/types/migrations/v6/stake_entry.pb.go b/x/epochstorage/types/migrations/v6/stake_entry.pb.go deleted file mode 100644 index ae92f42631..0000000000 --- a/x/epochstorage/types/migrations/v6/stake_entry.pb.go +++ /dev/null @@ -1,1106 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: lavanet/lava/epochstorage/stake_entry.proto - -package types - -import ( - fmt "fmt" - types "github.com/cosmos/cosmos-sdk/types" - _ "github.com/cosmos/gogoproto/gogoproto" - proto "github.com/cosmos/gogoproto/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type StakeEntry struct { - Stake types.Coin `protobuf:"bytes,1,opt,name=stake,proto3" json:"stake"` - Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` - StakeAppliedBlock uint64 `protobuf:"varint,3,opt,name=stake_applied_block,json=stakeAppliedBlock,proto3" json:"stake_applied_block,omitempty"` - Endpoints []Endpoint `protobuf:"bytes,4,rep,name=endpoints,proto3" json:"endpoints"` - Geolocation int32 `protobuf:"varint,5,opt,name=geolocation,proto3" json:"geolocation,omitempty"` - Chain string `protobuf:"bytes,6,opt,name=chain,proto3" json:"chain,omitempty"` - Moniker string `protobuf:"bytes,8,opt,name=moniker,proto3" json:"moniker,omitempty"` - DelegateTotal types.Coin `protobuf:"bytes,9,opt,name=delegate_total,json=delegateTotal,proto3" json:"delegate_total"` - DelegateLimit types.Coin `protobuf:"bytes,10,opt,name=delegate_limit,json=delegateLimit,proto3" json:"delegate_limit"` - DelegateCommission uint64 `protobuf:"varint,11,opt,name=delegate_commission,json=delegateCommission,proto3" json:"delegate_commission,omitempty"` - LastChange uint64 `protobuf:"varint,12,opt,name=last_change,json=lastChange,proto3" json:"last_change,omitempty"` - BlockReport *BlockReport `protobuf:"bytes,13,opt,name=block_report,json=blockReport,proto3" json:"block_report,omitempty"` - Vault string `protobuf:"bytes,14,opt,name=vault,proto3" json:"vault,omitempty"` -} - -func (m *StakeEntry) Reset() { *m = StakeEntry{} } -func (m *StakeEntry) String() string { return proto.CompactTextString(m) } -func (*StakeEntry) ProtoMessage() {} -func (*StakeEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_df6302d6b53c056e, []int{0} -} -func (m *StakeEntry) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StakeEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StakeEntry.Marshal(b, m, 
deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StakeEntry) XXX_Merge(src proto.Message) { - xxx_messageInfo_StakeEntry.Merge(m, src) -} -func (m *StakeEntry) XXX_Size() int { - return m.Size() -} -func (m *StakeEntry) XXX_DiscardUnknown() { - xxx_messageInfo_StakeEntry.DiscardUnknown(m) -} - -var xxx_messageInfo_StakeEntry proto.InternalMessageInfo - -func (m *StakeEntry) GetStake() types.Coin { - if m != nil { - return m.Stake - } - return types.Coin{} -} - -func (m *StakeEntry) GetAddress() string { - if m != nil { - return m.Address - } - return "" -} - -func (m *StakeEntry) GetStakeAppliedBlock() uint64 { - if m != nil { - return m.StakeAppliedBlock - } - return 0 -} - -func (m *StakeEntry) GetEndpoints() []Endpoint { - if m != nil { - return m.Endpoints - } - return nil -} - -func (m *StakeEntry) GetGeolocation() int32 { - if m != nil { - return m.Geolocation - } - return 0 -} - -func (m *StakeEntry) GetChain() string { - if m != nil { - return m.Chain - } - return "" -} - -func (m *StakeEntry) GetMoniker() string { - if m != nil { - return m.Moniker - } - return "" -} - -func (m *StakeEntry) GetDelegateTotal() types.Coin { - if m != nil { - return m.DelegateTotal - } - return types.Coin{} -} - -func (m *StakeEntry) GetDelegateLimit() types.Coin { - if m != nil { - return m.DelegateLimit - } - return types.Coin{} -} - -func (m *StakeEntry) GetDelegateCommission() uint64 { - if m != nil { - return m.DelegateCommission - } - return 0 -} - -func (m *StakeEntry) GetLastChange() uint64 { - if m != nil { - return m.LastChange - } - return 0 -} - -func (m *StakeEntry) GetBlockReport() *BlockReport { - if m != nil { - return m.BlockReport - } - return nil -} - -func (m *StakeEntry) GetVault() string { - if m != nil { - return m.Vault - } - return "" -} - -// BlockReport holds the most up-to-date info regarding blocks of the provider -// It is set in the relay 
payment TX logic -// used by the consumer to calculate the provider's sync score -type BlockReport struct { - Epoch uint64 `protobuf:"varint,1,opt,name=epoch,proto3" json:"epoch,omitempty"` - LatestBlock uint64 `protobuf:"varint,2,opt,name=latest_block,json=latestBlock,proto3" json:"latest_block,omitempty"` -} - -func (m *BlockReport) Reset() { *m = BlockReport{} } -func (m *BlockReport) String() string { return proto.CompactTextString(m) } -func (*BlockReport) ProtoMessage() {} -func (*BlockReport) Descriptor() ([]byte, []int) { - return fileDescriptor_df6302d6b53c056e, []int{1} -} -func (m *BlockReport) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *BlockReport) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_BlockReport.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *BlockReport) XXX_Merge(src proto.Message) { - xxx_messageInfo_BlockReport.Merge(m, src) -} -func (m *BlockReport) XXX_Size() int { - return m.Size() -} -func (m *BlockReport) XXX_DiscardUnknown() { - xxx_messageInfo_BlockReport.DiscardUnknown(m) -} - -var xxx_messageInfo_BlockReport proto.InternalMessageInfo - -func (m *BlockReport) GetEpoch() uint64 { - if m != nil { - return m.Epoch - } - return 0 -} - -func (m *BlockReport) GetLatestBlock() uint64 { - if m != nil { - return m.LatestBlock - } - return 0 -} - -func init() { - proto.RegisterType((*StakeEntry)(nil), "lavanet.lava.epochstorage.StakeEntryV6") - proto.RegisterType((*BlockReport)(nil), "lavanet.lava.epochstorage.BlockReportV6") -} - -func init() { - proto.RegisterFile("lavanet/lava/epochstorage/stake_entry.proto", fileDescriptor_df6302d6b53c056e) -} - -var fileDescriptor_df6302d6b53c056e = []byte{ - // 510 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0x41, 0x6e, 0xdb, 
0x3a, - 0x10, 0xb5, 0x12, 0x39, 0xb1, 0x29, 0x27, 0xf8, 0x9f, 0xc9, 0x82, 0xc9, 0x42, 0x51, 0x53, 0xa0, - 0x10, 0xd0, 0x82, 0x42, 0x52, 0xf4, 0x00, 0xb5, 0x11, 0x17, 0x2d, 0xba, 0x52, 0xbb, 0xea, 0xc6, - 0xa0, 0x24, 0x42, 0x26, 0x2c, 0x71, 0x04, 0x91, 0x31, 0x9a, 0x5b, 0xf4, 0x58, 0x59, 0x66, 0xd9, - 0x55, 0x51, 0xd8, 0x27, 0xe8, 0x0d, 0x0a, 0x92, 0x72, 0x12, 0x2f, 0x52, 0xb4, 0x2b, 0x72, 0xe6, - 0xbd, 0x37, 0x98, 0x79, 0xe4, 0xa0, 0x97, 0x15, 0x5b, 0x32, 0xc9, 0x75, 0x62, 0xce, 0x84, 0x37, - 0x90, 0xcf, 0x95, 0x86, 0x96, 0x95, 0x3c, 0x51, 0x9a, 0x2d, 0xf8, 0x8c, 0x4b, 0xdd, 0xde, 0xd0, - 0xa6, 0x05, 0x0d, 0xf8, 0xa4, 0x23, 0x53, 0x73, 0xd2, 0xc7, 0xe4, 0xd3, 0xf8, 0xe9, 0x3a, 0x5c, - 0x16, 0x0d, 0x08, 0xa9, 0x5d, 0x91, 0xd3, 0xe3, 0x12, 0x4a, 0xb0, 0xd7, 0xc4, 0xdc, 0xba, 0x6c, - 0x98, 0x83, 0xaa, 0x41, 0x25, 0x19, 0x53, 0x3c, 0x59, 0x5e, 0x64, 0x5c, 0xb3, 0x8b, 0x24, 0x07, - 0x21, 0x1d, 0x7e, 0xfe, 0xcb, 0x47, 0xe8, 0x93, 0x69, 0xe8, 0xca, 0xf4, 0x83, 0xdf, 0xa0, 0xbe, - 0x6d, 0x8f, 0x78, 0x91, 0x17, 0x07, 0x97, 0x27, 0xd4, 0xc9, 0xa9, 0x91, 0xd3, 0x4e, 0x4e, 0x27, - 0x20, 0xe4, 0xd8, 0xbf, 0xfd, 0x71, 0xd6, 0x4b, 0x1d, 0x1b, 0x13, 0xb4, 0xcf, 0x8a, 0xa2, 0xe5, - 0x4a, 0x91, 0x9d, 0xc8, 0x8b, 0x87, 0xe9, 0x26, 0xc4, 0x14, 0x1d, 0xb9, 0x79, 0x59, 0xd3, 0x54, - 0x82, 0x17, 0xb3, 0xac, 0x82, 0x7c, 0x41, 0x76, 0x23, 0x2f, 0xf6, 0xd3, 0xff, 0x2d, 0xf4, 0xd6, - 0x21, 0x63, 0x03, 0xe0, 0x77, 0x68, 0xb8, 0x99, 0x4b, 0x11, 0x3f, 0xda, 0x8d, 0x83, 0xcb, 0xe7, - 0xf4, 0x49, 0x7b, 0xe8, 0x55, 0xc7, 0xed, 0xda, 0x79, 0xd0, 0xe2, 0x08, 0x05, 0x25, 0x87, 0x0a, - 0x72, 0xa6, 0x05, 0x48, 0xd2, 0x8f, 0xbc, 0xb8, 0x9f, 0x3e, 0x4e, 0xe1, 0x63, 0xd4, 0xcf, 0xe7, - 0x4c, 0x48, 0xb2, 0x67, 0x5b, 0x76, 0x81, 0x19, 0xa5, 0x06, 0x29, 0x16, 0xbc, 0x25, 0x03, 0x37, - 0x4a, 0x17, 0xe2, 0x29, 0x3a, 0x2c, 0x78, 0xc5, 0x4b, 0xa6, 0xf9, 0x4c, 0x83, 0x66, 0x15, 0x19, - 0xfe, 0x9d, 0x49, 0x07, 0x1b, 0xd9, 0x67, 0xa3, 0xda, 0xaa, 0x53, 0x89, 0x5a, 0x68, 0x82, 0xfe, - 0xb1, 0xce, 0x47, 0xa3, 0xc2, 
0x09, 0x3a, 0xba, 0xaf, 0x93, 0x43, 0x5d, 0x0b, 0xa5, 0xcc, 0xa4, - 0x81, 0xb5, 0x16, 0x6f, 0xa0, 0xc9, 0x3d, 0x82, 0xcf, 0x50, 0x50, 0x31, 0xa5, 0x67, 0xf9, 0x9c, - 0xc9, 0x92, 0x93, 0x91, 0x25, 0x22, 0x93, 0x9a, 0xd8, 0x0c, 0x7e, 0x8f, 0x46, 0xf6, 0x79, 0x66, - 0x2d, 0x6f, 0xa0, 0xd5, 0xe4, 0xc0, 0xf6, 0xf5, 0xe2, 0x0f, 0xfe, 0xdb, 0x47, 0x4b, 0x2d, 0x3b, - 0x0d, 0xb2, 0x87, 0xc0, 0x98, 0xbb, 0x64, 0xd7, 0x95, 0x26, 0x87, 0xce, 0x5c, 0x1b, 0x7c, 0xf0, - 0x07, 0xfb, 0xff, 0x0d, 0xce, 0xa7, 0x28, 0x18, 0x6f, 0x53, 0x6d, 0x4d, 0xfb, 0xe7, 0xfc, 0xd4, - 0x05, 0xf8, 0x19, 0x1a, 0x55, 0x4c, 0x73, 0xa5, 0xbb, 0x1f, 0xb3, 0x63, 0xc1, 0xc0, 0xe5, 0xac, - 0x7c, 0x3c, 0xbd, 0x5d, 0x85, 0xde, 0xdd, 0x2a, 0xf4, 0x7e, 0xae, 0x42, 0xef, 0xdb, 0x3a, 0xec, - 0xdd, 0xad, 0xc3, 0xde, 0xf7, 0x75, 0xd8, 0xfb, 0xf2, 0xaa, 0x14, 0x7a, 0x7e, 0x9d, 0xd1, 0x1c, - 0xea, 0x64, 0x6b, 0x81, 0xbe, 0x6e, 0xaf, 0x90, 0xbe, 0x69, 0xb8, 0xca, 0xf6, 0xec, 0x2a, 0xbc, - 0xfe, 0x1d, 0x00, 0x00, 0xff, 0xff, 0xc7, 0x25, 0xca, 0x07, 0xb4, 0x03, 0x00, 0x00, -} - -func (m *StakeEntry) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StakeEntry) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StakeEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Vault) > 0 { - i -= len(m.Vault) - copy(dAtA[i:], m.Vault) - i = encodeVarintStakeEntry(dAtA, i, uint64(len(m.Vault))) - i-- - dAtA[i] = 0x72 - } - if m.BlockReport != nil { - { - size, err := m.BlockReport.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintStakeEntry(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x6a - } - if m.LastChange != 0 { - i = encodeVarintStakeEntry(dAtA, i, uint64(m.LastChange)) - i-- - dAtA[i] = 0x60 - } 
- if m.DelegateCommission != 0 { - i = encodeVarintStakeEntry(dAtA, i, uint64(m.DelegateCommission)) - i-- - dAtA[i] = 0x58 - } - { - size, err := m.DelegateLimit.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintStakeEntry(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x52 - { - size, err := m.DelegateTotal.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintStakeEntry(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - if len(m.Moniker) > 0 { - i -= len(m.Moniker) - copy(dAtA[i:], m.Moniker) - i = encodeVarintStakeEntry(dAtA, i, uint64(len(m.Moniker))) - i-- - dAtA[i] = 0x42 - } - if len(m.Chain) > 0 { - i -= len(m.Chain) - copy(dAtA[i:], m.Chain) - i = encodeVarintStakeEntry(dAtA, i, uint64(len(m.Chain))) - i-- - dAtA[i] = 0x32 - } - if m.Geolocation != 0 { - i = encodeVarintStakeEntry(dAtA, i, uint64(m.Geolocation)) - i-- - dAtA[i] = 0x28 - } - if len(m.Endpoints) > 0 { - for iNdEx := len(m.Endpoints) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Endpoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintStakeEntry(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - if m.StakeAppliedBlock != 0 { - i = encodeVarintStakeEntry(dAtA, i, uint64(m.StakeAppliedBlock)) - i-- - dAtA[i] = 0x18 - } - if len(m.Address) > 0 { - i -= len(m.Address) - copy(dAtA[i:], m.Address) - i = encodeVarintStakeEntry(dAtA, i, uint64(len(m.Address))) - i-- - dAtA[i] = 0x12 - } - { - size, err := m.Stake.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintStakeEntry(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *BlockReport) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m 
*BlockReport) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *BlockReport) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.LatestBlock != 0 { - i = encodeVarintStakeEntry(dAtA, i, uint64(m.LatestBlock)) - i-- - dAtA[i] = 0x10 - } - if m.Epoch != 0 { - i = encodeVarintStakeEntry(dAtA, i, uint64(m.Epoch)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintStakeEntry(dAtA []byte, offset int, v uint64) int { - offset -= sovStakeEntry(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *StakeEntry) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Stake.Size() - n += 1 + l + sovStakeEntry(uint64(l)) - l = len(m.Address) - if l > 0 { - n += 1 + l + sovStakeEntry(uint64(l)) - } - if m.StakeAppliedBlock != 0 { - n += 1 + sovStakeEntry(uint64(m.StakeAppliedBlock)) - } - if len(m.Endpoints) > 0 { - for _, e := range m.Endpoints { - l = e.Size() - n += 1 + l + sovStakeEntry(uint64(l)) - } - } - if m.Geolocation != 0 { - n += 1 + sovStakeEntry(uint64(m.Geolocation)) - } - l = len(m.Chain) - if l > 0 { - n += 1 + l + sovStakeEntry(uint64(l)) - } - l = len(m.Moniker) - if l > 0 { - n += 1 + l + sovStakeEntry(uint64(l)) - } - l = m.DelegateTotal.Size() - n += 1 + l + sovStakeEntry(uint64(l)) - l = m.DelegateLimit.Size() - n += 1 + l + sovStakeEntry(uint64(l)) - if m.DelegateCommission != 0 { - n += 1 + sovStakeEntry(uint64(m.DelegateCommission)) - } - if m.LastChange != 0 { - n += 1 + sovStakeEntry(uint64(m.LastChange)) - } - if m.BlockReport != nil { - l = m.BlockReport.Size() - n += 1 + l + sovStakeEntry(uint64(l)) - } - l = len(m.Vault) - if l > 0 { - n += 1 + l + sovStakeEntry(uint64(l)) - } - return n -} - -func (m *BlockReport) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if 
m.Epoch != 0 { - n += 1 + sovStakeEntry(uint64(m.Epoch)) - } - if m.LatestBlock != 0 { - n += 1 + sovStakeEntry(uint64(m.LatestBlock)) - } - return n -} - -func sovStakeEntry(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozStakeEntry(x uint64) (n int) { - return sovStakeEntry(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *StakeEntry) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStakeEntry - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StakeEntry: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StakeEntry: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stake", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStakeEntry - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStakeEntry - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStakeEntry - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Stake.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStakeEntry - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStakeEntry - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStakeEntry - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Address = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StakeAppliedBlock", wireType) - } - m.StakeAppliedBlock = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStakeEntry - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StakeAppliedBlock |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Endpoints", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStakeEntry - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStakeEntry - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStakeEntry - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Endpoints = append(m.Endpoints, Endpoint{}) - if err := m.Endpoints[len(m.Endpoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Geolocation", wireType) - } - m.Geolocation = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStakeEntry - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Geolocation |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Chain", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStakeEntry - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStakeEntry - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStakeEntry - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Chain = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Moniker", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStakeEntry - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStakeEntry - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStakeEntry - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Moniker = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DelegateTotal", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStakeEntry - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStakeEntry - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStakeEntry - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.DelegateTotal.Unmarshal(dAtA[iNdEx:postIndex]); err != nil 
{ - return err - } - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DelegateLimit", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStakeEntry - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStakeEntry - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStakeEntry - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.DelegateLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DelegateCommission", wireType) - } - m.DelegateCommission = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStakeEntry - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DelegateCommission |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field LastChange", wireType) - } - m.LastChange = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStakeEntry - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.LastChange |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BlockReport", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStakeEntry - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStakeEntry - } - postIndex := iNdEx + msglen - if 
postIndex < 0 { - return ErrInvalidLengthStakeEntry - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.BlockReport == nil { - m.BlockReport = &BlockReport{} - } - if err := m.BlockReport.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 14: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Vault", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStakeEntry - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStakeEntry - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStakeEntry - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Vault = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipStakeEntry(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthStakeEntry - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *BlockReport) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStakeEntry - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BlockReport: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BlockReport: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch 
fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Epoch", wireType) - } - m.Epoch = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStakeEntry - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Epoch |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field LatestBlock", wireType) - } - m.LatestBlock = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStakeEntry - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.LatestBlock |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipStakeEntry(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthStakeEntry - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipStakeEntry(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowStakeEntry - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowStakeEntry - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowStakeEntry - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthStakeEntry - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupStakeEntry - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthStakeEntry - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthStakeEntry = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowStakeEntry = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupStakeEntry = fmt.Errorf("proto: unexpected end of group") -) \ No newline at end of file diff --git a/x/epochstorage/types/migrations/v6/stake_storage.pb.go b/x/epochstorage/types/migrations/v6/stake_storage.pb.go deleted file mode 100644 index bf34c57f4b..0000000000 --- a/x/epochstorage/types/migrations/v6/stake_storage.pb.go +++ /dev/null @@ -1,438 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: lavanet/lava/epochstorage/stake_storage.proto - -package types - -import ( - fmt "fmt" - _ "github.com/cosmos/gogoproto/gogoproto" - proto "github.com/cosmos/gogoproto/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type StakeStorage struct { - Index string `protobuf:"bytes,1,opt,name=index,proto3" json:"index,omitempty"` - StakeEntries []StakeEntry `protobuf:"bytes,2,rep,name=stakeEntries,proto3" json:"stakeEntries"` - EpochBlockHash []byte `protobuf:"bytes,3,opt,name=epochBlockHash,proto3" json:"epochBlockHash,omitempty"` -} - -func (m *StakeStorage) Reset() { *m = StakeStorage{} } -func (m *StakeStorage) String() string { return proto.CompactTextString(m) } -func (*StakeStorage) ProtoMessage() {} -func (*StakeStorage) Descriptor() ([]byte, []int) { - return fileDescriptor_be7b78aecc265fd4, []int{0} -} -func (m *StakeStorage) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StakeStorage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StakeStorage.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StakeStorage) XXX_Merge(src proto.Message) { - xxx_messageInfo_StakeStorage.Merge(m, src) -} -func (m *StakeStorage) XXX_Size() int { - return m.Size() -} -func (m *StakeStorage) XXX_DiscardUnknown() { - xxx_messageInfo_StakeStorage.DiscardUnknown(m) -} - -var xxx_messageInfo_StakeStorage proto.InternalMessageInfo - -func (m *StakeStorage) GetIndex() string { - if m != nil { - return m.Index - } - return "" -} - -func (m *StakeStorage) GetStakeEntries() []StakeEntry { - if m != nil { - return m.StakeEntries - } - return nil -} - -func (m *StakeStorage) GetEpochBlockHash() []byte { - if m != nil { - return m.EpochBlockHash - } - return nil -} - -func init() { - proto.RegisterType((*StakeStorage)(nil), "lavanet.lava.epochstorage.StakeStorageV6") -} - -func init() { - proto.RegisterFile("lavanet/lava/epochstorage/stake_storage.proto", fileDescriptor_be7b78aecc265fd4) -} - -var fileDescriptor_be7b78aecc265fd4 
= []byte{ - // 246 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xcd, 0x49, 0x2c, 0x4b, - 0xcc, 0x4b, 0x2d, 0xd1, 0x07, 0xd1, 0xfa, 0xa9, 0x05, 0xf9, 0xc9, 0x19, 0xc5, 0x25, 0xf9, 0x45, - 0x89, 0xe9, 0xa9, 0xfa, 0xc5, 0x25, 0x89, 0xd9, 0xa9, 0xf1, 0x50, 0x9e, 0x5e, 0x41, 0x51, 0x7e, - 0x49, 0xbe, 0x90, 0x24, 0x54, 0xb9, 0x1e, 0x88, 0xd6, 0x43, 0x56, 0x2e, 0xa5, 0x4d, 0xc8, 0xa4, - 0xd4, 0xbc, 0x92, 0xa2, 0x4a, 0x88, 0x39, 0x52, 0x22, 0xe9, 0xf9, 0xe9, 0xf9, 0x60, 0xa6, 0x3e, - 0x88, 0x05, 0x11, 0x55, 0x9a, 0xcb, 0xc8, 0xc5, 0x13, 0x0c, 0x52, 0x1b, 0x0c, 0xd1, 0x28, 0x24, - 0xc2, 0xc5, 0x9a, 0x99, 0x97, 0x92, 0x5a, 0x21, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x19, 0x04, 0xe1, - 0x08, 0xf9, 0x73, 0xf1, 0x80, 0x4d, 0x74, 0xcd, 0x2b, 0x29, 0xca, 0x4c, 0x2d, 0x96, 0x60, 0x52, - 0x60, 0xd6, 0xe0, 0x36, 0x52, 0xd5, 0xc3, 0xe9, 0x36, 0xbd, 0x60, 0x98, 0xf2, 0x4a, 0x27, 0x96, - 0x13, 0xf7, 0xe4, 0x19, 0x82, 0x50, 0x0c, 0x10, 0x52, 0xe3, 0xe2, 0x03, 0x2b, 0x77, 0xca, 0xc9, - 0x4f, 0xce, 0xf6, 0x48, 0x2c, 0xce, 0x90, 0x60, 0x56, 0x60, 0xd4, 0xe0, 0x09, 0x42, 0x13, 0x75, - 0x72, 0x3b, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x27, 0x3c, - 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x28, 0x9d, 0xf4, 0xcc, 0x92, - 0x8c, 0xd2, 0x24, 0xbd, 0xe4, 0xfc, 0x5c, 0x7d, 0x94, 0x70, 0xa8, 0x40, 0x0d, 0x89, 0x92, 0xca, - 0x82, 0xd4, 0xe2, 0x24, 0x36, 0xb0, 0x77, 0x8d, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x64, 0x2d, - 0xbc, 0x7a, 0x7d, 0x01, 0x00, 0x00, -} - -func (m *StakeStorage) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StakeStorage) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StakeStorage) MarshalToSizedBuffer(dAtA []byte) (int, error) { 
- i := len(dAtA) - _ = i - var l int - _ = l - if len(m.EpochBlockHash) > 0 { - i -= len(m.EpochBlockHash) - copy(dAtA[i:], m.EpochBlockHash) - i = encodeVarintStakeStorage(dAtA, i, uint64(len(m.EpochBlockHash))) - i-- - dAtA[i] = 0x1a - } - if len(m.StakeEntries) > 0 { - for iNdEx := len(m.StakeEntries) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.StakeEntries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintStakeStorage(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.Index) > 0 { - i -= len(m.Index) - copy(dAtA[i:], m.Index) - i = encodeVarintStakeStorage(dAtA, i, uint64(len(m.Index))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintStakeStorage(dAtA []byte, offset int, v uint64) int { - offset -= sovStakeStorage(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *StakeStorage) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Index) - if l > 0 { - n += 1 + l + sovStakeStorage(uint64(l)) - } - if len(m.StakeEntries) > 0 { - for _, e := range m.StakeEntries { - l = e.Size() - n += 1 + l + sovStakeStorage(uint64(l)) - } - } - l = len(m.EpochBlockHash) - if l > 0 { - n += 1 + l + sovStakeStorage(uint64(l)) - } - return n -} - -func sovStakeStorage(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozStakeStorage(x uint64) (n int) { - return sovStakeStorage(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *StakeStorage) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStakeStorage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 
3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StakeStorage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StakeStorage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStakeStorage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStakeStorage - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStakeStorage - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Index = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StakeEntries", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStakeStorage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStakeStorage - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStakeStorage - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.StakeEntries = append(m.StakeEntries, StakeEntry{}) - if err := m.StakeEntries[len(m.StakeEntries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EpochBlockHash", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStakeStorage - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthStakeStorage - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthStakeStorage - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.EpochBlockHash = append(m.EpochBlockHash[:0], dAtA[iNdEx:postIndex]...) - if m.EpochBlockHash == nil { - m.EpochBlockHash = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipStakeStorage(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthStakeStorage - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipStakeStorage(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowStakeStorage - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowStakeStorage - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowStakeStorage - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthStakeStorage - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupStakeStorage - } - 
depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthStakeStorage - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthStakeStorage = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowStakeStorage = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupStakeStorage = fmt.Errorf("proto: unexpected end of group") -) \ No newline at end of file diff --git a/x/pairing/client/cli/query_account_info.go b/x/pairing/client/cli/query_account_info.go index 7065137f78..28f4937e30 100644 --- a/x/pairing/client/cli/query_account_info.go +++ b/x/pairing/client/cli/query_account_info.go @@ -10,10 +10,8 @@ import ( "github.com/lavanet/lava/v4/utils" "github.com/lavanet/lava/v4/utils/sigs" dualstakingtypes "github.com/lavanet/lava/v4/x/dualstaking/types" - epochstoragetypes "github.com/lavanet/lava/v4/x/epochstorage/types" "github.com/lavanet/lava/v4/x/pairing/types" projecttypes "github.com/lavanet/lava/v4/x/projects/types" - spectypes "github.com/lavanet/lava/v4/x/spec/types" subscriptiontypes "github.com/lavanet/lava/v4/x/subscription/types" "github.com/spf13/cobra" ) @@ -55,16 +53,11 @@ func CmdAccountInfo() *cobra.Command { return err } } - specQuerier := spectypes.NewQueryClient(clientCtx) ctx := context.Background() - allChains, err := specQuerier.ShowAllChains(ctx, &spectypes.QueryShowAllChainsRequest{}) - if err != nil { - return utils.LavaFormatError("failed getting key name from clientCtx, either provide the address in an argument or verify the --from wallet exists", err) - } + pairingQuerier := types.NewQueryClient(clientCtx) subscriptionQuerier := subscriptiontypes.NewQueryClient(clientCtx) projectQuerier := projecttypes.NewQueryClient(clientCtx) - epochStorageQuerier := epochstoragetypes.NewQueryClient(clientCtx) dualstakingQuerier := 
dualstakingtypes.NewQueryClient(clientCtx) stakingQuerier := stakingtypes.NewQueryClient(clientCtx) resultStatus, err := clientCtx.Client.Status(ctx) @@ -77,45 +70,26 @@ func CmdAccountInfo() *cobra.Command { var info types.QueryAccountInfoResponse // fill the objects - for _, chainStructInfo := range allChains.ChainInfoList { - chainID := chainStructInfo.ChainID - response, err := pairingQuerier.Providers(ctx, &types.QueryProvidersRequest{ - ChainID: chainID, - ShowFrozen: true, - }) - if err == nil && len(response.StakeEntry) > 0 { - for _, provider := range response.StakeEntry { - if provider.IsAddressVaultOrProvider(address) { - if provider.StakeAppliedBlock > uint64(currentBlock) { - info.Frozen = append(info.Frozen, provider) - } else { - info.Provider = append(info.Provider, provider) - } - break - } - } - } - } - unstakeEntriesAllChains, err := epochStorageQuerier.StakeStorage(ctx, &epochstoragetypes.QueryGetStakeStorageRequest{ - Index: epochstoragetypes.StakeStorageKeyUnstakeConst, + response, err := pairingQuerier.Provider(ctx, &types.QueryProviderRequest{ + Address: address, }) if err == nil { - if len(unstakeEntriesAllChains.StakeStorage.StakeEntries) > 0 { - for _, unstakingProvider := range unstakeEntriesAllChains.StakeStorage.StakeEntries { - if unstakingProvider.IsAddressVaultOrProvider(address) { - info.Unstaked = append(info.Unstaked, unstakingProvider) - } + for _, provider := range response.StakeEntries { + if provider.StakeAppliedBlock > uint64(currentBlock) { + info.Frozen = append(info.Frozen, provider) + } else { + info.Provider = append(info.Provider, provider) } } } - response, err := subscriptionQuerier.Current(cmd.Context(), &subscriptiontypes.QueryCurrentRequest{ + subresponse, err := subscriptionQuerier.Current(cmd.Context(), &subscriptiontypes.QueryCurrentRequest{ Consumer: address, }) if err == nil { - info.Subscription = response.Sub + info.Subscription = subresponse.Sub } developer, err := projectQuerier.Developer(cmd.Context(), 
&projecttypes.QueryDeveloperRequest{Developer: address}) @@ -140,7 +114,6 @@ func CmdAccountInfo() *cobra.Command { } // we finished gathering information, now print it - return clientCtx.PrintProto(&info) }, } diff --git a/x/pairing/client/cli/query_effective_policy.go b/x/pairing/client/cli/query_effective_policy.go index f7a4ce565c..9ef356e8e8 100644 --- a/x/pairing/client/cli/query_effective_policy.go +++ b/x/pairing/client/cli/query_effective_policy.go @@ -24,7 +24,8 @@ func CmdEffectivePolicy() *cobra.Command { if len(args) > 1 { address, err = utils.ParseCLIAddress(clientCtx, args[1]) if err != nil { - return err + // this should allow project names not only addresses + address = args[1] } } else { clientCtxForTx, err := client.GetClientQueryContext(cmd) diff --git a/x/pairing/client/cli/tx_distribute_provider_stake.go b/x/pairing/client/cli/tx_distribute_provider_stake.go index f8808b5275..aba98f9c3e 100644 --- a/x/pairing/client/cli/tx_distribute_provider_stake.go +++ b/x/pairing/client/cli/tx_distribute_provider_stake.go @@ -33,16 +33,38 @@ func CmdDistributeProviderStake() *cobra.Command { return err } - provider := clientCtx.GetFromAddress().String() + address := clientCtx.GetFromAddress().String() pairingQuerier := types.NewQueryClient(clientCtx) ctx := context.Background() - response, err := pairingQuerier.Provider(ctx, &types.QueryProviderRequest{Address: provider}) + response, err := pairingQuerier.Provider(ctx, &types.QueryProviderRequest{Address: address}) if err != nil { return err } - msgs, err := CalculateDistbiruitions(provider, response.StakeEntries, args[0]) + if len(response.StakeEntries) == 0 { + // Check if the address is a vault by querying metadata + epochStorageQuerier := epochstoragetypes.NewQueryClient(clientCtx) + metadatasResponse, err := epochStorageQuerier.ProviderMetaData(ctx, &epochstoragetypes.QueryProviderMetaDataRequest{}) + if err != nil { + return err + } + + // If this provider has a vault set, try to use the vault 
address instead + // TOSO: this is a fix until we add a way to query the vault's metadata + for _, metadata := range metadatasResponse.MetaData { + if metadata.Vault == address { + // Query the vault's stake entries + response, err = pairingQuerier.Provider(ctx, &types.QueryProviderRequest{Address: metadata.Provider}) + if err != nil { + return err + } + break + } + } + } + + msgs, err := CalculateDistbiruitions(address, response.StakeEntries, args[0]) if err != nil { return err } @@ -64,6 +86,10 @@ type data struct { } func CalculateDistbiruitions(provider string, entries []epochstoragetypes.StakeEntry, distributionsArg string) ([]sdk.Msg, error) { + if len(entries) == 0 { + return nil, fmt.Errorf("provider: %s is not staked on any chain", provider) + } + splitedArgs := strings.Split(distributionsArg, ",") if len(splitedArgs)%2 != 0 { return nil, fmt.Errorf("args must: chain,percent,chain,percent") @@ -72,25 +98,40 @@ func CalculateDistbiruitions(provider string, entries []epochstoragetypes.StakeE totalStake := sdk.NewCoin(commontypes.TokenDenom, sdk.ZeroInt()) totalP := sdk.ZeroDec() distributions := []data{} + // First decode the args into chain->percent map + chainToPercent := make(map[string]sdk.Dec) for i := 0; i < len(splitedArgs); i += 2 { p, err := sdk.NewDecFromStr(splitedArgs[i+1]) if err != nil { return nil, err } - for _, e := range entries { - if splitedArgs[i] == e.Chain { - distributions = append(distributions, data{chain: e.Chain, original: e.Stake.Amount, percent: p}) - totalStake = totalStake.Add(e.Stake) - totalP = totalP.Add(p) - } + chainToPercent[splitedArgs[i]] = p + } + + // Then match entries with percentages and fill the data + for _, e := range entries { + if p, ok := chainToPercent[e.Chain]; ok { + distributions = append(distributions, data{chain: e.Chain, original: e.Stake.Amount, percent: p}) + totalStake = totalStake.Add(e.Stake) + totalP = totalP.Add(p) } } if len(distributions) != len(entries) { - return nil, fmt.Errorf("must 
specify percentages for all chains the provider is staked on") + // Print out which chains were specified vs which chains have stakes + specifiedChains := make([]string, 0) + for chain := range chainToPercent { + specifiedChains = append(specifiedChains, chain) + } + stakedChains := make([]string, 0) + for _, entry := range entries { + stakedChains = append(stakedChains, entry.Chain) + } + return nil, fmt.Errorf("chains mismatch - specified chains: %v, staked chains: %v", specifiedChains, stakedChains) } + if !totalP.Equal(sdk.NewDec(100)) { - return nil, fmt.Errorf("total percentages must be 100") + return nil, fmt.Errorf("total percentages must be 100, total input: %s", totalP.String()) } left := totalStake diff --git a/x/pairing/module_simulation.go b/x/pairing/module_simulation.go deleted file mode 100644 index 2d2e31c970..0000000000 --- a/x/pairing/module_simulation.go +++ /dev/null @@ -1,184 +0,0 @@ -package pairing - -import ( - "math/rand" - - "github.com/cosmos/cosmos-sdk/baseapp" - "github.com/cosmos/cosmos-sdk/testutil/sims" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/module" - simtypes "github.com/cosmos/cosmos-sdk/types/simulation" - types2 "github.com/cosmos/cosmos-sdk/x/auth/types" - "github.com/cosmos/cosmos-sdk/x/simulation" - "github.com/lavanet/lava/v4/testutil/sample" - pairingsimulation "github.com/lavanet/lava/v4/x/pairing/simulation" - "github.com/lavanet/lava/v4/x/pairing/types" -) - -// avoid unused import issue -var ( - _ = sample.AccAddress - _ = pairingsimulation.FindAccount - _ = sims.StakePerAccount - _ = simulation.MsgEntryKind - _ = baseapp.Paramspace -) - -const ( - opWeightMsgStakeProvider = "op_weight_msg_stake_provider" - // TODO: Determine the simulation weight value - defaultWeightMsgStakeProvider int = 100 - - opWeightMsgStakeClient = "op_weight_msg_stake_client" - // TODO: Determine the simulation weight value - defaultWeightMsgStakeClient int = 100 - - opWeightMsgUnstakeProvider = 
"op_weight_msg_unstake_provider" - // TODO: Determine the simulation weight value - defaultWeightMsgUnstakeProvider int = 100 - - opWeightMsgUnstakeClient = "op_weight_msg_unstake_client" - // TODO: Determine the simulation weight value - defaultWeightMsgUnstakeClient int = 100 - - opWeightMsgRelayPayment = "op_weight_msg_relay_payment" - // TODO: Determine the simulation weight value - defaultWeightMsgRelayPayment int = 100 - - opWeightMsgFreeze = "op_weight_msg_freeze" - // TODO: Determine the simulation weight value - defaultWeightMsgFreeze int = 100 - - opWeightMsgUnfreeze = "op_weight_msg_unfreeze" - // TODO: Determine the simulation weight value - defaultWeightMsgUnfreeze int = 100 - - // this line is used by starport scaffolding # simapp/module/const -) - -// GenerateGenesisState creates a randomized GenState of the module -func (AppModule) GenerateGenesisState(simState *module.SimulationState) { - accs := make([]string, len(simState.Accounts)) - for i, acc := range simState.Accounts { - accs[i] = acc.Address.String() - } - pairingGenesis := types.GenesisState{ - Params: types.DefaultParams(), - // this line is used by starport scaffolding # simapp/module/genesisState - } - simState.GenState[types.ModuleName] = simState.Cdc.MustMarshalJSON(&pairingGenesis) -} - -// ProposalContents doesn't return any content functions for governance proposals -func (AppModule) ProposalContents(_ module.SimulationState) []simtypes.WeightedProposalMsg { - return nil -} - -// TODO: Add weighted proposals -func (AppModule) ProposalMsgs(_ module.SimulationState) []simtypes.WeightedProposalMsg { - return []simtypes.WeightedProposalMsg{ - simulation.NewWeightedProposalMsg("op_weight_msg_update_params", 100, func(r *rand.Rand, ctx sdk.Context, accs []simtypes.Account) sdk.Msg { - return &types2.MsgUpdateParams{} - }), - } -} - -//// RandomizedParams creates randomized param changes for the simulator -// func (am AppModule) RandomizedParams(_ *rand.Rand) []simtypes.ParamChange { -// 
pairingParams := types.DefaultParams() -// return []simtypes.ParamChange{ -// simulation.NewSimParamChange(types.ModuleName, string(types.KeyFraudStakeSlashingFactor), func(r *rand.Rand) string { -// return string(types.Amino.MustMarshalJSON(pairingParams.FraudStakeSlashingFactor)) -// }), -// simulation.NewSimParamChange(types.ModuleName, string(types.KeyFraudSlashingAmount), func(r *rand.Rand) string { -// return string(types.Amino.MustMarshalJSON(pairingParams.FraudSlashingAmount)) -// }), -// simulation.NewSimParamChange(types.ModuleName, string(types.KeyEpochBlocksOverlap), func(r *rand.Rand) string { -// return string(types.Amino.MustMarshalJSON(pairingParams.EpochBlocksOverlap)) -// }), -// simulation.NewSimParamChange(types.ModuleName, string(types.KeyRecommendedEpochNumToCollectPayment), func(r *rand.Rand) string { -// return string(types.Amino.MustMarshalJSON(pairingParams.RecommendedEpochNumToCollectPayment)) -// }), -// } -// } - -// RegisterStoreDecoder registers a decoder -func (am AppModule) RegisterStoreDecoder(_ sdk.StoreDecoderRegistry) {} - -// WeightedOperations returns the all the gov module operations with their respective weights. 
-func (am AppModule) WeightedOperations(simState module.SimulationState) []simtypes.WeightedOperation { - operations := make([]simtypes.WeightedOperation, 0) - - var weightMsgStakeProvider int - simState.AppParams.GetOrGenerate(simState.Cdc, opWeightMsgStakeProvider, &weightMsgStakeProvider, nil, - func(_ *rand.Rand) { - weightMsgStakeProvider = defaultWeightMsgStakeProvider - }, - ) - operations = append(operations, simulation.NewWeightedOperation( - weightMsgStakeProvider, - pairingsimulation.SimulateMsgStakeProvider(am.accountKeeper, am.bankKeeper, am.keeper), - )) - - var weightMsgStakeClient int - simState.AppParams.GetOrGenerate(simState.Cdc, opWeightMsgStakeClient, &weightMsgStakeClient, nil, - func(_ *rand.Rand) { - weightMsgStakeClient = defaultWeightMsgStakeClient - }, - ) - - var weightMsgUnstakeProvider int - simState.AppParams.GetOrGenerate(simState.Cdc, opWeightMsgUnstakeProvider, &weightMsgUnstakeProvider, nil, - func(_ *rand.Rand) { - weightMsgUnstakeProvider = defaultWeightMsgUnstakeProvider - }, - ) - operations = append(operations, simulation.NewWeightedOperation( - weightMsgUnstakeProvider, - pairingsimulation.SimulateMsgUnstakeProvider(am.accountKeeper, am.bankKeeper, am.keeper), - )) - - var weightMsgUnstakeClient int - simState.AppParams.GetOrGenerate(simState.Cdc, opWeightMsgUnstakeClient, &weightMsgUnstakeClient, nil, - func(_ *rand.Rand) { - weightMsgUnstakeClient = defaultWeightMsgUnstakeClient - }, - ) - - var weightMsgRelayPayment int - simState.AppParams.GetOrGenerate(simState.Cdc, opWeightMsgRelayPayment, &weightMsgRelayPayment, nil, - func(_ *rand.Rand) { - weightMsgRelayPayment = defaultWeightMsgRelayPayment - }, - ) - operations = append(operations, simulation.NewWeightedOperation( - weightMsgRelayPayment, - pairingsimulation.SimulateMsgRelayPayment(am.accountKeeper, am.bankKeeper, am.keeper), - )) - - var weightMsgFreeze int - simState.AppParams.GetOrGenerate(simState.Cdc, opWeightMsgFreeze, &weightMsgFreeze, nil, - func(_ 
*rand.Rand) { - weightMsgFreeze = defaultWeightMsgFreeze - }, - ) - operations = append(operations, simulation.NewWeightedOperation( - weightMsgFreeze, - pairingsimulation.SimulateMsgFreeze(am.accountKeeper, am.bankKeeper, am.keeper), - )) - - var weightMsgUnfreeze int - simState.AppParams.GetOrGenerate(simState.Cdc, opWeightMsgUnfreeze, &weightMsgUnfreeze, nil, - func(_ *rand.Rand) { - weightMsgUnfreeze = defaultWeightMsgUnfreeze - }, - ) - operations = append(operations, simulation.NewWeightedOperation( - weightMsgUnfreeze, - pairingsimulation.SimulateMsgUnfreeze(am.accountKeeper, am.bankKeeper, am.keeper), - )) - - // this line is used by starport scaffolding # simapp/module/operation - - return operations -} diff --git a/x/pairing/simulation/freeze.go b/x/pairing/simulation/freeze.go deleted file mode 100644 index c583ddb917..0000000000 --- a/x/pairing/simulation/freeze.go +++ /dev/null @@ -1,29 +0,0 @@ -package simulation - -import ( - "math/rand" - - "github.com/cosmos/cosmos-sdk/baseapp" - sdk "github.com/cosmos/cosmos-sdk/types" - simtypes "github.com/cosmos/cosmos-sdk/types/simulation" - "github.com/lavanet/lava/v4/x/pairing/keeper" - "github.com/lavanet/lava/v4/x/pairing/types" -) - -func SimulateMsgFreeze( - ak types.AccountKeeper, - bk types.BankKeeper, - k keeper.Keeper, -) simtypes.Operation { - return func(r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string, - ) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { - simAccount, _ := simtypes.RandomAcc(r, accs) - msg := &types.MsgFreezeProvider{ - Creator: simAccount.Address.String(), - } - - // TODO: Handling the Freeze simulation - - return simtypes.NoOpMsg(types.ModuleName, msg.Type(), "Freeze simulation not implemented"), nil, nil - } -} diff --git a/x/pairing/simulation/relay_payment.go b/x/pairing/simulation/relay_payment.go deleted file mode 100644 index 6cd2b08a8d..0000000000 --- a/x/pairing/simulation/relay_payment.go +++ /dev/null @@ -1,29 
+0,0 @@ -package simulation - -import ( - "math/rand" - - "github.com/cosmos/cosmos-sdk/baseapp" - sdk "github.com/cosmos/cosmos-sdk/types" - simtypes "github.com/cosmos/cosmos-sdk/types/simulation" - "github.com/lavanet/lava/v4/x/pairing/keeper" - "github.com/lavanet/lava/v4/x/pairing/types" -) - -func SimulateMsgRelayPayment( - ak types.AccountKeeper, - bk types.BankKeeper, - k keeper.Keeper, -) simtypes.Operation { - return func(r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string, - ) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { - simAccount, _ := simtypes.RandomAcc(r, accs) - msg := &types.MsgRelayPayment{ - Creator: simAccount.Address.String(), - } - - // TODO: Handling the RelayPayment simulation - - return simtypes.NoOpMsg(types.ModuleName, msg.Type(), "RelayPayment simulation not implemented"), nil, nil - } -} diff --git a/x/pairing/simulation/simap.go b/x/pairing/simulation/simap.go deleted file mode 100644 index 92c437c0d1..0000000000 --- a/x/pairing/simulation/simap.go +++ /dev/null @@ -1,15 +0,0 @@ -package simulation - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - simtypes "github.com/cosmos/cosmos-sdk/types/simulation" -) - -// FindAccount find a specific address from an account list -func FindAccount(accs []simtypes.Account, address string) (simtypes.Account, bool) { - creator, err := sdk.AccAddressFromBech32(address) - if err != nil { - panic(err) - } - return simtypes.FindAccount(accs, creator) -} diff --git a/x/pairing/simulation/stake_provider.go b/x/pairing/simulation/stake_provider.go deleted file mode 100644 index 0eece3809b..0000000000 --- a/x/pairing/simulation/stake_provider.go +++ /dev/null @@ -1,45 +0,0 @@ -package simulation - -import ( - "math/rand" - - "github.com/cosmos/cosmos-sdk/baseapp" - sdk "github.com/cosmos/cosmos-sdk/types" - simtypes "github.com/cosmos/cosmos-sdk/types/simulation" - "github.com/lavanet/lava/v4/x/pairing/keeper" - 
"github.com/lavanet/lava/v4/x/pairing/types" -) - -func SimulateMsgStakeProvider( - ak types.AccountKeeper, - bk types.BankKeeper, - k keeper.Keeper, -) simtypes.Operation { - return func(r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string, - ) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { - simAccount, _ := simtypes.RandomAcc(r, accs) - msg := &types.MsgStakeProvider{ - Creator: simAccount.Address.String(), - } - - // TODO: Handling the StakeProvider simulation - - return simtypes.NoOpMsg(types.ModuleName, msg.Type(), "StakeProvider simulation not implemented"), nil, nil - } -} - -func SimulateMsgStakeProvider_HappyFlow( - ak types.AccountKeeper, - bk types.BankKeeper, - k keeper.Keeper, -) simtypes.Operation { - return func(r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string, - ) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { - simAccount, _ := simtypes.RandomAcc(r, accs) - msg := &types.MsgStakeProvider{ - Creator: simAccount.Address.String(), - } - - return simtypes.NoOpMsg(types.ModuleName, msg.Type(), "StakeProvider simulation not implemented"), nil, nil - } -} diff --git a/x/pairing/simulation/unfreeze.go b/x/pairing/simulation/unfreeze.go deleted file mode 100644 index 77256d7b20..0000000000 --- a/x/pairing/simulation/unfreeze.go +++ /dev/null @@ -1,29 +0,0 @@ -package simulation - -import ( - "math/rand" - - "github.com/cosmos/cosmos-sdk/baseapp" - sdk "github.com/cosmos/cosmos-sdk/types" - simtypes "github.com/cosmos/cosmos-sdk/types/simulation" - "github.com/lavanet/lava/v4/x/pairing/keeper" - "github.com/lavanet/lava/v4/x/pairing/types" -) - -func SimulateMsgUnfreeze( - ak types.AccountKeeper, - bk types.BankKeeper, - k keeper.Keeper, -) simtypes.Operation { - return func(r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string, - ) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { - simAccount, _ 
:= simtypes.RandomAcc(r, accs) - msg := &types.MsgUnfreezeProvider{ - Creator: simAccount.Address.String(), - } - - // TODO: Handling the Unfreeze simulation - - return simtypes.NoOpMsg(types.ModuleName, msg.Type(), "Unfreeze simulation not implemented"), nil, nil - } -} diff --git a/x/pairing/simulation/unstake_provider.go b/x/pairing/simulation/unstake_provider.go deleted file mode 100644 index 71f79e812a..0000000000 --- a/x/pairing/simulation/unstake_provider.go +++ /dev/null @@ -1,29 +0,0 @@ -package simulation - -import ( - "math/rand" - - "github.com/cosmos/cosmos-sdk/baseapp" - sdk "github.com/cosmos/cosmos-sdk/types" - simtypes "github.com/cosmos/cosmos-sdk/types/simulation" - "github.com/lavanet/lava/v4/x/pairing/keeper" - "github.com/lavanet/lava/v4/x/pairing/types" -) - -func SimulateMsgUnstakeProvider( - ak types.AccountKeeper, - bk types.BankKeeper, - k keeper.Keeper, -) simtypes.Operation { - return func(r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string, - ) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { - simAccount, _ := simtypes.RandomAcc(r, accs) - msg := &types.MsgUnstakeProvider{ - Creator: simAccount.Address.String(), - } - - // TODO: Handling the UnstakeProvider simulation - - return simtypes.NoOpMsg(types.ModuleName, msg.Type(), "UnstakeProvider simulation not implemented"), nil, nil - } -} diff --git a/x/pairing/types/relay_exchange.go b/x/pairing/types/relay_exchange.go index 429ad417b2..6872668f9c 100644 --- a/x/pairing/types/relay_exchange.go +++ b/x/pairing/types/relay_exchange.go @@ -1,6 +1,7 @@ package types import ( + "bytes" "strings" "github.com/lavanet/lava/v4/utils" @@ -39,7 +40,7 @@ func (re RelayExchange) DataToSign() []byte { metadataBytes, } - return sigs.Join(msgParts) + return bytes.Join(msgParts, nil) } func (re RelayExchange) HashRounds() int { @@ -65,6 +66,5 @@ func (rp RelayPrivateData) GetContentHashData() []byte { seenBlockBytes, rp.Salt, } - msgData := 
sigs.Join(msgParts) - return msgData + return bytes.Join(msgParts, nil) } diff --git a/x/pairing/types/relay_exchange_test.go b/x/pairing/types/relay_exchange_test.go new file mode 100644 index 0000000000..5da398fd4b --- /dev/null +++ b/x/pairing/types/relay_exchange_test.go @@ -0,0 +1,113 @@ +package types + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestGetContentHashData_NilInputs(t *testing.T) { + // Test case with all nil/empty values + rp := RelayPrivateData{ + Metadata: nil, + Extensions: nil, + Addon: "", + ApiInterface: "", + ConnectionType: "", + ApiUrl: "", + Data: nil, + Salt: nil, + RequestBlock: 0, + SeenBlock: 0, + } + + // Function should not panic and return a valid byte slice + result := rp.GetContentHashData() + require.NotNil(t, result) + + // Test with nil Extensions but non-nil empty slice + rp.Extensions = []string{} + result = rp.GetContentHashData() + require.NotNil(t, result) + + // Test with nil Metadata but non-nil empty slice + rp.Metadata = []Metadata{} + result = rp.GetContentHashData() + require.NotNil(t, result) +} + +func TestGetContentHashData_NilData(t *testing.T) { + // Test case with nil Data field specifically + rp := RelayPrivateData{ + Metadata: []Metadata{}, + Extensions: []string{}, + Addon: "", + ApiInterface: "", + ConnectionType: "", + ApiUrl: "", + Data: nil, // Explicitly nil + Salt: make([]byte, 8), + RequestBlock: 0, + SeenBlock: 0, + } + + // Function should not panic + result := rp.GetContentHashData() + require.NotNil(t, result) +} + +func TestGetContentHashData_EdgeCases(t *testing.T) { + testCases := []struct { + name string + rp RelayPrivateData + }{ + { + name: "nil metadata with data", + rp: RelayPrivateData{ + Metadata: nil, + Data: []byte("test"), + }, + }, + { + name: "nil extensions with data", + rp: RelayPrivateData{ + Extensions: nil, + Data: []byte("test"), + }, + }, + { + name: "nil everything", + rp: RelayPrivateData{ + Metadata: nil, + Extensions: nil, + Addon: "", + 
ApiInterface: "", + ConnectionType: "", + ApiUrl: "", + Data: nil, + Salt: nil, + }, + }, + { + name: "empty strings with nil slices", + rp: RelayPrivateData{ + Metadata: []Metadata{}, + Extensions: []string{}, + Addon: "", + ApiInterface: "", + ConnectionType: "", + ApiUrl: "", + Data: nil, + Salt: nil, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Function should not panic + result := tc.rp.GetContentHashData() + require.NotNil(t, result) + }) + } +} diff --git a/x/pairing/types/relay_session.go b/x/pairing/types/relay_session.go index 7e73c341ec..e0240acad3 100644 --- a/x/pairing/types/relay_session.go +++ b/x/pairing/types/relay_session.go @@ -1,6 +1,8 @@ package types import ( + "bytes" + "github.com/lavanet/lava/v4/utils/sigs" ) @@ -28,5 +30,5 @@ func (rs RelaySession) CalculateHashForFinalization() []byte { blockHeightBytes, relayNumBytes, } - return sigs.Join(msgParts) + return bytes.Join(msgParts, nil) } diff --git a/x/plans/keeper/migrations.go b/x/plans/keeper/migrations.go index 4aba7e497f..aa296391d1 100644 --- a/x/plans/keeper/migrations.go +++ b/x/plans/keeper/migrations.go @@ -4,11 +4,8 @@ import ( "math" sdk "github.com/cosmos/cosmos-sdk/types" - v2 "github.com/lavanet/lava/v4/x/plans/migrations/v2" - v3 "github.com/lavanet/lava/v4/x/plans/migrations/v3" v7 "github.com/lavanet/lava/v4/x/plans/migrations/v7" v8 "github.com/lavanet/lava/v4/x/plans/migrations/v8" - projectsv3 "github.com/lavanet/lava/v4/x/projects/migrations/v3" ) type Migrator struct { @@ -19,80 +16,6 @@ func NewMigrator(keeper Keeper) Migrator { return Migrator{keeper: keeper} } -// Migrate2to3 implements store migration from v1 to v2: -// - Trigger the version upgrade of the planFS fixation store -// - Update plan policy -func (m Migrator) Migrate2to3(ctx sdk.Context) error { - planIndices := m.keeper.plansFS.AllEntryIndicesFilter(ctx, "", nil) - - for _, planIndex := range planIndices { - blocks := 
m.keeper.plansFS.GetAllEntryVersions(ctx, planIndex) - for _, block := range blocks { - var plan_v2 v2.Plan - m.keeper.plansFS.ReadEntry(ctx, planIndex, block, &plan_v2) - - // create policy struct - planPolicy := projectsv3.Policy{ - GeolocationProfile: uint64(1), - TotalCuLimit: plan_v2.ComputeUnits, - EpochCuLimit: plan_v2.ComputeUnitsPerEpoch, - MaxProvidersToPair: plan_v2.MaxProvidersToPair, - } - - // convert plan from type v2.Plan to types.Plan - plan_v3 := v3.Plan{ - Index: plan_v2.Index, - Block: plan_v2.Block, - Price: plan_v2.Price, - OveruseRate: plan_v2.OveruseRate, - AllowOveruse: plan_v2.AllowOveruse, - Description: plan_v2.Description, - Type: plan_v2.Type, - AnnualDiscountPercentage: plan_v2.AnnualDiscountPercentage, - PlanPolicy: planPolicy, - } - - m.keeper.plansFS.ModifyEntry(ctx, planIndex, block, &plan_v3) - } - } - - return nil -} - -// Migrate3to4 implements store migration from v3 to v4: -// - Trigger the version upgrade of the planFS fixation store -// - Replace the store prefix from module-name ("plan") to "plan-fs" -func (m Migrator) Migrate3to4(ctx sdk.Context) error { - // This migration used to call a deprecated fixationstore function called MigrateVersionAndPrefix - - return nil -} - -// Migrate4to5 implements store migration from v4 to v5: -// - Trigger the version upgrade of the planFS fixation store (so it will -// call the version upgrade of its timer store). 
-func (m Migrator) Migrate4to5(ctx sdk.Context) error { - // This migration used to call a deprecated fixationstore function called MigrateVersion - - return nil -} - -// Migrate5to6 implements store migration from v5 to v6: -// -- trigger fixation migration, deleteat and live variables -func (m Migrator) Migrate5to6(ctx sdk.Context) error { - // This migration used to call a deprecated fixationstore function called MigrateVersion - - return nil -} - -// Migrate6to7 implements store migration from v6 to v7: -// -- trigger fixation migration (v4->v5), initialize IsLatest field -func (m Migrator) Migrate6to7(ctx sdk.Context) error { - // This migration used to call a deprecated fixationstore function called MigrateVersion - - return nil -} - func (m Migrator) Migrate7to8(ctx sdk.Context) error { plansInds := m.keeper.plansFS.GetAllEntryIndices(ctx) for _, ind := range plansInds { diff --git a/x/plans/migrations/v2/plan.pb.go b/x/plans/migrations/v2/plan.pb.go deleted file mode 100755 index a2f4d3f7ce..0000000000 --- a/x/plans/migrations/v2/plan.pb.go +++ /dev/null @@ -1,794 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: plans/plan.proto - -package v2 - -import ( - fmt "fmt" - types "github.com/cosmos/cosmos-sdk/types" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type Plan struct { - Index string `protobuf:"bytes,1,opt,name=index,proto3" json:"index,omitempty"` - Block uint64 `protobuf:"varint,3,opt,name=block,proto3" json:"block,omitempty"` - Price types.Coin `protobuf:"bytes,4,opt,name=price,proto3" json:"price"` - ComputeUnits uint64 `protobuf:"varint,5,opt,name=compute_units,json=computeUnits,proto3" json:"compute_units,omitempty"` - ComputeUnitsPerEpoch uint64 `protobuf:"varint,6,opt,name=compute_units_per_epoch,json=computeUnitsPerEpoch,proto3" json:"compute_units_per_epoch,omitempty"` - MaxProvidersToPair uint64 `protobuf:"varint,7,opt,name=max_providers_to_pair,json=maxProvidersToPair,proto3" json:"max_providers_to_pair,omitempty"` - AllowOveruse bool `protobuf:"varint,8,opt,name=allow_overuse,json=allowOveruse,proto3" json:"allow_overuse,omitempty"` - OveruseRate uint64 `protobuf:"varint,9,opt,name=overuse_rate,json=overuseRate,proto3" json:"overuse_rate,omitempty"` - Description string `protobuf:"bytes,11,opt,name=description,proto3" json:"description,omitempty"` - Type string `protobuf:"bytes,12,opt,name=type,proto3" json:"type,omitempty"` - AnnualDiscountPercentage uint64 `protobuf:"varint,13,opt,name=annual_discount_percentage,json=annualDiscountPercentage,proto3" json:"annual_discount_percentage,omitempty"` -} - -func (m *Plan) Reset() { *m = Plan{} } -func (m *Plan) String() string { return proto.CompactTextString(m) } -func (*Plan) ProtoMessage() {} -func (*Plan) Descriptor() ([]byte, []int) { - return fileDescriptor_e5909a10cd0e3497, []int{0} -} -func (m *Plan) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Plan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Plan.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Plan) 
XXX_Merge(src proto.Message) { - xxx_messageInfo_Plan.Merge(m, src) -} -func (m *Plan) XXX_Size() int { - return m.Size() -} -func (m *Plan) XXX_DiscardUnknown() { - xxx_messageInfo_Plan.DiscardUnknown(m) -} - -var xxx_messageInfo_Plan proto.InternalMessageInfo - -func (m *Plan) GetIndex() string { - if m != nil { - return m.Index - } - return "" -} - -func (m *Plan) GetBlock() uint64 { - if m != nil { - return m.Block - } - return 0 -} - -func (m *Plan) GetPrice() types.Coin { - if m != nil { - return m.Price - } - return types.Coin{} -} - -func (m *Plan) GetComputeUnits() uint64 { - if m != nil { - return m.ComputeUnits - } - return 0 -} - -func (m *Plan) GetComputeUnitsPerEpoch() uint64 { - if m != nil { - return m.ComputeUnitsPerEpoch - } - return 0 -} - -func (m *Plan) GetMaxProvidersToPair() uint64 { - if m != nil { - return m.MaxProvidersToPair - } - return 0 -} - -func (m *Plan) GetAllowOveruse() bool { - if m != nil { - return m.AllowOveruse - } - return false -} - -func (m *Plan) GetOveruseRate() uint64 { - if m != nil { - return m.OveruseRate - } - return 0 -} - -func (m *Plan) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Plan) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *Plan) GetAnnualDiscountPercentage() uint64 { - if m != nil { - return m.AnnualDiscountPercentage - } - return 0 -} - -func init() { - proto.RegisterType((*Plan)(nil), "lavanet.lava.plans.PlanV2") -} - -func init() { proto.RegisterFile("plans/plan.proto", fileDescriptor_e5909a10cd0e3497) } - -var fileDescriptor_e5909a10cd0e3497 = []byte{ - // 437 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x52, 0xc1, 0x8e, 0xd3, 0x30, - 0x10, 0x6d, 0x68, 0xba, 0x74, 0x9d, 0xae, 0x54, 0x59, 0x45, 0x98, 0x1e, 0x42, 0x00, 0x21, 0xf5, - 0x94, 0xa8, 0xa0, 0xbd, 0x71, 0xda, 0x85, 0xcb, 0x5e, 0x88, 0x22, 0xb8, 0x70, 0xb1, 0x1c, 0xd7, - 0xea, 0x5a, 0x24, 0x1e, 
0xcb, 0x76, 0x4a, 0xf9, 0x0b, 0x3e, 0x83, 0xcf, 0xe0, 0xb8, 0xc7, 0x3d, - 0x72, 0x42, 0xa8, 0xfd, 0x11, 0x64, 0x27, 0xa0, 0x72, 0xf1, 0xcc, 0xbc, 0xf7, 0x3c, 0x6f, 0x6c, - 0x0d, 0x9a, 0xeb, 0x86, 0x29, 0x5b, 0xf8, 0x33, 0xd7, 0x06, 0x1c, 0x60, 0xdc, 0xb0, 0x1d, 0x53, - 0xc2, 0xe5, 0x3e, 0xe6, 0x81, 0x5e, 0x2e, 0xb6, 0xb0, 0x85, 0x40, 0x17, 0x3e, 0xeb, 0x95, 0xcb, - 0x94, 0x83, 0x6d, 0xc1, 0x16, 0x35, 0xb3, 0xa2, 0xd8, 0xad, 0x6b, 0xe1, 0xd8, 0xba, 0xe0, 0x20, - 0x87, 0x4e, 0xcf, 0x7f, 0x8c, 0x51, 0x5c, 0x36, 0x4c, 0xe1, 0x05, 0x9a, 0x48, 0xb5, 0x11, 0x7b, - 0x12, 0x65, 0xd1, 0xea, 0xbc, 0xea, 0x0b, 0x8f, 0xd6, 0x0d, 0xf0, 0xcf, 0x64, 0x9c, 0x45, 0xab, - 0xb8, 0xea, 0x0b, 0x7c, 0x89, 0x26, 0xda, 0x48, 0x2e, 0x48, 0x9c, 0x45, 0xab, 0xe4, 0xd5, 0x93, - 0xbc, 0x37, 0xc9, 0xbd, 0x49, 0x3e, 0x98, 0xe4, 0xd7, 0x20, 0xd5, 0x55, 0x7c, 0xf7, 0xeb, 0xe9, - 0xa8, 0xea, 0xd5, 0xf8, 0x05, 0xba, 0xe0, 0xd0, 0xea, 0xce, 0x09, 0xda, 0x29, 0xe9, 0x2c, 0x99, - 0x84, 0xa6, 0xb3, 0x01, 0xfc, 0xe8, 0x31, 0x7c, 0x89, 0x1e, 0xff, 0x27, 0xa2, 0x5a, 0x18, 0x2a, - 0x34, 0xf0, 0x5b, 0x72, 0x16, 0xe4, 0x8b, 0x53, 0x79, 0x29, 0xcc, 0x3b, 0xcf, 0xe1, 0x35, 0x7a, - 0xd4, 0xb2, 0x3d, 0xd5, 0x06, 0x76, 0x72, 0x23, 0x8c, 0xa5, 0x0e, 0xa8, 0x66, 0xd2, 0x90, 0x87, - 0xe1, 0x12, 0x6e, 0xd9, 0xbe, 0xfc, 0xcb, 0x7d, 0x80, 0x92, 0x49, 0xe3, 0xc7, 0x61, 0x4d, 0x03, - 0x5f, 0x28, 0xec, 0x84, 0xe9, 0xac, 0x20, 0xd3, 0x2c, 0x5a, 0x4d, 0xab, 0x59, 0x00, 0xdf, 0xf7, - 0x18, 0x7e, 0x86, 0x66, 0x03, 0x4d, 0x0d, 0x73, 0x82, 0x9c, 0x87, 0x76, 0xc9, 0x80, 0x55, 0xcc, - 0x09, 0x9c, 0xa1, 0x64, 0x23, 0x2c, 0x37, 0x52, 0x3b, 0x09, 0x8a, 0x24, 0xe1, 0xff, 0x4e, 0x21, - 0x8c, 0x51, 0xec, 0xbe, 0x6a, 0x41, 0x66, 0x81, 0x0a, 0x39, 0x7e, 0x83, 0x96, 0x4c, 0xa9, 0x8e, - 0x35, 0x74, 0x23, 0x2d, 0x87, 0x4e, 0x39, 0xff, 0x52, 0x2e, 0x94, 0x63, 0x5b, 0x41, 0x2e, 0x82, - 0x0d, 0xe9, 0x15, 0x6f, 0x07, 0x41, 0xf9, 0x8f, 0xbf, 0x89, 0xa7, 0x68, 0x9e, 0xdc, 0xc4, 0xd3, - 0x07, 0xf3, 0xf1, 0xd5, 0xf5, 0xf7, 0x43, 0x1a, 0xdd, 0x1d, 
0xd2, 0xe8, 0xfe, 0x90, 0x46, 0xbf, - 0x0f, 0x69, 0xf4, 0xed, 0x98, 0x8e, 0xee, 0x8f, 0xe9, 0xe8, 0xe7, 0x31, 0x1d, 0x7d, 0x7a, 0xb9, - 0x95, 0xee, 0xb6, 0xab, 0x73, 0x0e, 0x6d, 0x31, 0x6c, 0x4d, 0x88, 0xc5, 0xbe, 0xe8, 0xd7, 0xca, - 0x4f, 0x63, 0xeb, 0xb3, 0xb0, 0x0e, 0xaf, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x19, 0x0a, 0x79, - 0xc2, 0x6c, 0x02, 0x00, 0x00, -} - -func (this *Plan) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Plan) - if !ok { - that2, ok := that.(Plan) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Index != that1.Index { - return false - } - if this.Block != that1.Block { - return false - } - if !this.Price.Equal(&that1.Price) { - return false - } - if this.ComputeUnits != that1.ComputeUnits { - return false - } - if this.ComputeUnitsPerEpoch != that1.ComputeUnitsPerEpoch { - return false - } - if this.MaxProvidersToPair != that1.MaxProvidersToPair { - return false - } - if this.AllowOveruse != that1.AllowOveruse { - return false - } - if this.OveruseRate != that1.OveruseRate { - return false - } - if this.Description != that1.Description { - return false - } - if this.Type != that1.Type { - return false - } - if this.AnnualDiscountPercentage != that1.AnnualDiscountPercentage { - return false - } - return true -} -func (m *Plan) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Plan) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Plan) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.AnnualDiscountPercentage != 0 { - i = encodeVarintPlan(dAtA, i, uint64(m.AnnualDiscountPercentage)) - i-- - dAtA[i] = 0x68 - } - 
if len(m.Type) > 0 { - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintPlan(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0x62 - } - if len(m.Description) > 0 { - i -= len(m.Description) - copy(dAtA[i:], m.Description) - i = encodeVarintPlan(dAtA, i, uint64(len(m.Description))) - i-- - dAtA[i] = 0x5a - } - if m.OveruseRate != 0 { - i = encodeVarintPlan(dAtA, i, uint64(m.OveruseRate)) - i-- - dAtA[i] = 0x48 - } - if m.AllowOveruse { - i-- - if m.AllowOveruse { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x40 - } - if m.MaxProvidersToPair != 0 { - i = encodeVarintPlan(dAtA, i, uint64(m.MaxProvidersToPair)) - i-- - dAtA[i] = 0x38 - } - if m.ComputeUnitsPerEpoch != 0 { - i = encodeVarintPlan(dAtA, i, uint64(m.ComputeUnitsPerEpoch)) - i-- - dAtA[i] = 0x30 - } - if m.ComputeUnits != 0 { - i = encodeVarintPlan(dAtA, i, uint64(m.ComputeUnits)) - i-- - dAtA[i] = 0x28 - } - { - size, err := m.Price.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPlan(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - if m.Block != 0 { - i = encodeVarintPlan(dAtA, i, uint64(m.Block)) - i-- - dAtA[i] = 0x18 - } - if len(m.Index) > 0 { - i -= len(m.Index) - copy(dAtA[i:], m.Index) - i = encodeVarintPlan(dAtA, i, uint64(len(m.Index))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintPlan(dAtA []byte, offset int, v uint64) int { - offset -= sovPlan(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Plan) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Index) - if l > 0 { - n += 1 + l + sovPlan(uint64(l)) - } - if m.Block != 0 { - n += 1 + sovPlan(uint64(m.Block)) - } - l = m.Price.Size() - n += 1 + l + sovPlan(uint64(l)) - if m.ComputeUnits != 0 { - n += 1 + sovPlan(uint64(m.ComputeUnits)) - } - if m.ComputeUnitsPerEpoch != 0 { - n += 1 + 
sovPlan(uint64(m.ComputeUnitsPerEpoch)) - } - if m.MaxProvidersToPair != 0 { - n += 1 + sovPlan(uint64(m.MaxProvidersToPair)) - } - if m.AllowOveruse { - n += 2 - } - if m.OveruseRate != 0 { - n += 1 + sovPlan(uint64(m.OveruseRate)) - } - l = len(m.Description) - if l > 0 { - n += 1 + l + sovPlan(uint64(l)) - } - l = len(m.Type) - if l > 0 { - n += 1 + l + sovPlan(uint64(l)) - } - if m.AnnualDiscountPercentage != 0 { - n += 1 + sovPlan(uint64(m.AnnualDiscountPercentage)) - } - return n -} - -func sovPlan(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozPlan(x uint64) (n int) { - return sovPlan(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Plan) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Plan: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Plan: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlan - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlan - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Index = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - 
case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Block", wireType) - } - m.Block = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Block |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Price", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPlan - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPlan - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Price.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ComputeUnits", wireType) - } - m.ComputeUnits = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ComputeUnits |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ComputeUnitsPerEpoch", wireType) - } - m.ComputeUnitsPerEpoch = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ComputeUnitsPerEpoch |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxProvidersToPair", wireType) - } - m.MaxProvidersToPair = 0 - for shift := uint(0); ; shift += 
7 { - if shift >= 64 { - return ErrIntOverflowPlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxProvidersToPair |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowOveruse", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.AllowOveruse = bool(v != 0) - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OveruseRate", wireType) - } - m.OveruseRate = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OveruseRate |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlan - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlan - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= 
uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlan - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlan - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 13: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AnnualDiscountPercentage", wireType) - } - m.AnnualDiscountPercentage = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.AnnualDiscountPercentage |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipPlan(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPlan - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipPlan(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlan - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlan - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlan - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) 
<< shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthPlan - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupPlan - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthPlan - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthPlan = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowPlan = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupPlan = fmt.Errorf("proto: unexpected end of group") -) diff --git a/x/plans/migrations/v3/plan.pb.go b/x/plans/migrations/v3/plan.pb.go deleted file mode 100755 index a31305618f..0000000000 --- a/x/plans/migrations/v3/plan.pb.go +++ /dev/null @@ -1,736 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: plans/plan.proto - -package v3 - -import ( - fmt "fmt" - types "github.com/cosmos/cosmos-sdk/types" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - v3 "github.com/lavanet/lava/v4/x/projects/migrations/v3" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type Plan struct { - Index string `protobuf:"bytes,1,opt,name=index,proto3" json:"index,omitempty"` - Block uint64 `protobuf:"varint,3,opt,name=block,proto3" json:"block,omitempty"` - Price types.Coin `protobuf:"bytes,4,opt,name=price,proto3" json:"price"` - AllowOveruse bool `protobuf:"varint,8,opt,name=allow_overuse,json=allowOveruse,proto3" json:"allow_overuse,omitempty"` - OveruseRate uint64 `protobuf:"varint,9,opt,name=overuse_rate,json=overuseRate,proto3" json:"overuse_rate,omitempty"` - Description string `protobuf:"bytes,11,opt,name=description,proto3" json:"description,omitempty"` - Type string `protobuf:"bytes,12,opt,name=type,proto3" json:"type,omitempty"` - AnnualDiscountPercentage uint64 `protobuf:"varint,13,opt,name=annual_discount_percentage,json=annualDiscountPercentage,proto3" json:"annual_discount_percentage,omitempty"` - PlanPolicy v3.Policy `protobuf:"bytes,14,opt,name=plan_policy,json=planPolicy,proto3" json:"plan_policy"` -} - -func (m *Plan) Reset() { *m = Plan{} } -func (m *Plan) String() string { return proto.CompactTextString(m) } -func (*Plan) ProtoMessage() {} -func (*Plan) Descriptor() ([]byte, []int) { - return fileDescriptor_e5909a10cd0e3497, []int{0} -} -func (m *Plan) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Plan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Plan.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Plan) XXX_Merge(src proto.Message) { - xxx_messageInfo_Plan.Merge(m, src) -} -func (m *Plan) XXX_Size() int { - return m.Size() -} -func (m *Plan) XXX_DiscardUnknown() { - xxx_messageInfo_Plan.DiscardUnknown(m) -} - -var xxx_messageInfo_Plan proto.InternalMessageInfo - -func (m *Plan) GetIndex() string { - if m != nil { - return m.Index 
- } - return "" -} - -func (m *Plan) GetBlock() uint64 { - if m != nil { - return m.Block - } - return 0 -} - -func (m *Plan) GetPrice() types.Coin { - if m != nil { - return m.Price - } - return types.Coin{} -} - -func (m *Plan) GetAllowOveruse() bool { - if m != nil { - return m.AllowOveruse - } - return false -} - -func (m *Plan) GetOveruseRate() uint64 { - if m != nil { - return m.OveruseRate - } - return 0 -} - -func (m *Plan) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Plan) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *Plan) GetAnnualDiscountPercentage() uint64 { - if m != nil { - return m.AnnualDiscountPercentage - } - return 0 -} - -func (m *Plan) GetPlanPolicy() v3.Policy { - if m != nil { - return m.PlanPolicy - } - return v3.Policy{} -} - -func init() { - proto.RegisterType((*Plan)(nil), "lavanet.lava.plans.PlanV3") -} - -func init() { proto.RegisterFile("plans/plan.proto", fileDescriptor_e5909a10cd0e3497) } - -var fileDescriptor_e5909a10cd0e3497 = []byte{ - // 418 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x52, 0xcd, 0x8e, 0xd3, 0x30, - 0x10, 0xae, 0xa9, 0xdb, 0xcd, 0x3a, 0x5d, 0x14, 0x59, 0x2b, 0x64, 0x2a, 0x11, 0x02, 0x08, 0xa9, - 0x27, 0x5b, 0x0b, 0xe2, 0xc6, 0x69, 0x77, 0x4f, 0xb9, 0x50, 0xe5, 0xc8, 0x25, 0x72, 0x5c, 0xab, - 0x18, 0xb2, 0x76, 0x14, 0xbb, 0x65, 0xf7, 0x11, 0xb8, 0xf1, 0x18, 0x3c, 0xca, 0x1e, 0xf7, 0xc8, - 0x09, 0xa1, 0xf6, 0x45, 0x90, 0x7f, 0x84, 0xe0, 0x92, 0x99, 0xf9, 0xbe, 0x2f, 0xfe, 0x3c, 0x33, - 0x46, 0xc5, 0xd0, 0x73, 0x6d, 0x99, 0xff, 0xd2, 0x61, 0x34, 0xce, 0x60, 0xdc, 0xf3, 0x3d, 0xd7, - 0xd2, 0x51, 0x1f, 0x69, 0xa0, 0x97, 0xe7, 0x5b, 0xb3, 0x35, 0x81, 0x66, 0x3e, 0x8b, 0xca, 0x65, - 0x29, 0x8c, 0xbd, 0x31, 0x96, 0x75, 0xdc, 0x4a, 0xb6, 0xbf, 0xe8, 0xa4, 0xe3, 0x17, 0x4c, 0x18, - 0x95, 0x4e, 0x5a, 0x3e, 0x19, 0x46, 0xf3, 0x59, 0x0a, 0x67, 0x59, 0x4a, 0x22, 0xfe, 0xf2, 
0xdb, - 0x14, 0xc1, 0x75, 0xcf, 0x35, 0x3e, 0x47, 0x33, 0xa5, 0x37, 0xf2, 0x96, 0x80, 0x0a, 0xac, 0x4e, - 0x9b, 0x58, 0x78, 0xb4, 0xeb, 0x8d, 0xf8, 0x42, 0xa6, 0x15, 0x58, 0xc1, 0x26, 0x16, 0xf8, 0x1d, - 0x9a, 0x0d, 0xa3, 0x12, 0x92, 0xc0, 0x0a, 0xac, 0xf2, 0x37, 0x4f, 0x69, 0x34, 0xa7, 0xde, 0x9c, - 0x26, 0x73, 0x7a, 0x65, 0x94, 0xbe, 0x84, 0xf7, 0xbf, 0x9e, 0x4f, 0x9a, 0xa8, 0xc6, 0xaf, 0xd0, - 0x19, 0xef, 0x7b, 0xf3, 0xb5, 0x35, 0x7b, 0x39, 0xee, 0xac, 0x24, 0x59, 0x05, 0x56, 0x59, 0xb3, - 0x08, 0xe0, 0x87, 0x88, 0xe1, 0x17, 0x68, 0x91, 0xe8, 0x76, 0xe4, 0x4e, 0x92, 0xd3, 0x60, 0x9c, - 0x27, 0xac, 0xe1, 0x4e, 0xe2, 0x0a, 0xe5, 0x1b, 0x69, 0xc5, 0xa8, 0x06, 0xa7, 0x8c, 0x26, 0x79, - 0xb8, 0xf0, 0xbf, 0x10, 0xc6, 0x08, 0xba, 0xbb, 0x41, 0x92, 0x45, 0xa0, 0x42, 0x8e, 0xdf, 0xa3, - 0x25, 0xd7, 0x7a, 0xc7, 0xfb, 0x76, 0xa3, 0xac, 0x30, 0x3b, 0xed, 0xda, 0x41, 0x8e, 0x42, 0x6a, - 0xc7, 0xb7, 0x92, 0x9c, 0x05, 0x1b, 0x12, 0x15, 0xd7, 0x49, 0xb0, 0xfe, 0xcb, 0xe3, 0x6b, 0x94, - 0xfb, 0xf1, 0xb7, 0x83, 0xe9, 0x95, 0xb8, 0x23, 0x8f, 0x43, 0xe3, 0xcf, 0xe8, 0xff, 0xfb, 0x49, - 0x23, 0xa6, 0xeb, 0x20, 0x4a, 0xcd, 0x23, 0xff, 0x5f, 0x44, 0x6a, 0x98, 0xa1, 0x22, 0xaf, 0x61, - 0xf6, 0xa8, 0x98, 0xd6, 0x30, 0x9b, 0x15, 0xf3, 0x1a, 0x66, 0xf3, 0xe2, 0xa4, 0x86, 0xd9, 0x49, - 0x91, 0x5d, 0x5e, 0xfd, 0x38, 0x94, 0xe0, 0xfe, 0x50, 0x82, 0x87, 0x43, 0x09, 0x7e, 0x1f, 0x4a, - 0xf0, 0xfd, 0x58, 0x4e, 0x1e, 0x8e, 0xe5, 0xe4, 0xe7, 0xb1, 0x9c, 0x7c, 0x7c, 0xbd, 0x55, 0xee, - 0xd3, 0xae, 0xa3, 0xc2, 0xdc, 0xb0, 0x64, 0x1b, 0x22, 0xbb, 0x65, 0xf1, 0xdd, 0xf8, 0x2e, 0x6d, - 0x37, 0x0f, 0x7b, 0x7d, 0xfb, 0x27, 0x00, 0x00, 0xff, 0xff, 0xaa, 0xd3, 0x8a, 0x36, 0x4d, 0x02, - 0x00, 0x00, -} - -func (this *Plan) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Plan) - if !ok { - that2, ok := that.(Plan) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } 
- if this.Index != that1.Index { - return false - } - if this.Block != that1.Block { - return false - } - if !this.Price.Equal(&that1.Price) { - return false - } - if this.AllowOveruse != that1.AllowOveruse { - return false - } - if this.OveruseRate != that1.OveruseRate { - return false - } - if this.Description != that1.Description { - return false - } - if this.Type != that1.Type { - return false - } - if this.AnnualDiscountPercentage != that1.AnnualDiscountPercentage { - return false - } - if !this.PlanPolicy.Equal(&that1.PlanPolicy) { - return false - } - return true -} -func (m *Plan) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Plan) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Plan) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.PlanPolicy.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPlan(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x72 - if m.AnnualDiscountPercentage != 0 { - i = encodeVarintPlan(dAtA, i, uint64(m.AnnualDiscountPercentage)) - i-- - dAtA[i] = 0x68 - } - if len(m.Type) > 0 { - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintPlan(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0x62 - } - if len(m.Description) > 0 { - i -= len(m.Description) - copy(dAtA[i:], m.Description) - i = encodeVarintPlan(dAtA, i, uint64(len(m.Description))) - i-- - dAtA[i] = 0x5a - } - if m.OveruseRate != 0 { - i = encodeVarintPlan(dAtA, i, uint64(m.OveruseRate)) - i-- - dAtA[i] = 0x48 - } - if m.AllowOveruse { - i-- - if m.AllowOveruse { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x40 - } - { - size, err := m.Price.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i 
-= size - i = encodeVarintPlan(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - if m.Block != 0 { - i = encodeVarintPlan(dAtA, i, uint64(m.Block)) - i-- - dAtA[i] = 0x18 - } - if len(m.Index) > 0 { - i -= len(m.Index) - copy(dAtA[i:], m.Index) - i = encodeVarintPlan(dAtA, i, uint64(len(m.Index))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintPlan(dAtA []byte, offset int, v uint64) int { - offset -= sovPlan(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Plan) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Index) - if l > 0 { - n += 1 + l + sovPlan(uint64(l)) - } - if m.Block != 0 { - n += 1 + sovPlan(uint64(m.Block)) - } - l = m.Price.Size() - n += 1 + l + sovPlan(uint64(l)) - if m.AllowOveruse { - n += 2 - } - if m.OveruseRate != 0 { - n += 1 + sovPlan(uint64(m.OveruseRate)) - } - l = len(m.Description) - if l > 0 { - n += 1 + l + sovPlan(uint64(l)) - } - l = len(m.Type) - if l > 0 { - n += 1 + l + sovPlan(uint64(l)) - } - if m.AnnualDiscountPercentage != 0 { - n += 1 + sovPlan(uint64(m.AnnualDiscountPercentage)) - } - l = m.PlanPolicy.Size() - n += 1 + l + sovPlan(uint64(l)) - return n -} - -func sovPlan(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozPlan(x uint64) (n int) { - return sovPlan(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Plan) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Plan: wiretype end group for non-group") - } - if fieldNum <= 
0 { - return fmt.Errorf("proto: Plan: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlan - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlan - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Index = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Block", wireType) - } - m.Block = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Block |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Price", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPlan - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPlan - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Price.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowOveruse", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflowPlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.AllowOveruse = bool(v != 0) - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OveruseRate", wireType) - } - m.OveruseRate = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OveruseRate |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlan - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlan - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlan - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlan - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 13: - if wireType != 0 { - return 
fmt.Errorf("proto: wrong wireType = %d for field AnnualDiscountPercentage", wireType) - } - m.AnnualDiscountPercentage = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.AnnualDiscountPercentage |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 14: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PlanPolicy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPlan - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPlan - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.PlanPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPlan(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPlan - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipPlan(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlan - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlan - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 
0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlan - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthPlan - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupPlan - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthPlan - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthPlan = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowPlan = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupPlan = fmt.Errorf("proto: unexpected end of group") -) diff --git a/x/plans/migrations/v9/plan.pb.go b/x/plans/migrations/v9/plan.pb.go deleted file mode 100644 index e4f2b01f9b..0000000000 --- a/x/plans/migrations/v9/plan.pb.go +++ /dev/null @@ -1,833 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: lavanet/lava/plans/plan.proto - -package types - -import ( - fmt "fmt" - types "github.com/cosmos/cosmos-sdk/types" - _ "github.com/cosmos/gogoproto/gogoproto" - proto "github.com/cosmos/gogoproto/proto" - _ "github.com/lavanet/lava/v4/x/spec/types" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// The geolocation values are encoded as bits in a bitmask, with two special values: -// GLS is set to 0 so it will be restrictive with the AND provider. -// GL is set to -1 so it will be permissive with the AND provider. -type Geolocation int32 - -const ( - Geolocation_GLS Geolocation = 0 - Geolocation_USC Geolocation = 1 - Geolocation_EU Geolocation = 2 - Geolocation_USE Geolocation = 4 - Geolocation_USW Geolocation = 8 - Geolocation_AF Geolocation = 16 - Geolocation_AS Geolocation = 32 - Geolocation_AU Geolocation = 64 - Geolocation_GL Geolocation = 65535 -) - -var Geolocation_name = map[int32]string{ - 0: "GLS", - 1: "USC", - 2: "EU", - 4: "USE", - 8: "USW", - 16: "AF", - 32: "AS", - 64: "AU", - 65535: "GL", -} - -var Geolocation_value = map[string]int32{ - "GLS": 0, - "USC": 1, - "EU": 2, - "USE": 4, - "USW": 8, - "AF": 16, - "AS": 32, - "AU": 64, - "GL": 65535, -} - -func (x Geolocation) String() string { - return proto.EnumName(Geolocation_name, int32(x)) -} - -func (Geolocation) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_64c3707a3b09a2e5, []int{0} -} - -type Plan struct { - Index string `protobuf:"bytes,1,opt,name=index,proto3" json:"index"` - Block uint64 `protobuf:"varint,3,opt,name=block,proto3" json:"block"` - Price types.Coin `protobuf:"bytes,4,opt,name=price,proto3" json:"price"` - AllowOveruse bool `protobuf:"varint,8,opt,name=allow_overuse,json=allowOveruse,proto3" json:"allow_overuse"` - OveruseRate uint64 `protobuf:"varint,9,opt,name=overuse_rate,json=overuseRate,proto3" json:"overuse_rate"` - Description string `protobuf:"bytes,11,opt,name=description,proto3" json:"description"` - Type string `protobuf:"bytes,12,opt,name=type,proto3" json:"type"` - AnnualDiscountPercentage uint64 `protobuf:"varint,13,opt,name=annual_discount_percentage,json=annualDiscountPercentage,proto3" json:"annual_discount_percentage"` - PlanPolicy Policy 
`protobuf:"bytes,14,opt,name=plan_policy,json=planPolicy,proto3" json:"plan_policy"` - Projects uint64 `protobuf:"varint,15,opt,name=projects,proto3" json:"projects"` -} - -func (m *Plan) Reset() { *m = Plan{} } -func (m *Plan) String() string { return proto.CompactTextString(m) } -func (*Plan) ProtoMessage() {} -func (*Plan) Descriptor() ([]byte, []int) { - return fileDescriptor_64c3707a3b09a2e5, []int{0} -} -func (m *Plan) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Plan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Plan.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Plan) XXX_Merge(src proto.Message) { - xxx_messageInfo_Plan.Merge(m, src) -} -func (m *Plan) XXX_Size() int { - return m.Size() -} -func (m *Plan) XXX_DiscardUnknown() { - xxx_messageInfo_Plan.DiscardUnknown(m) -} - -var xxx_messageInfo_Plan proto.InternalMessageInfo - -func (m *Plan) GetIndex() string { - if m != nil { - return m.Index - } - return "" -} - -func (m *Plan) GetBlock() uint64 { - if m != nil { - return m.Block - } - return 0 -} - -func (m *Plan) GetPrice() types.Coin { - if m != nil { - return m.Price - } - return types.Coin{} -} - -func (m *Plan) GetAllowOveruse() bool { - if m != nil { - return m.AllowOveruse - } - return false -} - -func (m *Plan) GetOveruseRate() uint64 { - if m != nil { - return m.OveruseRate - } - return 0 -} - -func (m *Plan) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Plan) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *Plan) GetAnnualDiscountPercentage() uint64 { - if m != nil { - return m.AnnualDiscountPercentage - } - return 0 -} - -func (m *Plan) GetPlanPolicy() Policy { - if m != nil { - return m.PlanPolicy - } - return Policy{} -} - -func (m *Plan) GetProjects() 
uint64 { - if m != nil { - return m.Projects - } - return 0 -} - -func init() { - proto.RegisterEnum("lavanet.lava.plans.GeolocationV9", Geolocation_name, Geolocation_value) - proto.RegisterType((*Plan)(nil), "lavanet.lava.plans.PlanV9") -} - -func init() { proto.RegisterFile("lavanet/lava/plans/plan.proto", fileDescriptor_64c3707a3b09a2e5) } - -var fileDescriptor_64c3707a3b09a2e5 = []byte{ - // 568 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x52, 0x41, 0x8b, 0xd3, 0x40, - 0x14, 0x6e, 0xda, 0x6c, 0x3b, 0x9d, 0xb4, 0xee, 0x38, 0x7a, 0x88, 0x45, 0x93, 0x22, 0x28, 0xc5, - 0x43, 0x42, 0x5d, 0xf0, 0x28, 0xda, 0xba, 0x16, 0xca, 0x82, 0x25, 0xa5, 0x08, 0x2a, 0x84, 0xe9, - 0x74, 0xa8, 0xd1, 0x6c, 0x26, 0x24, 0x69, 0xdd, 0xfd, 0x17, 0xfe, 0x0c, 0x7f, 0x85, 0xe7, 0x3d, - 0xee, 0xd1, 0x53, 0x90, 0xf6, 0x96, 0x3f, 0xb1, 0x32, 0x33, 0xd9, 0xd2, 0xa2, 0x78, 0x99, 0xef, - 0xbd, 0xef, 0xfb, 0x26, 0x79, 0xef, 0xcd, 0x83, 0x8f, 0x42, 0xb2, 0x26, 0x11, 0xcb, 0x5c, 0x81, - 0x6e, 0x1c, 0x92, 0x28, 0x95, 0xa7, 0x13, 0x27, 0x3c, 0xe3, 0x18, 0x97, 0xb2, 0x23, 0xd0, 0x91, - 0x72, 0xe7, 0xfe, 0x92, 0x2f, 0xb9, 0x94, 0x5d, 0x11, 0x29, 0x67, 0xc7, 0xa2, 0x3c, 0x3d, 0xe7, - 0xa9, 0x3b, 0x27, 0x29, 0x73, 0xd7, 0xfd, 0x39, 0xcb, 0x48, 0xdf, 0xa5, 0x3c, 0x28, 0xbf, 0xd4, - 0x79, 0x7a, 0xf0, 0xa3, 0x34, 0x66, 0xd4, 0x25, 0x71, 0xe0, 0x53, 0x1e, 0x86, 0x8c, 0x66, 0x01, - 0xbf, 0xf5, 0xd9, 0xff, 0x2a, 0x88, 0x87, 0x01, 0xbd, 0x54, 0x86, 0xc7, 0x3f, 0x75, 0xa8, 0x4f, - 0x42, 0x12, 0x61, 0x1b, 0x1e, 0x05, 0xd1, 0x82, 0x5d, 0x98, 0x5a, 0x57, 0xeb, 0x35, 0x07, 0xcd, - 0x22, 0xb7, 0x15, 0xe1, 0x29, 0x10, 0x86, 0x79, 0xc8, 0xe9, 0x57, 0xb3, 0xd6, 0xd5, 0x7a, 0xba, - 0x32, 0x48, 0xc2, 0x53, 0x80, 0x5f, 0xc2, 0xa3, 0x38, 0x09, 0x28, 0x33, 0xf5, 0xae, 0xd6, 0x33, - 0x9e, 0x3f, 0x70, 0x54, 0x0f, 0x8e, 0xe8, 0xc1, 0x29, 0x7b, 0x70, 0x86, 0x3c, 0x88, 0x06, 0xed, - 0xab, 0xdc, 0xae, 0x88, 0xfb, 0xd2, 0xef, 0x29, 0xc0, 0x2f, 0x60, 0x9b, 0x84, 0x21, 
0xff, 0xe6, - 0xf3, 0x35, 0x4b, 0x56, 0x29, 0x33, 0x41, 0x57, 0xeb, 0x81, 0xc1, 0xdd, 0x22, 0xb7, 0x0f, 0x05, - 0xaf, 0x25, 0xd3, 0x77, 0x2a, 0xc3, 0x27, 0xb0, 0x55, 0x0a, 0x7e, 0x42, 0x32, 0x66, 0x36, 0x65, - 0x7d, 0xa8, 0xc8, 0xed, 0x03, 0xde, 0x33, 0x6e, 0xaf, 0x93, 0x8c, 0xe1, 0x3e, 0x34, 0x16, 0x2c, - 0xa5, 0x49, 0x10, 0x8b, 0x69, 0x99, 0x86, 0x6c, 0xfa, 0xb8, 0xc8, 0xed, 0x7d, 0xda, 0xdb, 0x4f, - 0xf0, 0x43, 0xa8, 0x67, 0x97, 0x31, 0x33, 0x5b, 0xd2, 0x0b, 0x8a, 0xdc, 0x96, 0xb9, 0x27, 0x4f, - 0xfc, 0x09, 0x76, 0x48, 0x14, 0xad, 0x48, 0xe8, 0x2f, 0x82, 0x94, 0xf2, 0x55, 0x94, 0xf9, 0x31, - 0x4b, 0x28, 0x8b, 0x32, 0xb2, 0x64, 0x66, 0x5b, 0xd6, 0x64, 0x15, 0xb9, 0xfd, 0x1f, 0x97, 0x67, - 0x2a, 0xed, 0x4d, 0x29, 0x4d, 0x76, 0x0a, 0x9e, 0x40, 0x43, 0x3c, 0x9e, 0xaf, 0xde, 0xce, 0xbc, - 0x23, 0x27, 0xdc, 0x71, 0xfe, 0xde, 0x27, 0x67, 0x22, 0x1d, 0x83, 0x7b, 0xe5, 0x88, 0xf7, 0xaf, - 0x79, 0x50, 0x24, 0xca, 0x80, 0x7b, 0x10, 0xc4, 0x09, 0xff, 0xc2, 0x68, 0x96, 0x9a, 0xc7, 0xb2, - 0xba, 0x56, 0x91, 0xdb, 0x3b, 0xce, 0xdb, 0x45, 0x63, 0x1d, 0x40, 0x64, 0x8c, 0x75, 0x50, 0x45, - 0xb5, 0xb1, 0x0e, 0x8e, 0x50, 0x7d, 0xac, 0x83, 0x3a, 0x6a, 0x8c, 0x75, 0xd0, 0x40, 0xe0, 0xd9, - 0x47, 0x68, 0x8c, 0x18, 0x0f, 0x39, 0x25, 0x72, 0x48, 0x0d, 0x58, 0x1b, 0x9d, 0x4d, 0x51, 0x45, - 0x04, 0xb3, 0xe9, 0x10, 0x69, 0xb8, 0x0e, 0xab, 0xa7, 0x33, 0x54, 0x55, 0xc4, 0x29, 0xd2, 0x55, - 0xf0, 0x1e, 0x01, 0xa1, 0xbc, 0x7e, 0x8b, 0x90, 0xc4, 0x29, 0xea, 0x4a, 0x9c, 0xa1, 0x57, 0x18, - 0xc0, 0xea, 0xe8, 0x0c, 0xdd, 0xdc, 0xd4, 0x06, 0xc3, 0x1f, 0x1b, 0x4b, 0xbb, 0xda, 0x58, 0xda, - 0xf5, 0xc6, 0xd2, 0x7e, 0x6f, 0x2c, 0xed, 0xfb, 0xd6, 0xaa, 0x5c, 0x6f, 0xad, 0xca, 0xaf, 0xad, - 0x55, 0xf9, 0xf0, 0x64, 0x19, 0x64, 0x9f, 0x57, 0x73, 0x87, 0xf2, 0x73, 0xf7, 0x60, 0xcf, 0x2f, - 0xca, 0x4d, 0x17, 0x0f, 0x93, 0xce, 0xeb, 0x72, 0xd3, 0x4f, 0xfe, 0x04, 0x00, 0x00, 0xff, 0xff, - 0xaa, 0xdb, 0x6c, 0x10, 0x9d, 0x03, 0x00, 0x00, -} - -func (this *Plan) Equal(that interface{}) bool { - if that == nil { - 
return this == nil - } - - that1, ok := that.(*Plan) - if !ok { - that2, ok := that.(Plan) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Index != that1.Index { - return false - } - if this.Block != that1.Block { - return false - } - if !this.Price.Equal(&that1.Price) { - return false - } - if this.AllowOveruse != that1.AllowOveruse { - return false - } - if this.OveruseRate != that1.OveruseRate { - return false - } - if this.Description != that1.Description { - return false - } - if this.Type != that1.Type { - return false - } - if this.AnnualDiscountPercentage != that1.AnnualDiscountPercentage { - return false - } - if !this.PlanPolicy.Equal(&that1.PlanPolicy) { - return false - } - if this.Projects != that1.Projects { - return false - } - return true -} -func (m *Plan) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Plan) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Plan) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Projects != 0 { - i = encodeVarintPlan(dAtA, i, uint64(m.Projects)) - i-- - dAtA[i] = 0x78 - } - { - size, err := m.PlanPolicy.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPlan(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x72 - if m.AnnualDiscountPercentage != 0 { - i = encodeVarintPlan(dAtA, i, uint64(m.AnnualDiscountPercentage)) - i-- - dAtA[i] = 0x68 - } - if len(m.Type) > 0 { - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintPlan(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0x62 - } - if len(m.Description) > 0 { - i -= len(m.Description) - copy(dAtA[i:], m.Description) - i = 
encodeVarintPlan(dAtA, i, uint64(len(m.Description))) - i-- - dAtA[i] = 0x5a - } - if m.OveruseRate != 0 { - i = encodeVarintPlan(dAtA, i, uint64(m.OveruseRate)) - i-- - dAtA[i] = 0x48 - } - if m.AllowOveruse { - i-- - if m.AllowOveruse { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x40 - } - { - size, err := m.Price.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPlan(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - if m.Block != 0 { - i = encodeVarintPlan(dAtA, i, uint64(m.Block)) - i-- - dAtA[i] = 0x18 - } - if len(m.Index) > 0 { - i -= len(m.Index) - copy(dAtA[i:], m.Index) - i = encodeVarintPlan(dAtA, i, uint64(len(m.Index))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintPlan(dAtA []byte, offset int, v uint64) int { - offset -= sovPlan(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Plan) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Index) - if l > 0 { - n += 1 + l + sovPlan(uint64(l)) - } - if m.Block != 0 { - n += 1 + sovPlan(uint64(m.Block)) - } - l = m.Price.Size() - n += 1 + l + sovPlan(uint64(l)) - if m.AllowOveruse { - n += 2 - } - if m.OveruseRate != 0 { - n += 1 + sovPlan(uint64(m.OveruseRate)) - } - l = len(m.Description) - if l > 0 { - n += 1 + l + sovPlan(uint64(l)) - } - l = len(m.Type) - if l > 0 { - n += 1 + l + sovPlan(uint64(l)) - } - if m.AnnualDiscountPercentage != 0 { - n += 1 + sovPlan(uint64(m.AnnualDiscountPercentage)) - } - l = m.PlanPolicy.Size() - n += 1 + l + sovPlan(uint64(l)) - if m.Projects != 0 { - n += 1 + sovPlan(uint64(m.Projects)) - } - return n -} - -func sovPlan(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozPlan(x uint64) (n int) { - return sovPlan(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Plan) Unmarshal(dAtA []byte) error { - l := len(dAtA) - 
iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Plan: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Plan: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlan - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlan - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Index = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Block", wireType) - } - m.Block = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Block |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Price", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 
0 { - return ErrInvalidLengthPlan - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPlan - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Price.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowOveruse", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.AllowOveruse = bool(v != 0) - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OveruseRate", wireType) - } - m.OveruseRate = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OveruseRate |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlan - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlan - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlan - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlan - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlan - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 13: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AnnualDiscountPercentage", wireType) - } - m.AnnualDiscountPercentage = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.AnnualDiscountPercentage |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 14: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PlanPolicy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPlan - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPlan - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.PlanPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 15: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Projects", wireType) - } - m.Projects = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Projects |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipPlan(dAtA[iNdEx:]) - if err != nil { - return err - } - if 
(skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPlan - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipPlan(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlan - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlan - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlan - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthPlan - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupPlan - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthPlan - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthPlan = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowPlan = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupPlan = fmt.Errorf("proto: unexpected end of group") -) diff --git a/x/plans/migrations/v9/policy.pb.go b/x/plans/migrations/v9/policy.pb.go deleted file mode 100644 index 5a43d1f1f6..0000000000 --- a/x/plans/migrations/v9/policy.pb.go +++ /dev/null 
@@ -1,1307 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: lavanet/lava/plans/policy.proto - -package types - -import ( - fmt "fmt" - _ "github.com/cosmos/cosmos-sdk/types" - _ "github.com/cosmos/gogoproto/gogoproto" - proto "github.com/cosmos/gogoproto/proto" - types "github.com/lavanet/lava/v4/x/spec/types" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// the enum below determines the pairing algorithm's behaviour with the selected providers feature -type SELECTED_PROVIDERS_MODE int32 - -const ( - SELECTED_PROVIDERS_MODE_ALLOWED SELECTED_PROVIDERS_MODE = 0 - SELECTED_PROVIDERS_MODE_MIXED SELECTED_PROVIDERS_MODE = 1 - SELECTED_PROVIDERS_MODE_EXCLUSIVE SELECTED_PROVIDERS_MODE = 2 - SELECTED_PROVIDERS_MODE_DISABLED SELECTED_PROVIDERS_MODE = 3 -) - -var SELECTED_PROVIDERS_MODE_name = map[int32]string{ - 0: "ALLOWED", - 1: "MIXED", - 2: "EXCLUSIVE", - 3: "DISABLED", -} - -var SELECTED_PROVIDERS_MODE_value = map[string]int32{ - "ALLOWED": 0, - "MIXED": 1, - "EXCLUSIVE": 2, - "DISABLED": 3, -} - -func (x SELECTED_PROVIDERS_MODE) String() string { - return proto.EnumName(SELECTED_PROVIDERS_MODE_name, int32(x)) -} - -func (SELECTED_PROVIDERS_MODE) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_c2388e0faa8deb9b, []int{0} -} - -// protobuf expected in YAML format: used "moretags" to simplify parsing -type Policy struct { - ChainPolicies []ChainPolicy `protobuf:"bytes,1,rep,name=chain_policies,json=chainPolicies,proto3" json:"chain_policies"` - GeolocationProfile int32 
`protobuf:"varint,2,opt,name=geolocation_profile,json=geolocationProfile,proto3" json:"geolocation_profile"` - TotalCuLimit uint64 `protobuf:"varint,3,opt,name=total_cu_limit,json=totalCuLimit,proto3" json:"total_cu_limit"` - EpochCuLimit uint64 `protobuf:"varint,4,opt,name=epoch_cu_limit,json=epochCuLimit,proto3" json:"epoch_cu_limit"` - MaxProvidersToPair uint64 `protobuf:"varint,5,opt,name=max_providers_to_pair,json=maxProvidersToPair,proto3" json:"max_providers_to_pair"` - SelectedProvidersMode SELECTED_PROVIDERS_MODE `protobuf:"varint,6,opt,name=selected_providers_mode,json=selectedProvidersMode,proto3,enum=lavanet.lava.plans.SELECTED_PROVIDERS_MODE" json:"selected_providers_mode"` - SelectedProviders []string `protobuf:"bytes,7,rep,name=selected_providers,json=selectedProviders,proto3" json:"selected_providers"` -} - -func (m *Policy) Reset() { *m = Policy{} } -func (m *Policy) String() string { return proto.CompactTextString(m) } -func (*Policy) ProtoMessage() {} -func (*Policy) Descriptor() ([]byte, []int) { - return fileDescriptor_c2388e0faa8deb9b, []int{0} -} -func (m *Policy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Policy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Policy.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Policy) XXX_Merge(src proto.Message) { - xxx_messageInfo_Policy.Merge(m, src) -} -func (m *Policy) XXX_Size() int { - return m.Size() -} -func (m *Policy) XXX_DiscardUnknown() { - xxx_messageInfo_Policy.DiscardUnknown(m) -} - -var xxx_messageInfo_Policy proto.InternalMessageInfo - -func (m *Policy) GetChainPolicies() []ChainPolicy { - if m != nil { - return m.ChainPolicies - } - return nil -} - -func (m *Policy) GetGeolocationProfile() int32 { - if m != nil { - return m.GeolocationProfile - } - return 0 -} - -func (m *Policy) 
GetTotalCuLimit() uint64 { - if m != nil { - return m.TotalCuLimit - } - return 0 -} - -func (m *Policy) GetEpochCuLimit() uint64 { - if m != nil { - return m.EpochCuLimit - } - return 0 -} - -func (m *Policy) GetMaxProvidersToPair() uint64 { - if m != nil { - return m.MaxProvidersToPair - } - return 0 -} - -func (m *Policy) GetSelectedProvidersMode() SELECTED_PROVIDERS_MODE { - if m != nil { - return m.SelectedProvidersMode - } - return SELECTED_PROVIDERS_MODE_ALLOWED -} - -func (m *Policy) GetSelectedProviders() []string { - if m != nil { - return m.SelectedProviders - } - return nil -} - -type ChainPolicy struct { - ChainId string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id"` - Apis []string `protobuf:"bytes,2,rep,name=apis,proto3" json:"apis"` - Requirements []ChainRequirement `protobuf:"bytes,3,rep,name=requirements,proto3" json:"requirements"` -} - -func (m *ChainPolicy) Reset() { *m = ChainPolicy{} } -func (m *ChainPolicy) String() string { return proto.CompactTextString(m) } -func (*ChainPolicy) ProtoMessage() {} -func (*ChainPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_c2388e0faa8deb9b, []int{1} -} -func (m *ChainPolicy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ChainPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ChainPolicy.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ChainPolicy) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChainPolicy.Merge(m, src) -} -func (m *ChainPolicy) XXX_Size() int { - return m.Size() -} -func (m *ChainPolicy) XXX_DiscardUnknown() { - xxx_messageInfo_ChainPolicy.DiscardUnknown(m) -} - -var xxx_messageInfo_ChainPolicy proto.InternalMessageInfo - -func (m *ChainPolicy) GetChainId() string { - if m != nil { - return m.ChainId - } - return "" -} - -func (m 
*ChainPolicy) GetApis() []string { - if m != nil { - return m.Apis - } - return nil -} - -func (m *ChainPolicy) GetRequirements() []ChainRequirement { - if m != nil { - return m.Requirements - } - return nil -} - -type ChainRequirement struct { - Collection types.CollectionData `protobuf:"bytes,1,opt,name=collection,proto3" json:"collection"` - Extensions []string `protobuf:"bytes,2,rep,name=extensions,proto3" json:"extensions"` - Mixed bool `protobuf:"varint,3,opt,name=mixed,proto3" json:"mixed"` -} - -func (m *ChainRequirement) Reset() { *m = ChainRequirement{} } -func (m *ChainRequirement) String() string { return proto.CompactTextString(m) } -func (*ChainRequirement) ProtoMessage() {} -func (*ChainRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_c2388e0faa8deb9b, []int{2} -} -func (m *ChainRequirement) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ChainRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ChainRequirement.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ChainRequirement) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChainRequirement.Merge(m, src) -} -func (m *ChainRequirement) XXX_Size() int { - return m.Size() -} -func (m *ChainRequirement) XXX_DiscardUnknown() { - xxx_messageInfo_ChainRequirement.DiscardUnknown(m) -} - -var xxx_messageInfo_ChainRequirement proto.InternalMessageInfo - -func (m *ChainRequirement) GetCollection() types.CollectionData { - if m != nil { - return m.Collection - } - return types.CollectionData{} -} - -func (m *ChainRequirement) GetExtensions() []string { - if m != nil { - return m.Extensions - } - return nil -} - -func (m *ChainRequirement) GetMixed() bool { - if m != nil { - return m.Mixed - } - return false -} - -func init() { - 
proto.RegisterEnum("lavanet.lava.plans.SELECTED_PROVIDERS_MODEV9", SELECTED_PROVIDERS_MODE_name, SELECTED_PROVIDERS_MODE_value) - proto.RegisterType((*Policy)(nil), "lavanet.lava.plans.PolicyV9") - proto.RegisterType((*ChainPolicy)(nil), "lavanet.lava.plans.ChainPolicyV9") - proto.RegisterType((*ChainRequirement)(nil), "lavanet.lava.plans.ChainRequirementV9") -} - -func init() { proto.RegisterFile("lavanet/lava/plans/policy.proto", fileDescriptor_c2388e0faa8deb9b) } - -var fileDescriptor_c2388e0faa8deb9b = []byte{ - // 678 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x54, 0xcb, 0x6e, 0xda, 0x4a, - 0x18, 0x66, 0x02, 0x24, 0x30, 0x10, 0xc4, 0x99, 0x93, 0x8b, 0x4f, 0xce, 0x91, 0xcd, 0x89, 0x7a, - 0x41, 0xad, 0x64, 0x2b, 0xe9, 0xa6, 0xdb, 0x18, 0x5b, 0x2a, 0x12, 0x69, 0xd0, 0x90, 0xa4, 0x51, - 0x17, 0xb1, 0x06, 0x33, 0x25, 0x23, 0xd9, 0x8c, 0x6b, 0x9b, 0x88, 0xec, 0xfa, 0x08, 0x7d, 0x8c, - 0x3e, 0x40, 0x57, 0x5d, 0x74, 0x9d, 0x65, 0x96, 0x5d, 0x59, 0x15, 0xd9, 0xf9, 0x29, 0x2a, 0x0f, - 0x57, 0x27, 0x64, 0xc3, 0xfc, 0x97, 0xef, 0xfb, 0xff, 0x0f, 0xf8, 0x66, 0xa0, 0xe2, 0x90, 0x6b, - 0x32, 0xa0, 0xa1, 0x96, 0x9c, 0x9a, 0xe7, 0x90, 0x41, 0xa0, 0x79, 0xdc, 0x61, 0xf6, 0x8d, 0xea, - 0xf9, 0x3c, 0xe4, 0x08, 0x4d, 0x01, 0x6a, 0x72, 0xaa, 0x02, 0xb0, 0xb7, 0xd5, 0xe7, 0x7d, 0x2e, - 0xda, 0x5a, 0x12, 0x4d, 0x90, 0x7b, 0xb2, 0xcd, 0x03, 0x97, 0x07, 0x5a, 0x97, 0x04, 0x54, 0xbb, - 0x3e, 0xe8, 0xd2, 0x90, 0x1c, 0x68, 0x36, 0x67, 0x83, 0x69, 0xff, 0x45, 0x6a, 0x55, 0xe0, 0x51, - 0x5b, 0x23, 0x1e, 0xb3, 0x6c, 0xee, 0x38, 0xd4, 0x0e, 0x19, 0x9f, 0xe2, 0xf6, 0x7f, 0xe6, 0xe0, - 0x7a, 0x5b, 0x48, 0x40, 0x97, 0xb0, 0x62, 0x5f, 0x11, 0x36, 0xb0, 0x84, 0x24, 0x46, 0x03, 0x09, - 0xd4, 0xb2, 0xf5, 0xd2, 0xa1, 0xa2, 0x3e, 0x56, 0xa5, 0x36, 0x12, 0xe4, 0x84, 0xa8, 0xef, 0xdc, - 0x46, 0x4a, 0x26, 0x8e, 0x94, 0x07, 0x74, 0xbc, 0x69, 0xcf, 0x41, 0x8c, 0x06, 0xe8, 0x1d, 0xfc, - 0xbb, 0x4f, 0xb9, 0xc3, 0x6d, 0x92, 0xec, 0xb7, 0x3c, 
0x9f, 0x7f, 0x62, 0x0e, 0x95, 0xd6, 0x6a, - 0xa0, 0x9e, 0xd7, 0x77, 0xe3, 0x48, 0x59, 0xd5, 0xc6, 0x68, 0xa9, 0xd8, 0x9e, 0xd4, 0xd0, 0x5b, - 0x58, 0x09, 0x79, 0x48, 0x1c, 0xcb, 0x1e, 0x5a, 0x0e, 0x73, 0x59, 0x28, 0x65, 0x6b, 0xa0, 0x9e, - 0xd3, 0x51, 0x22, 0x22, 0xdd, 0xc1, 0x65, 0x91, 0x37, 0x86, 0xad, 0x24, 0x4b, 0x98, 0xd4, 0xe3, - 0xf6, 0xd5, 0x82, 0x99, 0x5b, 0x30, 0xd3, 0x1d, 0x5c, 0x16, 0xf9, 0x8c, 0xd9, 0x82, 0xdb, 0x2e, - 0x19, 0x25, 0xb2, 0xae, 0x59, 0x8f, 0xfa, 0x81, 0x15, 0x72, 0xcb, 0x23, 0xcc, 0x97, 0xf2, 0x62, - 0xc0, 0x3f, 0x71, 0xa4, 0xac, 0x06, 0x60, 0xe4, 0x92, 0x51, 0x7b, 0x56, 0x3d, 0xe5, 0x6d, 0xc2, - 0x7c, 0xf4, 0x05, 0xc0, 0xdd, 0x80, 0x26, 0x7f, 0x05, 0xed, 0x2d, 0x51, 0x5c, 0xde, 0xa3, 0xd2, - 0x7a, 0x0d, 0xd4, 0x2b, 0x87, 0xaf, 0x57, 0xfd, 0xea, 0x1d, 0xb3, 0x65, 0x36, 0x4e, 0x4d, 0xc3, - 0x6a, 0xe3, 0x93, 0xf3, 0xa6, 0x61, 0xe2, 0x8e, 0x75, 0x7c, 0x62, 0x98, 0xfa, 0xbf, 0x71, 0xa4, - 0x3c, 0x35, 0x0f, 0x6f, 0xcf, 0x1a, 0x73, 0x11, 0xc7, 0xbc, 0x47, 0x91, 0x09, 0xd1, 0x63, 0x86, - 0xb4, 0x51, 0xcb, 0xd6, 0x8b, 0xfa, 0x4e, 0x1c, 0x29, 0x2b, 0xba, 0xf8, 0xaf, 0x47, 0xa3, 0xf6, - 0xbf, 0x03, 0x58, 0x5a, 0x32, 0x03, 0x7a, 0x09, 0x0b, 0x13, 0x1b, 0xb0, 0x9e, 0x04, 0x6a, 0xa0, - 0x5e, 0xd4, 0xcb, 0x71, 0xa4, 0xcc, 0x6b, 0x78, 0x43, 0x44, 0xcd, 0x1e, 0xfa, 0x0f, 0xe6, 0x88, - 0xc7, 0x02, 0x69, 0x4d, 0x6c, 0x2c, 0xc4, 0x91, 0x22, 0x72, 0x2c, 0x3e, 0xd1, 0x25, 0x2c, 0xfb, - 0xf4, 0xf3, 0x90, 0xf9, 0xd4, 0xa5, 0x83, 0x30, 0x90, 0xb2, 0xc2, 0x8a, 0xcf, 0x9e, 0xb4, 0x22, - 0x5e, 0x80, 0xf5, 0xad, 0xa9, 0x1f, 0x53, 0x13, 0x70, 0x2a, 0xdb, 0xff, 0x01, 0x60, 0xf5, 0x21, - 0x11, 0x9d, 0x41, 0xb8, 0xb8, 0x20, 0x42, 0x7d, 0xe9, 0xf0, 0xff, 0xf4, 0xca, 0xe4, 0x26, 0xa9, - 0x8d, 0x39, 0xc8, 0x20, 0x21, 0xd1, 0xd1, 0x74, 0xdf, 0x12, 0x19, 0x2f, 0xc5, 0x48, 0x85, 0x90, - 0x8e, 0x42, 0x3a, 0x08, 0x18, 0x1f, 0xcc, 0xbe, 0x6f, 0x25, 0xc1, 0x2f, 0xaa, 0x78, 0x29, 0x46, - 0x0a, 0xcc, 0xbb, 0x6c, 0x44, 0x7b, 0xc2, 0xd5, 0x05, 0xbd, 0x18, 0x47, 0xca, 0xa4, 0x80, 
0x27, - 0xc7, 0xab, 0xf7, 0x70, 0xf7, 0x09, 0x27, 0xa0, 0x12, 0xdc, 0x38, 0x6a, 0xb5, 0x4e, 0x3e, 0x98, - 0x46, 0x35, 0x83, 0x8a, 0x30, 0x7f, 0xdc, 0xbc, 0x30, 0x8d, 0x2a, 0x40, 0x9b, 0xb0, 0x68, 0x5e, - 0x34, 0x5a, 0x67, 0x9d, 0xe6, 0xb9, 0x59, 0x5d, 0x43, 0x65, 0x58, 0x30, 0x9a, 0x9d, 0x23, 0xbd, - 0x65, 0x1a, 0xd5, 0xac, 0xde, 0xf8, 0x36, 0x96, 0xc1, 0xed, 0x58, 0x06, 0x77, 0x63, 0x19, 0xfc, - 0x1e, 0xcb, 0xe0, 0xeb, 0xbd, 0x9c, 0xb9, 0xbb, 0x97, 0x33, 0xbf, 0xee, 0xe5, 0xcc, 0xc7, 0xe7, - 0x7d, 0x16, 0x5e, 0x0d, 0xbb, 0xaa, 0xcd, 0x5d, 0x2d, 0xf5, 0xaa, 0x8c, 0xa6, 0x4f, 0x58, 0x78, - 0xe3, 0xd1, 0xa0, 0xbb, 0x2e, 0x1e, 0x94, 0x37, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x08, 0x9a, - 0x8f, 0xe7, 0xe5, 0x04, 0x00, 0x00, -} - -func (this *Policy) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Policy) - if !ok { - that2, ok := that.(Policy) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.ChainPolicies) != len(that1.ChainPolicies) { - return false - } - for i := range this.ChainPolicies { - if !this.ChainPolicies[i].Equal(&that1.ChainPolicies[i]) { - return false - } - } - if this.GeolocationProfile != that1.GeolocationProfile { - return false - } - if this.TotalCuLimit != that1.TotalCuLimit { - return false - } - if this.EpochCuLimit != that1.EpochCuLimit { - return false - } - if this.MaxProvidersToPair != that1.MaxProvidersToPair { - return false - } - if this.SelectedProvidersMode != that1.SelectedProvidersMode { - return false - } - if len(this.SelectedProviders) != len(that1.SelectedProviders) { - return false - } - for i := range this.SelectedProviders { - if this.SelectedProviders[i] != that1.SelectedProviders[i] { - return false - } - } - return true -} -func (this *ChainPolicy) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ChainPolicy) - if !ok 
{ - that2, ok := that.(ChainPolicy) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.ChainId != that1.ChainId { - return false - } - if len(this.Apis) != len(that1.Apis) { - return false - } - for i := range this.Apis { - if this.Apis[i] != that1.Apis[i] { - return false - } - } - if len(this.Requirements) != len(that1.Requirements) { - return false - } - for i := range this.Requirements { - if !this.Requirements[i].Equal(&that1.Requirements[i]) { - return false - } - } - return true -} -func (this *ChainRequirement) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ChainRequirement) - if !ok { - that2, ok := that.(ChainRequirement) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Collection.Equal(&that1.Collection) { - return false - } - if len(this.Extensions) != len(that1.Extensions) { - return false - } - for i := range this.Extensions { - if this.Extensions[i] != that1.Extensions[i] { - return false - } - } - if this.Mixed != that1.Mixed { - return false - } - return true -} -func (m *Policy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Policy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Policy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.SelectedProviders) > 0 { - for iNdEx := len(m.SelectedProviders) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.SelectedProviders[iNdEx]) - copy(dAtA[i:], m.SelectedProviders[iNdEx]) - i = encodeVarintPolicy(dAtA, i, uint64(len(m.SelectedProviders[iNdEx]))) - i-- - dAtA[i] = 
0x3a - } - } - if m.SelectedProvidersMode != 0 { - i = encodeVarintPolicy(dAtA, i, uint64(m.SelectedProvidersMode)) - i-- - dAtA[i] = 0x30 - } - if m.MaxProvidersToPair != 0 { - i = encodeVarintPolicy(dAtA, i, uint64(m.MaxProvidersToPair)) - i-- - dAtA[i] = 0x28 - } - if m.EpochCuLimit != 0 { - i = encodeVarintPolicy(dAtA, i, uint64(m.EpochCuLimit)) - i-- - dAtA[i] = 0x20 - } - if m.TotalCuLimit != 0 { - i = encodeVarintPolicy(dAtA, i, uint64(m.TotalCuLimit)) - i-- - dAtA[i] = 0x18 - } - if m.GeolocationProfile != 0 { - i = encodeVarintPolicy(dAtA, i, uint64(m.GeolocationProfile)) - i-- - dAtA[i] = 0x10 - } - if len(m.ChainPolicies) > 0 { - for iNdEx := len(m.ChainPolicies) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ChainPolicies[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPolicy(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ChainPolicy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ChainPolicy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ChainPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Requirements) > 0 { - for iNdEx := len(m.Requirements) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Requirements[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPolicy(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.Apis) > 0 { - for iNdEx := len(m.Apis) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Apis[iNdEx]) - copy(dAtA[i:], m.Apis[iNdEx]) - i = encodeVarintPolicy(dAtA, i, uint64(len(m.Apis[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if len(m.ChainId) > 0 { - i -= len(m.ChainId) - 
copy(dAtA[i:], m.ChainId) - i = encodeVarintPolicy(dAtA, i, uint64(len(m.ChainId))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ChainRequirement) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ChainRequirement) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ChainRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Mixed { - i-- - if m.Mixed { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if len(m.Extensions) > 0 { - for iNdEx := len(m.Extensions) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Extensions[iNdEx]) - copy(dAtA[i:], m.Extensions[iNdEx]) - i = encodeVarintPolicy(dAtA, i, uint64(len(m.Extensions[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.Collection.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPolicy(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintPolicy(dAtA []byte, offset int, v uint64) int { - offset -= sovPolicy(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Policy) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ChainPolicies) > 0 { - for _, e := range m.ChainPolicies { - l = e.Size() - n += 1 + l + sovPolicy(uint64(l)) - } - } - if m.GeolocationProfile != 0 { - n += 1 + sovPolicy(uint64(m.GeolocationProfile)) - } - if m.TotalCuLimit != 0 { - n += 1 + sovPolicy(uint64(m.TotalCuLimit)) - } - if m.EpochCuLimit != 0 { - n += 1 + sovPolicy(uint64(m.EpochCuLimit)) - } - if m.MaxProvidersToPair != 0 { - n += 1 + sovPolicy(uint64(m.MaxProvidersToPair)) - } - if 
m.SelectedProvidersMode != 0 { - n += 1 + sovPolicy(uint64(m.SelectedProvidersMode)) - } - if len(m.SelectedProviders) > 0 { - for _, s := range m.SelectedProviders { - l = len(s) - n += 1 + l + sovPolicy(uint64(l)) - } - } - return n -} - -func (m *ChainPolicy) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ChainId) - if l > 0 { - n += 1 + l + sovPolicy(uint64(l)) - } - if len(m.Apis) > 0 { - for _, s := range m.Apis { - l = len(s) - n += 1 + l + sovPolicy(uint64(l)) - } - } - if len(m.Requirements) > 0 { - for _, e := range m.Requirements { - l = e.Size() - n += 1 + l + sovPolicy(uint64(l)) - } - } - return n -} - -func (m *ChainRequirement) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Collection.Size() - n += 1 + l + sovPolicy(uint64(l)) - if len(m.Extensions) > 0 { - for _, s := range m.Extensions { - l = len(s) - n += 1 + l + sovPolicy(uint64(l)) - } - } - if m.Mixed { - n += 2 - } - return n -} - -func sovPolicy(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozPolicy(x uint64) (n int) { - return sovPolicy(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Policy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPolicy - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Policy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Policy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ChainPolicies", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - 
if shift >= 64 { - return ErrIntOverflowPolicy - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPolicy - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPolicy - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ChainPolicies = append(m.ChainPolicies, ChainPolicy{}) - if err := m.ChainPolicies[len(m.ChainPolicies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GeolocationProfile", wireType) - } - m.GeolocationProfile = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPolicy - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GeolocationProfile |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalCuLimit", wireType) - } - m.TotalCuLimit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPolicy - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalCuLimit |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EpochCuLimit", wireType) - } - m.EpochCuLimit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPolicy - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.EpochCuLimit |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxProvidersToPair", wireType) - } - m.MaxProvidersToPair = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowPolicy - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxProvidersToPair |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SelectedProvidersMode", wireType) - } - m.SelectedProvidersMode = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPolicy - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SelectedProvidersMode |= SELECTED_PROVIDERS_MODE(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SelectedProviders", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPolicy - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPolicy - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPolicy - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SelectedProviders = append(m.SelectedProviders, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPolicy(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPolicy - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ChainPolicy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPolicy - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ChainPolicy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ChainPolicy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPolicy - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPolicy - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPolicy - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ChainId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Apis", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPolicy - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPolicy - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPolicy - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Apis = append(m.Apis, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Requirements", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPolicy - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPolicy - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPolicy - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Requirements = append(m.Requirements, ChainRequirement{}) - if err := m.Requirements[len(m.Requirements)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPolicy(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPolicy - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ChainRequirement) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPolicy - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ChainRequirement: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ChainRequirement: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Collection", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPolicy - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPolicy - } - postIndex := iNdEx + msglen - if 
postIndex < 0 { - return ErrInvalidLengthPolicy - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Collection.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Extensions", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPolicy - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPolicy - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPolicy - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Extensions = append(m.Extensions, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Mixed", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPolicy - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Mixed = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipPolicy(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPolicy - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipPolicy(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPolicy - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b 
< 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPolicy - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPolicy - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthPolicy - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupPolicy - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthPolicy - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthPolicy = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowPolicy = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupPolicy = fmt.Errorf("proto: unexpected end of group") -) diff --git a/x/plans/module.go b/x/plans/module.go index 51e5b60028..fd378e0f8a 100644 --- a/x/plans/module.go +++ b/x/plans/module.go @@ -124,32 +124,6 @@ func (am AppModule) RegisterServices(cfg module.Configurator) { migrator := keeper.NewMigrator(am.keeper) - // register v2 -> v3 migration - if err := cfg.RegisterMigration(types.ModuleName, 2, migrator.Migrate2to3); err != nil { - // panic:ok: at start up, migration cannot proceed anyhow - panic(fmt.Errorf("%s: failed to register migration to v3: %w", types.ModuleName, err)) - } - // register v3 -> v4 migration - if err := cfg.RegisterMigration(types.ModuleName, 3, migrator.Migrate3to4); err != nil { - // panic:ok: at start up, migration cannot proceed anyhow - panic(fmt.Errorf("%s: failed 
to register migration to v4: %w", types.ModuleName, err)) - } - // register v4 -> v5 migration - if err := cfg.RegisterMigration(types.ModuleName, 4, migrator.Migrate4to5); err != nil { - // panic:ok: at start up, migration cannot proceed anyhow - panic(fmt.Errorf("%s: failed to register migration to v5: %w", types.ModuleName, err)) - } - // register v5 -> v6 migration - if err := cfg.RegisterMigration(types.ModuleName, 5, migrator.Migrate5to6); err != nil { - // panic:ok: at start up, migration cannot proceed anyhow - panic(fmt.Errorf("%s: failed to register migration to v6: %w", types.ModuleName, err)) - } - // register v6 -> v7 migration - if err := cfg.RegisterMigration(types.ModuleName, 6, migrator.Migrate6to7); err != nil { - // panic:ok: at start up, migration cannot proceed anyhow - panic(fmt.Errorf("%s: failed to register migration to v7: %w", types.ModuleName, err)) - } - // register v7 -> v8 migration if err := cfg.RegisterMigration(types.ModuleName, 7, migrator.Migrate7to8); err != nil { // panic:ok: at start up, migration cannot proceed anyhow diff --git a/x/plans/module_simulation.go b/x/plans/module_simulation.go deleted file mode 100644 index 229fc02022..0000000000 --- a/x/plans/module_simulation.go +++ /dev/null @@ -1,56 +0,0 @@ -package plans - -import ( - "github.com/cosmos/cosmos-sdk/baseapp" - "github.com/cosmos/cosmos-sdk/testutil/sims" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/module" - simtypes "github.com/cosmos/cosmos-sdk/types/simulation" - "github.com/cosmos/cosmos-sdk/x/simulation" - "github.com/lavanet/lava/v4/testutil/sample" - plansimulation "github.com/lavanet/lava/v4/x/plans/simulation" - "github.com/lavanet/lava/v4/x/plans/types" -) - -// avoid unused import issue -var ( - _ = sample.AccAddress - _ = plansimulation.FindAccount - _ = sims.StakePerAccount - _ = simulation.MsgEntryKind - _ = baseapp.Paramspace -) - -const ( -// this line is used by starport scaffolding # simapp/module/const -) - 
-// GenerateGenesisState creates a randomized GenState of the module -func (AppModule) GenerateGenesisState(simState *module.SimulationState) { - accs := make([]string, len(simState.Accounts)) - for i, acc := range simState.Accounts { - accs[i] = acc.Address.String() - } - planGenesis := types.GenesisState{ - Params: types.DefaultParams(), - // this line is used by starport scaffolding # simapp/module/genesisState - } - simState.GenState[types.ModuleName] = simState.Cdc.MustMarshalJSON(&planGenesis) -} - -// ProposalContents doesn't return any content functions for governance proposals -func (AppModule) ProposalContents(_ module.SimulationState) []simtypes.WeightedProposalMsg { - return nil -} - -// RegisterStoreDecoder registers a decoder -func (am AppModule) RegisterStoreDecoder(_ sdk.StoreDecoderRegistry) {} - -// WeightedOperations returns the all the gov module operations with their respective weights. -func (am AppModule) WeightedOperations(simState module.SimulationState) []simtypes.WeightedOperation { - operations := make([]simtypes.WeightedOperation, 0) - - // this line is used by starport scaffolding # simapp/module/operation - - return operations -} diff --git a/x/plans/simulation/simap.go b/x/plans/simulation/simap.go deleted file mode 100644 index 92c437c0d1..0000000000 --- a/x/plans/simulation/simap.go +++ /dev/null @@ -1,15 +0,0 @@ -package simulation - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - simtypes "github.com/cosmos/cosmos-sdk/types/simulation" -) - -// FindAccount find a specific address from an account list -func FindAccount(accs []simtypes.Account, address string) (simtypes.Account, bool) { - creator, err := sdk.AccAddressFromBech32(address) - if err != nil { - panic(err) - } - return simtypes.FindAccount(accs, creator) -} diff --git a/x/projects/keeper/migrations.go b/x/projects/keeper/migrations.go index 614e364816..b593b2ebd5 100644 --- a/x/projects/keeper/migrations.go +++ b/x/projects/keeper/migrations.go @@ -2,11 +2,6 @@ 
package keeper import ( sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/lavanet/lava/v4/utils" - v2 "github.com/lavanet/lava/v4/x/projects/migrations/v2" - v3 "github.com/lavanet/lava/v4/x/projects/migrations/v3" - v4 "github.com/lavanet/lava/v4/x/projects/migrations/v4" - v5 "github.com/lavanet/lava/v4/x/projects/migrations/v5" ) type Migrator struct { @@ -23,213 +18,6 @@ func (m Migrator) migrateFixationsVersion(ctx sdk.Context) error { return nil } -// Migrate2to3 implements store migration from v2 to v3: -// - Trigger version upgrade of the projectsFS, develooperKeysFS fixation stores -// - Update keys contents -func (m Migrator) Migrate2to3(ctx sdk.Context) error { - if err := m.migrateFixationsVersion(ctx); err != nil { - return err - } - - projectIndices := m.keeper.projectsFS.AllEntryIndicesFilter(ctx, "", nil) - for _, projectIndex := range projectIndices { - blocks := m.keeper.projectsFS.GetAllEntryVersions(ctx, projectIndex) - for _, block := range blocks { - var project_v2 v2.Project - m.keeper.projectsFS.ReadEntry(ctx, projectIndex, block, &project_v2) - - // convert project keys from type v2.ProjectKey to types.ProjectKey - var projectKeys_v3 []v3.ProjectKey - for _, projectKey_v2 := range project_v2.ProjectKeys { - projectKey_v3 := v3.ProjectKey{ - Key: projectKey_v2.Key, - } - - for _, projectKeyType_v2 := range projectKey_v2.Types { - if projectKeyType_v2 == v2.ProjectKey_ADMIN { - projectKey_v3.Types = append(projectKey_v3.Types, v3.ProjectKey_ADMIN) - } else if projectKeyType_v2 == v2.ProjectKey_DEVELOPER { - projectKey_v3.Types = append(projectKey_v3.Types, v3.ProjectKey_DEVELOPER) - } - } - } - - // convert chainPolicies from type v2.ChainPolicy to v3.ChainPolicy - var chainPolicies_v3 []v3.ChainPolicy - for _, chainPolicy_v2 := range project_v2.Policy.ChainPolicies { - chainPolicies_v3 = append(chainPolicies_v3, v3.ChainPolicy{ - ChainId: chainPolicy_v2.ChainId, - Apis: chainPolicy_v2.Apis, - }) - } - - // convert policy from type 
v2.Policy to v3.Policy - policy_v3 := v3.Policy{ - ChainPolicies: chainPolicies_v3, - GeolocationProfile: project_v2.Policy.GeolocationProfile, - TotalCuLimit: project_v2.Policy.TotalCuLimit, - EpochCuLimit: project_v2.Policy.EpochCuLimit, - MaxProvidersToPair: project_v2.Policy.MaxProvidersToPair, - } - - // convert project from type v2.Project to v3.Project - projectStruct_v3 := v3.Project{ - Index: project_v2.Index, - Subscription: project_v2.Subscription, - Description: project_v2.Description, - Enabled: project_v2.Enabled, - ProjectKeys: projectKeys_v3, - AdminPolicy: &policy_v3, - SubscriptionPolicy: &policy_v3, - UsedCu: project_v2.UsedCu, - } - - m.keeper.projectsFS.ModifyEntry(ctx, projectIndex, block, &projectStruct_v3) - } - } - - developerDataIndices := m.keeper.developerKeysFS.AllEntryIndicesFilter(ctx, "", nil) - for _, developerDataIndex := range developerDataIndices { - blocks := m.keeper.developerKeysFS.GetAllEntryVersions(ctx, developerDataIndex) - for _, block := range blocks { - var developerDataStruct_v2 v2.ProtoDeveloperData - m.keeper.developerKeysFS.ReadEntry(ctx, developerDataIndex, block, &developerDataStruct_v2) - - developerData_v3 := v3.ProtoDeveloperData{ - ProjectID: developerDataStruct_v2.ProjectID, - } - - m.keeper.developerKeysFS.ModifyEntry(ctx, developerDataIndex, block, &developerData_v3) - } - } - - return nil -} - -// Migrate3to4 implements store migration from v3 to v4: -// - Trigger version upgrade of the projectsFS, develooperKeysFS fixation-stores -func (m Migrator) Migrate3to4(ctx sdk.Context) error { - if err := m.migrateFixationsVersion(ctx); err != nil { - return err - } - return nil -} - -// Migrate4to5 implements store migration from v4 to v5: -// - Trigger version upgrade of the projectsFS, developerKeysFS fixation stores -// - Update keys types (from list of types to bitmap) -func (m Migrator) Migrate4to5(ctx sdk.Context) error { - if err := m.migrateFixationsVersion(ctx); err != nil { - return err - } - - 
projectIndices := m.keeper.projectsFS.GetAllEntryIndices(ctx) - for _, projectIndex := range projectIndices { - utils.LavaFormatDebug("migrate:", - utils.Attribute{Key: "project", Value: projectIndex}) - - blocks := m.keeper.projectsFS.GetAllEntryVersions(ctx, projectIndex) - for _, block := range blocks { - utils.LavaFormatDebug(" project:", - utils.Attribute{Key: "block", Value: block}) - - var project_v4 v4.Project - m.keeper.projectsFS.ReadEntry(ctx, projectIndex, block, &project_v4) - - // convert project keys from type v4.ProjectKey to v5.ProjectKey - var projectKeys_v5 []v5.ProjectKey - for _, projectKey_v4 := range project_v4.ProjectKeys { - utils.LavaFormatDebug(" block:", - utils.Attribute{Key: "key", Value: projectKey_v4}) - - projectKey_v5 := v5.NewProjectKey(projectKey_v4.Key, 0x0) - - for _, projectKeyType_v4 := range projectKey_v4.Types { - if projectKeyType_v4 == v4.ProjectKey_ADMIN { - projectKey_v5 = projectKey_v5.AddType(v5.ProjectKey_ADMIN) - } else if projectKeyType_v4 == v4.ProjectKey_DEVELOPER { - projectKey_v5 = projectKey_v5.AddType(v5.ProjectKey_DEVELOPER) - } - } - - projectKeys_v5 = append(projectKeys_v5, projectKey_v5) - } - - // convert policy from type v4.Policy to v5.Policy - // convert chainPolicies from type v4.ChainPolicy to v5.ChainPolicy - var adminPolicy_v5 *v5.Policy - if project_v4.AdminPolicy != nil { - var adminChainPolicies_v5 []v5.ChainPolicy - for _, chainPolicy_v4 := range project_v4.AdminPolicy.ChainPolicies { - adminChainPolicies_v5 = append(adminChainPolicies_v5, v5.ChainPolicy{ - ChainId: chainPolicy_v4.ChainId, - Apis: chainPolicy_v4.Apis, - }) - } - - adminPolicy_v5_temp := v5.Policy{ - ChainPolicies: adminChainPolicies_v5, - GeolocationProfile: project_v4.AdminPolicy.GeolocationProfile, - TotalCuLimit: project_v4.AdminPolicy.TotalCuLimit, - EpochCuLimit: project_v4.AdminPolicy.EpochCuLimit, - MaxProvidersToPair: project_v4.AdminPolicy.MaxProvidersToPair, - } - - adminPolicy_v5 = &adminPolicy_v5_temp - } - - var 
subscriptionPolicy_v5 *v5.Policy - if project_v4.SubscriptionPolicy != nil { - var subscriptionChainPolicies_v5 []v5.ChainPolicy - for _, chainPolicy_v4 := range project_v4.SubscriptionPolicy.ChainPolicies { - subscriptionChainPolicies_v5 = append(subscriptionChainPolicies_v5, v5.ChainPolicy{ - ChainId: chainPolicy_v4.ChainId, - Apis: chainPolicy_v4.Apis, - }) - } - - subscriptionPolicy_v5_temp := v5.Policy{ - ChainPolicies: subscriptionChainPolicies_v5, - GeolocationProfile: project_v4.SubscriptionPolicy.GeolocationProfile, - TotalCuLimit: project_v4.SubscriptionPolicy.TotalCuLimit, - EpochCuLimit: project_v4.SubscriptionPolicy.EpochCuLimit, - MaxProvidersToPair: project_v4.SubscriptionPolicy.MaxProvidersToPair, - } - - subscriptionPolicy_v5 = &subscriptionPolicy_v5_temp - } - - // convert project from type v4.Project to v5.Project - project_v5 := v5.Project{ - Index: project_v4.Index, - Subscription: project_v4.Subscription, - Description: project_v4.Description, - Enabled: project_v4.Enabled, - ProjectKeys: projectKeys_v5, - AdminPolicy: adminPolicy_v5, - SubscriptionPolicy: subscriptionPolicy_v5, - UsedCu: project_v4.UsedCu, - Snapshot: project_v4.Snapshot, - } - - utils.LavaFormatDebug(" project:", - utils.Attribute{Key: "entry_v4", Value: project_v4}) - utils.LavaFormatDebug(" project:", - utils.Attribute{Key: "entry_v5", Value: project_v5}) - - m.keeper.projectsFS.ModifyEntry(ctx, projectIndex, block, &project_v5) - } - } - - return nil -} - -// Migrate5to6 implements store migration from v5 to v6: -// -- trigger fixation migration, deleteat and live variables -func (m Migrator) Migrate5to6(ctx sdk.Context) error { - return m.migrateFixationsVersion(ctx) -} - // Migrate6to7 implements store migration from v6 to v7: // -- trigger fixation migration (v4->v5), initialize IsLatest field func (m Migrator) Migrate6to7(ctx sdk.Context) error { diff --git a/x/projects/migrations/v2/project.pb.go b/x/projects/migrations/v2/project.pb.go deleted file mode 100644 index 
433b0bb3a1..0000000000 --- a/x/projects/migrations/v2/project.pb.go +++ /dev/null @@ -1,1755 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: projects/project.proto - -package types - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type ProjectKey_KEY_TYPE int32 - -const ( - ProjectKey_NONE ProjectKey_KEY_TYPE = 0 - ProjectKey_ADMIN ProjectKey_KEY_TYPE = 1 - ProjectKey_DEVELOPER ProjectKey_KEY_TYPE = 2 -) - -var ProjectKey_KEY_TYPE_name = map[int32]string{ - 0: "NONE", - 1: "ADMIN", - 2: "DEVELOPER", -} - -var ProjectKey_KEY_TYPE_value = map[string]int32{ - "NONE": 0, - "ADMIN": 1, - "DEVELOPER": 2, -} - -func (x ProjectKey_KEY_TYPE) String() string { - return proto.EnumName(ProjectKey_KEY_TYPE_name, int32(x)) -} - -func (ProjectKey_KEY_TYPE) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_9f89a31663a330ce, []int{1, 0} -} - -type Project struct { - Index string `protobuf:"bytes,1,opt,name=index,proto3" json:"index,omitempty"` - Subscription string `protobuf:"bytes,2,opt,name=subscription,proto3" json:"subscription,omitempty"` - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - Enabled bool `protobuf:"varint,4,opt,name=enabled,proto3" json:"enabled,omitempty"` - ProjectKeys []ProjectKey `protobuf:"bytes,5,rep,name=project_keys,json=projectKeys,proto3" json:"project_keys"` - Policy Policy 
`protobuf:"bytes,6,opt,name=policy,proto3" json:"policy"` - UsedCu uint64 `protobuf:"varint,7,opt,name=used_cu,json=usedCu,proto3" json:"used_cu,omitempty"` -} - -func (m *Project) Reset() { *m = Project{} } -func (m *Project) String() string { return proto.CompactTextString(m) } -func (*Project) ProtoMessage() {} -func (*Project) Descriptor() ([]byte, []int) { - return fileDescriptor_9f89a31663a330ce, []int{0} -} -func (m *Project) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Project) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Project.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Project) XXX_Merge(src proto.Message) { - xxx_messageInfo_Project.Merge(m, src) -} -func (m *Project) XXX_Size() int { - return m.Size() -} -func (m *Project) XXX_DiscardUnknown() { - xxx_messageInfo_Project.DiscardUnknown(m) -} - -var xxx_messageInfo_Project proto.InternalMessageInfo - -func (m *Project) GetIndex() string { - if m != nil { - return m.Index - } - return "" -} - -func (m *Project) GetSubscription() string { - if m != nil { - return m.Subscription - } - return "" -} - -func (m *Project) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Project) GetEnabled() bool { - if m != nil { - return m.Enabled - } - return false -} - -func (m *Project) GetProjectKeys() []ProjectKey { - if m != nil { - return m.ProjectKeys - } - return nil -} - -func (m *Project) GetPolicy() Policy { - if m != nil { - return m.Policy - } - return Policy{} -} - -func (m *Project) GetUsedCu() uint64 { - if m != nil { - return m.UsedCu - } - return 0 -} - -type ProjectKey struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Types []ProjectKey_KEY_TYPE 
`protobuf:"varint,2,rep,packed,name=types,proto3,enum=lavanet.lava.projects.ProjectKey_KEY_TYPE" json:"types,omitempty"` - Vrfpk string `protobuf:"bytes,3,opt,name=vrfpk,proto3" json:"vrfpk,omitempty"` -} - -func (m *ProjectKey) Reset() { *m = ProjectKey{} } -func (m *ProjectKey) String() string { return proto.CompactTextString(m) } -func (*ProjectKey) ProtoMessage() {} -func (*ProjectKey) Descriptor() ([]byte, []int) { - return fileDescriptor_9f89a31663a330ce, []int{1} -} -func (m *ProjectKey) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ProjectKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ProjectKey.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ProjectKey) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProjectKey.Merge(m, src) -} -func (m *ProjectKey) XXX_Size() int { - return m.Size() -} -func (m *ProjectKey) XXX_DiscardUnknown() { - xxx_messageInfo_ProjectKey.DiscardUnknown(m) -} - -var xxx_messageInfo_ProjectKey proto.InternalMessageInfo - -func (m *ProjectKey) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *ProjectKey) GetTypes() []ProjectKey_KEY_TYPE { - if m != nil { - return m.Types - } - return nil -} - -func (m *ProjectKey) GetVrfpk() string { - if m != nil { - return m.Vrfpk - } - return "" -} - -type Policy struct { - ChainPolicies []ChainPolicy `protobuf:"bytes,1,rep,name=chain_policies,json=chainPolicies,proto3" json:"chain_policies"` - GeolocationProfile uint64 `protobuf:"varint,2,opt,name=geolocation_profile,json=geolocationProfile,proto3" json:"geolocation_profile,omitempty"` - TotalCuLimit uint64 `protobuf:"varint,3,opt,name=total_cu_limit,json=totalCuLimit,proto3" json:"total_cu_limit,omitempty"` - EpochCuLimit uint64 `protobuf:"varint,4,opt,name=epoch_cu_limit,json=epochCuLimit,proto3" 
json:"epoch_cu_limit,omitempty"` - MaxProvidersToPair uint64 `protobuf:"varint,5,opt,name=max_providers_to_pair,json=maxProvidersToPair,proto3" json:"max_providers_to_pair,omitempty"` -} - -func (m *Policy) Reset() { *m = Policy{} } -func (m *Policy) String() string { return proto.CompactTextString(m) } -func (*Policy) ProtoMessage() {} -func (*Policy) Descriptor() ([]byte, []int) { - return fileDescriptor_9f89a31663a330ce, []int{2} -} -func (m *Policy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Policy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Policy.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Policy) XXX_Merge(src proto.Message) { - xxx_messageInfo_Policy.Merge(m, src) -} -func (m *Policy) XXX_Size() int { - return m.Size() -} -func (m *Policy) XXX_DiscardUnknown() { - xxx_messageInfo_Policy.DiscardUnknown(m) -} - -var xxx_messageInfo_Policy proto.InternalMessageInfo - -func (m *Policy) GetChainPolicies() []ChainPolicy { - if m != nil { - return m.ChainPolicies - } - return nil -} - -func (m *Policy) GetGeolocationProfile() uint64 { - if m != nil { - return m.GeolocationProfile - } - return 0 -} - -func (m *Policy) GetTotalCuLimit() uint64 { - if m != nil { - return m.TotalCuLimit - } - return 0 -} - -func (m *Policy) GetEpochCuLimit() uint64 { - if m != nil { - return m.EpochCuLimit - } - return 0 -} - -func (m *Policy) GetMaxProvidersToPair() uint64 { - if m != nil { - return m.MaxProvidersToPair - } - return 0 -} - -type ChainPolicy struct { - ChainId string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - Apis []string `protobuf:"bytes,2,rep,name=apis,proto3" json:"apis,omitempty"` -} - -func (m *ChainPolicy) Reset() { *m = ChainPolicy{} } -func (m *ChainPolicy) String() string { return 
proto.CompactTextString(m) } -func (*ChainPolicy) ProtoMessage() {} -func (*ChainPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_9f89a31663a330ce, []int{3} -} -func (m *ChainPolicy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ChainPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ChainPolicy.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ChainPolicy) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChainPolicy.Merge(m, src) -} -func (m *ChainPolicy) XXX_Size() int { - return m.Size() -} -func (m *ChainPolicy) XXX_DiscardUnknown() { - xxx_messageInfo_ChainPolicy.DiscardUnknown(m) -} - -var xxx_messageInfo_ChainPolicy proto.InternalMessageInfo - -func (m *ChainPolicy) GetChainId() string { - if m != nil { - return m.ChainId - } - return "" -} - -func (m *ChainPolicy) GetApis() []string { - if m != nil { - return m.Apis - } - return nil -} - -type ProtoDeveloperData struct { - ProjectID string `protobuf:"bytes,1,opt,name=projectID,proto3" json:"projectID,omitempty"` - Vrfpk string `protobuf:"bytes,2,opt,name=vrfpk,proto3" json:"vrfpk,omitempty"` -} - -func (m *ProtoDeveloperData) Reset() { *m = ProtoDeveloperData{} } -func (m *ProtoDeveloperData) String() string { return proto.CompactTextString(m) } -func (*ProtoDeveloperData) ProtoMessage() {} -func (*ProtoDeveloperData) Descriptor() ([]byte, []int) { - return fileDescriptor_9f89a31663a330ce, []int{4} -} -func (m *ProtoDeveloperData) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ProtoDeveloperData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ProtoDeveloperData.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], 
nil - } -} -func (m *ProtoDeveloperData) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProtoDeveloperData.Merge(m, src) -} -func (m *ProtoDeveloperData) XXX_Size() int { - return m.Size() -} -func (m *ProtoDeveloperData) XXX_DiscardUnknown() { - xxx_messageInfo_ProtoDeveloperData.DiscardUnknown(m) -} - -var xxx_messageInfo_ProtoDeveloperData proto.InternalMessageInfo - -func (m *ProtoDeveloperData) GetProjectID() string { - if m != nil { - return m.ProjectID - } - return "" -} - -func (m *ProtoDeveloperData) GetVrfpk() string { - if m != nil { - return m.Vrfpk - } - return "" -} - -func init() { - proto.RegisterEnum("lavanet.lava.projects.ProjectKey_KEY_TYPE_V2", ProjectKey_KEY_TYPE_name, ProjectKey_KEY_TYPE_value) - proto.RegisterType((*Project)(nil), "lavanet.lava.projects.Project_V2") - proto.RegisterType((*ProjectKey)(nil), "lavanet.lava.projects.ProjectKey_V2") - proto.RegisterType((*Policy)(nil), "lavanet.lava.projects.Policy_V2") - proto.RegisterType((*ChainPolicy)(nil), "lavanet.lava.projects.ChainPolicy_V2") - proto.RegisterType((*ProtoDeveloperData)(nil), "lavanet.lava.projects.ProtoDeveloperData_V2") -} - -func init() { proto.RegisterFile("projects/project.proto", fileDescriptor_9f89a31663a330ce) } - -var fileDescriptor_9f89a31663a330ce = []byte{ - // 593 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x93, 0xcd, 0x6e, 0xd3, 0x40, - 0x10, 0xc7, 0x63, 0xc7, 0xf9, 0x9a, 0xb4, 0x55, 0xb4, 0xb4, 0x60, 0x10, 0x18, 0x63, 0x71, 0xb0, - 0x38, 0xd8, 0xa2, 0x1c, 0x39, 0x91, 0xc6, 0x88, 0xd2, 0xd2, 0x5a, 0x56, 0x85, 0x54, 0x2e, 0xd6, - 0xc6, 0xde, 0xa6, 0x4b, 0x9d, 0xec, 0xca, 0x1f, 0x51, 0xf2, 0x08, 0xdc, 0x78, 0x05, 0xee, 0x3c, - 0x48, 0x8f, 0x3d, 0x72, 0x42, 0xa8, 0x79, 0x11, 0xe4, 0xf5, 0xe6, 0xa3, 0x12, 0x15, 0x27, 0xef, - 0xfc, 0xe7, 0xe7, 0xdd, 0x99, 0xf9, 0xef, 0xc2, 0x43, 0x9e, 0xb2, 0xaf, 0x24, 0xca, 0x33, 0x57, - 0x2e, 0x1c, 0x9e, 0xb2, 0x9c, 0xa1, 0xbd, 0x04, 0x4f, 0xf1, 0x84, 0xe4, 0x4e, 
0xf9, 0x75, 0x96, - 0xd0, 0x93, 0xdd, 0x11, 0x1b, 0x31, 0x41, 0xb8, 0xe5, 0xaa, 0x82, 0xad, 0x1f, 0x2a, 0xb4, 0xfc, - 0x0a, 0x41, 0xbb, 0xd0, 0xa0, 0x93, 0x98, 0xcc, 0x74, 0xc5, 0x54, 0xec, 0x4e, 0x50, 0x05, 0xc8, - 0x82, 0xad, 0xac, 0x18, 0x66, 0x51, 0x4a, 0x79, 0x4e, 0xd9, 0x44, 0x57, 0x45, 0xf2, 0x8e, 0x86, - 0x4c, 0xe8, 0xc6, 0x64, 0x8d, 0xd4, 0x05, 0xb2, 0x29, 0x21, 0x1d, 0x5a, 0x64, 0x82, 0x87, 0x09, - 0x89, 0x75, 0xcd, 0x54, 0xec, 0x76, 0xb0, 0x0c, 0xd1, 0x47, 0xd8, 0x92, 0x35, 0x86, 0x57, 0x64, - 0x9e, 0xe9, 0x0d, 0xb3, 0x6e, 0x77, 0xf7, 0x5f, 0x38, 0xff, 0xec, 0xc2, 0x91, 0xb5, 0x1e, 0x91, - 0x79, 0x5f, 0xbb, 0xfe, 0xfd, 0xbc, 0x16, 0x74, 0xf9, 0x4a, 0xc9, 0xd0, 0x5b, 0x68, 0x72, 0x96, - 0xd0, 0x68, 0xae, 0x37, 0x4d, 0xc5, 0xee, 0xee, 0x3f, 0xbb, 0x6f, 0x17, 0x01, 0xc9, 0x1d, 0xe4, - 0x2f, 0xe8, 0x11, 0xb4, 0x8a, 0x8c, 0xc4, 0x61, 0x54, 0xe8, 0x2d, 0x53, 0xb1, 0xb5, 0xa0, 0x59, - 0x86, 0x07, 0x85, 0xf5, 0x53, 0x01, 0x58, 0x9f, 0x8b, 0x7a, 0x50, 0xbf, 0x22, 0x73, 0x39, 0xa4, - 0x72, 0x89, 0xde, 0x43, 0x23, 0x9f, 0x73, 0x92, 0xe9, 0xaa, 0x59, 0xb7, 0x77, 0xf6, 0x5f, 0xfd, - 0xb7, 0x76, 0xe7, 0xc8, 0x3b, 0x0f, 0xcf, 0xce, 0x7d, 0x4f, 0x96, 0x50, 0xfd, 0x5e, 0x1a, 0x30, - 0x4d, 0x2f, 0xf8, 0x95, 0x1c, 0x60, 0x15, 0x58, 0x0e, 0xb4, 0x97, 0x38, 0x6a, 0x83, 0x76, 0x72, - 0x7a, 0xe2, 0xf5, 0x6a, 0xa8, 0x03, 0x8d, 0x77, 0x83, 0x4f, 0x87, 0x27, 0x3d, 0x05, 0x6d, 0x43, - 0x67, 0xe0, 0x7d, 0xf6, 0x8e, 0x4f, 0x7d, 0x2f, 0xe8, 0xa9, 0xd6, 0x37, 0x15, 0x9a, 0x55, 0x83, - 0xe8, 0x14, 0x76, 0xa2, 0x4b, 0x4c, 0x27, 0xa1, 0x68, 0x91, 0x92, 0x4c, 0x57, 0xc4, 0x74, 0xad, - 0x7b, 0x2a, 0x3c, 0x28, 0xe1, 0x3b, 0xc3, 0xd9, 0x8e, 0x56, 0x12, 0x25, 0x19, 0x72, 0xe1, 0xc1, - 0x88, 0xb0, 0x84, 0x45, 0xb8, 0x74, 0x35, 0xe4, 0x29, 0xbb, 0xa0, 0x09, 0x11, 0x77, 0x42, 0x0b, - 0xd0, 0x46, 0xca, 0xaf, 0x32, 0xe8, 0x25, 0xec, 0xe4, 0x2c, 0xc7, 0x49, 0x18, 0x15, 0x61, 0x42, - 0xc7, 0x34, 0x17, 0xbd, 0x69, 0xc1, 0x96, 0x50, 0x0f, 0x8a, 0xe3, 0x52, 0x2b, 0x29, 0xc2, 0x59, - 0x74, 0xb9, 0xa6, 
0xb4, 0x8a, 0x12, 0xea, 0x92, 0x7a, 0x0d, 0x7b, 0x63, 0x3c, 0x2b, 0x0f, 0x9d, - 0xd2, 0x98, 0xa4, 0x59, 0x98, 0xb3, 0x90, 0x63, 0x9a, 0xea, 0x8d, 0xea, 0xf8, 0x31, 0x9e, 0xf9, - 0xcb, 0xdc, 0x19, 0xf3, 0x31, 0x4d, 0xad, 0x3e, 0x74, 0x37, 0x7a, 0x42, 0x8f, 0xa1, 0x5d, 0xcd, - 0x83, 0xc6, 0xd2, 0xbf, 0x96, 0x88, 0x0f, 0x63, 0xa4, 0x83, 0x86, 0x39, 0xad, 0x2c, 0xec, 0xc8, - 0xe6, 0x85, 0x62, 0x7d, 0x00, 0xe4, 0x97, 0x6f, 0x65, 0x40, 0xa6, 0x24, 0x61, 0x9c, 0xa4, 0x03, - 0x9c, 0x63, 0xf4, 0x14, 0x3a, 0x72, 0x6c, 0x87, 0x03, 0xb9, 0xd7, 0x5a, 0x58, 0x3b, 0xa9, 0x6e, - 0x38, 0xd9, 0xef, 0x5f, 0xdf, 0x1a, 0xca, 0xcd, 0xad, 0xa1, 0xfc, 0xb9, 0x35, 0x94, 0xef, 0x0b, - 0xa3, 0x76, 0xb3, 0x30, 0x6a, 0xbf, 0x16, 0x46, 0xed, 0x8b, 0x3d, 0xa2, 0xf9, 0x65, 0x31, 0x74, - 0x22, 0x36, 0x76, 0xa5, 0x35, 0xe2, 0xeb, 0xce, 0xdc, 0xd5, 0x2b, 0x17, 0x77, 0x64, 0xd8, 0x14, - 0xef, 0xf6, 0xcd, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa8, 0xe0, 0xc7, 0x3c, 0xfe, 0x03, 0x00, - 0x00, -} - -func (m *Project) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Project) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Project) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.UsedCu != 0 { - i = encodeVarintProject(dAtA, i, uint64(m.UsedCu)) - i-- - dAtA[i] = 0x38 - } - { - size, err := m.Policy.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProject(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - if len(m.ProjectKeys) > 0 { - for iNdEx := len(m.ProjectKeys) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ProjectKeys[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProject(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 
0x2a - } - } - if m.Enabled { - i-- - if m.Enabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if len(m.Description) > 0 { - i -= len(m.Description) - copy(dAtA[i:], m.Description) - i = encodeVarintProject(dAtA, i, uint64(len(m.Description))) - i-- - dAtA[i] = 0x1a - } - if len(m.Subscription) > 0 { - i -= len(m.Subscription) - copy(dAtA[i:], m.Subscription) - i = encodeVarintProject(dAtA, i, uint64(len(m.Subscription))) - i-- - dAtA[i] = 0x12 - } - if len(m.Index) > 0 { - i -= len(m.Index) - copy(dAtA[i:], m.Index) - i = encodeVarintProject(dAtA, i, uint64(len(m.Index))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ProjectKey) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ProjectKey) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ProjectKey) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Vrfpk) > 0 { - i -= len(m.Vrfpk) - copy(dAtA[i:], m.Vrfpk) - i = encodeVarintProject(dAtA, i, uint64(len(m.Vrfpk))) - i-- - dAtA[i] = 0x1a - } - if len(m.Types) > 0 { - dAtA3 := make([]byte, len(m.Types)*10) - var j2 int - for _, num := range m.Types { - for num >= 1<<7 { - dAtA3[j2] = uint8(uint64(num)&0x7f | 0x80) - num >>= 7 - j2++ - } - dAtA3[j2] = uint8(num) - j2++ - } - i -= j2 - copy(dAtA[i:], dAtA3[:j2]) - i = encodeVarintProject(dAtA, i, uint64(j2)) - i-- - dAtA[i] = 0x12 - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintProject(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Policy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, 
err - } - return dAtA[:n], nil -} - -func (m *Policy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Policy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.MaxProvidersToPair != 0 { - i = encodeVarintProject(dAtA, i, uint64(m.MaxProvidersToPair)) - i-- - dAtA[i] = 0x28 - } - if m.EpochCuLimit != 0 { - i = encodeVarintProject(dAtA, i, uint64(m.EpochCuLimit)) - i-- - dAtA[i] = 0x20 - } - if m.TotalCuLimit != 0 { - i = encodeVarintProject(dAtA, i, uint64(m.TotalCuLimit)) - i-- - dAtA[i] = 0x18 - } - if m.GeolocationProfile != 0 { - i = encodeVarintProject(dAtA, i, uint64(m.GeolocationProfile)) - i-- - dAtA[i] = 0x10 - } - if len(m.ChainPolicies) > 0 { - for iNdEx := len(m.ChainPolicies) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ChainPolicies[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProject(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ChainPolicy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ChainPolicy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ChainPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Apis) > 0 { - for iNdEx := len(m.Apis) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Apis[iNdEx]) - copy(dAtA[i:], m.Apis[iNdEx]) - i = encodeVarintProject(dAtA, i, uint64(len(m.Apis[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if len(m.ChainId) > 0 { - i -= len(m.ChainId) - copy(dAtA[i:], m.ChainId) - i = encodeVarintProject(dAtA, i, uint64(len(m.ChainId))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m 
*ProtoDeveloperData) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ProtoDeveloperData) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ProtoDeveloperData) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Vrfpk) > 0 { - i -= len(m.Vrfpk) - copy(dAtA[i:], m.Vrfpk) - i = encodeVarintProject(dAtA, i, uint64(len(m.Vrfpk))) - i-- - dAtA[i] = 0x12 - } - if len(m.ProjectID) > 0 { - i -= len(m.ProjectID) - copy(dAtA[i:], m.ProjectID) - i = encodeVarintProject(dAtA, i, uint64(len(m.ProjectID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintProject(dAtA []byte, offset int, v uint64) int { - offset -= sovProject(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Project) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Index) - if l > 0 { - n += 1 + l + sovProject(uint64(l)) - } - l = len(m.Subscription) - if l > 0 { - n += 1 + l + sovProject(uint64(l)) - } - l = len(m.Description) - if l > 0 { - n += 1 + l + sovProject(uint64(l)) - } - if m.Enabled { - n += 2 - } - if len(m.ProjectKeys) > 0 { - for _, e := range m.ProjectKeys { - l = e.Size() - n += 1 + l + sovProject(uint64(l)) - } - } - l = m.Policy.Size() - n += 1 + l + sovProject(uint64(l)) - if m.UsedCu != 0 { - n += 1 + sovProject(uint64(m.UsedCu)) - } - return n -} - -func (m *ProjectKey) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovProject(uint64(l)) - } - if len(m.Types) > 0 { - l = 0 - for _, e := range m.Types { - l += sovProject(uint64(e)) - } - n += 1 + sovProject(uint64(l)) + l - } - l = 
len(m.Vrfpk) - if l > 0 { - n += 1 + l + sovProject(uint64(l)) - } - return n -} - -func (m *Policy) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ChainPolicies) > 0 { - for _, e := range m.ChainPolicies { - l = e.Size() - n += 1 + l + sovProject(uint64(l)) - } - } - if m.GeolocationProfile != 0 { - n += 1 + sovProject(uint64(m.GeolocationProfile)) - } - if m.TotalCuLimit != 0 { - n += 1 + sovProject(uint64(m.TotalCuLimit)) - } - if m.EpochCuLimit != 0 { - n += 1 + sovProject(uint64(m.EpochCuLimit)) - } - if m.MaxProvidersToPair != 0 { - n += 1 + sovProject(uint64(m.MaxProvidersToPair)) - } - return n -} - -func (m *ChainPolicy) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ChainId) - if l > 0 { - n += 1 + l + sovProject(uint64(l)) - } - if len(m.Apis) > 0 { - for _, s := range m.Apis { - l = len(s) - n += 1 + l + sovProject(uint64(l)) - } - } - return n -} - -func (m *ProtoDeveloperData) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ProjectID) - if l > 0 { - n += 1 + l + sovProject(uint64(l)) - } - l = len(m.Vrfpk) - if l > 0 { - n += 1 + l + sovProject(uint64(l)) - } - return n -} - -func sovProject(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozProject(x uint64) (n int) { - return sovProject(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Project) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Project: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Project: illegal tag %d (wire 
type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Index = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subscription", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Subscription = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProject - 
} - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Enabled = bool(v != 0) - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProjectKeys", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ProjectKeys = append(m.ProjectKeys, ProjectKey{}) - if err := m.ProjectKeys[len(m.ProjectKeys)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Policy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Policy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong 
wireType = %d for field UsedCu", wireType) - } - m.UsedCu = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UsedCu |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipProject(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProject - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ProjectKey) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ProjectKey: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ProjectKey: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - 
case 2: - if wireType == 0 { - var v ProjectKey_KEY_TYPE - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= ProjectKey_KEY_TYPE(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Types = append(m.Types, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - if elementCount != 0 && len(m.Types) == 0 { - m.Types = make([]ProjectKey_KEY_TYPE, 0, elementCount) - } - for iNdEx < postIndex { - var v ProjectKey_KEY_TYPE - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= ProjectKey_KEY_TYPE(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Types = append(m.Types, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field Types", wireType) - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Vrfpk", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - m.Vrfpk = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProject(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProject - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Policy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Policy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Policy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ChainPolicies", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ChainPolicies = append(m.ChainPolicies, ChainPolicy{}) - if err := m.ChainPolicies[len(m.ChainPolicies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GeolocationProfile", wireType) - } - 
m.GeolocationProfile = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GeolocationProfile |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalCuLimit", wireType) - } - m.TotalCuLimit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalCuLimit |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EpochCuLimit", wireType) - } - m.EpochCuLimit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.EpochCuLimit |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxProvidersToPair", wireType) - } - m.MaxProvidersToPair = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxProvidersToPair |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipProject(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProject - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ChainPolicy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 
{ - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ChainPolicy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ChainPolicy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ChainId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Apis", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Apis = append(m.Apis, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProject(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || 
(iNdEx+skippy) < 0 { - return ErrInvalidLengthProject - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ProtoDeveloperData) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ProtoDeveloperData: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ProtoDeveloperData: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProjectID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ProjectID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Vrfpk", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := 
int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Vrfpk = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProject(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProject - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipProject(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowProject - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowProject - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowProject - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthProject - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupProject - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthProject - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( 
- ErrInvalidLengthProject = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowProject = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupProject = fmt.Errorf("proto: unexpected end of group") -) diff --git a/x/projects/migrations/v3/project.pb.go b/x/projects/migrations/v3/project.pb.go deleted file mode 100644 index a29c1ebfe9..0000000000 --- a/x/projects/migrations/v3/project.pb.go +++ /dev/null @@ -1,2375 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: projects/project.proto - -package types - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type ProjectKey_KEY_TYPE int32 - -const ( - ProjectKey_NONE ProjectKey_KEY_TYPE = 0 - ProjectKey_ADMIN ProjectKey_KEY_TYPE = 1 - ProjectKey_DEVELOPER ProjectKey_KEY_TYPE = 2 -) - -var ProjectKey_KEY_TYPE_name = map[int32]string{ - 0: "NONE", - 1: "ADMIN", - 2: "DEVELOPER", -} - -var ProjectKey_KEY_TYPE_value = map[string]int32{ - "NONE": 0, - "ADMIN": 1, - "DEVELOPER": 2, -} - -func (x ProjectKey_KEY_TYPE) String() string { - return proto.EnumName(ProjectKey_KEY_TYPE_name, int32(x)) -} - -func (ProjectKey_KEY_TYPE) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_9f89a31663a330ce, []int{1, 0} -} - -type Project struct { - Index string `protobuf:"bytes,1,opt,name=index,proto3" json:"index,omitempty"` - Subscription string `protobuf:"bytes,2,opt,name=subscription,proto3" json:"subscription,omitempty"` - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - Enabled bool `protobuf:"varint,4,opt,name=enabled,proto3" json:"enabled,omitempty"` - ProjectKeys []ProjectKey `protobuf:"bytes,5,rep,name=project_keys,json=projectKeys,proto3" json:"project_keys"` - AdminPolicy *Policy `protobuf:"bytes,6,opt,name=admin_policy,json=adminPolicy,proto3" json:"admin_policy,omitempty"` - UsedCu uint64 `protobuf:"varint,7,opt,name=used_cu,json=usedCu,proto3" json:"used_cu,omitempty"` - SubscriptionPolicy *Policy `protobuf:"bytes,8,opt,name=subscription_policy,json=subscriptionPolicy,proto3" json:"subscription_policy,omitempty"` - Snapshot uint64 `protobuf:"varint,9,opt,name=snapshot,proto3" json:"snapshot,omitempty"` -} - -func (m *Project) Reset() { *m = Project{} } -func (m *Project) String() string { return proto.CompactTextString(m) } -func (*Project) ProtoMessage() {} -func (*Project) Descriptor() ([]byte, []int) { - return fileDescriptor_9f89a31663a330ce, []int{0} -} -func (m *Project) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} 
-func (m *Project) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Project.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Project) XXX_Merge(src proto.Message) { - xxx_messageInfo_Project.Merge(m, src) -} -func (m *Project) XXX_Size() int { - return m.Size() -} -func (m *Project) XXX_DiscardUnknown() { - xxx_messageInfo_Project.DiscardUnknown(m) -} - -var xxx_messageInfo_Project proto.InternalMessageInfo - -func (m *Project) GetIndex() string { - if m != nil { - return m.Index - } - return "" -} - -func (m *Project) GetSubscription() string { - if m != nil { - return m.Subscription - } - return "" -} - -func (m *Project) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Project) GetEnabled() bool { - if m != nil { - return m.Enabled - } - return false -} - -func (m *Project) GetProjectKeys() []ProjectKey { - if m != nil { - return m.ProjectKeys - } - return nil -} - -func (m *Project) GetAdminPolicy() *Policy { - if m != nil { - return m.AdminPolicy - } - return nil -} - -func (m *Project) GetUsedCu() uint64 { - if m != nil { - return m.UsedCu - } - return 0 -} - -func (m *Project) GetSubscriptionPolicy() *Policy { - if m != nil { - return m.SubscriptionPolicy - } - return nil -} - -func (m *Project) GetSnapshot() uint64 { - if m != nil { - return m.Snapshot - } - return 0 -} - -type ProjectKey struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Types []ProjectKey_KEY_TYPE `protobuf:"varint,2,rep,packed,name=types,proto3,enum=lavanet.lava.projects.ProjectKey_KEY_TYPE" json:"types,omitempty"` -} - -func (m *ProjectKey) Reset() { *m = ProjectKey{} } -func (m *ProjectKey) String() string { return proto.CompactTextString(m) } -func (*ProjectKey) ProtoMessage() {} -func (*ProjectKey) Descriptor() ([]byte, []int) 
{ - return fileDescriptor_9f89a31663a330ce, []int{1} -} -func (m *ProjectKey) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ProjectKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ProjectKey.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ProjectKey) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProjectKey.Merge(m, src) -} -func (m *ProjectKey) XXX_Size() int { - return m.Size() -} -func (m *ProjectKey) XXX_DiscardUnknown() { - xxx_messageInfo_ProjectKey.DiscardUnknown(m) -} - -var xxx_messageInfo_ProjectKey proto.InternalMessageInfo - -func (m *ProjectKey) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *ProjectKey) GetTypes() []ProjectKey_KEY_TYPE { - if m != nil { - return m.Types - } - return nil -} - -// protobuf expected in YAML format: used "moretags" to simplify parsing -type Policy struct { - ChainPolicies []ChainPolicy `protobuf:"bytes,1,rep,name=chain_policies,json=chainPolicies,proto3" json:"chain_policies" mapstructure:"chain_policies"` - GeolocationProfile uint64 `protobuf:"varint,2,opt,name=geolocation_profile,json=geolocationProfile,proto3" json:"geolocation_profile" mapstructure:"geolocation_profile"` - TotalCuLimit uint64 `protobuf:"varint,3,opt,name=total_cu_limit,json=totalCuLimit,proto3" json:"total_cu_limit" mapstructure:"total_cu_limit"` - EpochCuLimit uint64 `protobuf:"varint,4,opt,name=epoch_cu_limit,json=epochCuLimit,proto3" json:"epoch_cu_limit" mapstructure:"epoch_cu_limit"` - MaxProvidersToPair uint64 `protobuf:"varint,5,opt,name=max_providers_to_pair,json=maxProvidersToPair,proto3" json:"max_providers_to_pair" mapstructure:"max_providers_to_pair"` -} - -func (m *Policy) Reset() { *m = Policy{} } -func (m *Policy) String() string { return proto.CompactTextString(m) } -func (*Policy) ProtoMessage() 
{} -func (*Policy) Descriptor() ([]byte, []int) { - return fileDescriptor_9f89a31663a330ce, []int{2} -} -func (m *Policy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Policy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Policy.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Policy) XXX_Merge(src proto.Message) { - xxx_messageInfo_Policy.Merge(m, src) -} -func (m *Policy) XXX_Size() int { - return m.Size() -} -func (m *Policy) XXX_DiscardUnknown() { - xxx_messageInfo_Policy.DiscardUnknown(m) -} - -var xxx_messageInfo_Policy proto.InternalMessageInfo - -func (m *Policy) GetChainPolicies() []ChainPolicy { - if m != nil { - return m.ChainPolicies - } - return nil -} - -func (m *Policy) GetGeolocationProfile() uint64 { - if m != nil { - return m.GeolocationProfile - } - return 0 -} - -func (m *Policy) GetTotalCuLimit() uint64 { - if m != nil { - return m.TotalCuLimit - } - return 0 -} - -func (m *Policy) GetEpochCuLimit() uint64 { - if m != nil { - return m.EpochCuLimit - } - return 0 -} - -func (m *Policy) GetMaxProvidersToPair() uint64 { - if m != nil { - return m.MaxProvidersToPair - } - return 0 -} - -type ChainPolicy struct { - ChainId string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty" mapstructure:"chain_id"` - Apis []string `protobuf:"bytes,2,rep,name=apis,proto3" json:"apis,omitempty" mapstructure:"apis"` -} - -func (m *ChainPolicy) Reset() { *m = ChainPolicy{} } -func (m *ChainPolicy) String() string { return proto.CompactTextString(m) } -func (*ChainPolicy) ProtoMessage() {} -func (*ChainPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_9f89a31663a330ce, []int{3} -} -func (m *ChainPolicy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ChainPolicy) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ChainPolicy.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ChainPolicy) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChainPolicy.Merge(m, src) -} -func (m *ChainPolicy) XXX_Size() int { - return m.Size() -} -func (m *ChainPolicy) XXX_DiscardUnknown() { - xxx_messageInfo_ChainPolicy.DiscardUnknown(m) -} - -var xxx_messageInfo_ChainPolicy proto.InternalMessageInfo - -func (m *ChainPolicy) GetChainId() string { - if m != nil { - return m.ChainId - } - return "" -} - -func (m *ChainPolicy) GetApis() []string { - if m != nil { - return m.Apis - } - return nil -} - -type ProtoDeveloperData struct { - ProjectID string `protobuf:"bytes,1,opt,name=projectID,proto3" json:"projectID,omitempty"` -} - -func (m *ProtoDeveloperData) Reset() { *m = ProtoDeveloperData{} } -func (m *ProtoDeveloperData) String() string { return proto.CompactTextString(m) } -func (*ProtoDeveloperData) ProtoMessage() {} -func (*ProtoDeveloperData) Descriptor() ([]byte, []int) { - return fileDescriptor_9f89a31663a330ce, []int{4} -} -func (m *ProtoDeveloperData) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ProtoDeveloperData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ProtoDeveloperData.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ProtoDeveloperData) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProtoDeveloperData.Merge(m, src) -} -func (m *ProtoDeveloperData) XXX_Size() int { - return m.Size() -} -func (m *ProtoDeveloperData) XXX_DiscardUnknown() { - xxx_messageInfo_ProtoDeveloperData.DiscardUnknown(m) -} - -var xxx_messageInfo_ProtoDeveloperData proto.InternalMessageInfo - -func 
(m *ProtoDeveloperData) GetProjectID() string { - if m != nil { - return m.ProjectID - } - return "" -} - -// used as a container struct for the subscription module -type ProjectData struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - Enabled bool `protobuf:"varint,3,opt,name=enabled,proto3" json:"enabled,omitempty"` - ProjectKeys []ProjectKey `protobuf:"bytes,4,rep,name=projectKeys,proto3" json:"projectKeys"` - Policy *Policy `protobuf:"bytes,5,opt,name=policy,proto3" json:"policy,omitempty"` -} - -func (m *ProjectData) Reset() { *m = ProjectData{} } -func (m *ProjectData) String() string { return proto.CompactTextString(m) } -func (*ProjectData) ProtoMessage() {} -func (*ProjectData) Descriptor() ([]byte, []int) { - return fileDescriptor_9f89a31663a330ce, []int{5} -} -func (m *ProjectData) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ProjectData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ProjectData.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ProjectData) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProjectData.Merge(m, src) -} -func (m *ProjectData) XXX_Size() int { - return m.Size() -} -func (m *ProjectData) XXX_DiscardUnknown() { - xxx_messageInfo_ProjectData.DiscardUnknown(m) -} - -var xxx_messageInfo_ProjectData proto.InternalMessageInfo - -func (m *ProjectData) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *ProjectData) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *ProjectData) GetEnabled() bool { - if m != nil { - return m.Enabled - } - return false -} - -func (m *ProjectData) GetProjectKeys() []ProjectKey { - if m != nil { 
- return m.ProjectKeys - } - return nil -} - -func (m *ProjectData) GetPolicy() *Policy { - if m != nil { - return m.Policy - } - return nil -} - -func init() { - proto.RegisterEnum("lavanet.lava.projects.ProjectKey_KEY_TYPE_V3", ProjectKey_KEY_TYPE_name, ProjectKey_KEY_TYPE_value) - proto.RegisterType((*Project)(nil), "lavanet.lava.projects.Project_V3") - proto.RegisterType((*ProjectKey)(nil), "lavanet.lava.projects.ProjectKey_V3") - proto.RegisterType((*Policy)(nil), "lavanet.lava.projects.Policy_V3") - proto.RegisterType((*ChainPolicy)(nil), "lavanet.lava.projects.ChainPolicy_V3") - proto.RegisterType((*ProtoDeveloperData)(nil), "lavanet.lava.projects.ProtoDeveloperData_V3") - proto.RegisterType((*ProjectData)(nil), "lavanet.lava.projects.ProjectData_V3") -} - -func init() { proto.RegisterFile("projects/project.proto", fileDescriptor_9f89a31663a330ce) } - -var fileDescriptor_9f89a31663a330ce = []byte{ - // 777 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x4f, 0xab, 0xdb, 0x46, - 0x10, 0xb7, 0x2c, 0xd9, 0x96, 0xc7, 0xce, 0xc3, 0xec, 0x4b, 0x1a, 0x91, 0x34, 0x96, 0xbb, 0xb4, - 0x60, 0x7a, 0x90, 0xe1, 0x85, 0x96, 0x52, 0x28, 0x34, 0x7e, 0x76, 0xc0, 0x2f, 0xa9, 0x63, 0x44, - 0x28, 0xbc, 0x5e, 0xc4, 0x5a, 0xda, 0xda, 0xdb, 0xc8, 0x5a, 0xa1, 0x3f, 0xc6, 0xfe, 0x16, 0xa5, - 0x87, 0x9e, 0xfa, 0x01, 0xfa, 0x51, 0x72, 0xcc, 0xb1, 0x27, 0xb5, 0xf8, 0xdd, 0xde, 0xd1, 0x9f, - 0xa0, 0x68, 0x25, 0xff, 0x51, 0xe2, 0xb4, 0x81, 0x9c, 0x34, 0xf3, 0x9b, 0xdf, 0xcc, 0xec, 0xec, - 0x8c, 0x66, 0xe1, 0x13, 0x3f, 0xe0, 0xbf, 0x50, 0x3b, 0x0a, 0x7b, 0xb9, 0x60, 0xf8, 0x01, 0x8f, - 0x38, 0xba, 0xe7, 0x92, 0x25, 0xf1, 0x68, 0x64, 0xa4, 0x5f, 0x63, 0x47, 0x7a, 0x70, 0x77, 0xc6, - 0x67, 0x5c, 0x30, 0x7a, 0xa9, 0x94, 0x91, 0xf1, 0x6f, 0x32, 0xd4, 0x26, 0x19, 0x05, 0xdd, 0x85, - 0x0a, 0xf3, 0x1c, 0xba, 0xd2, 0xa4, 0x8e, 0xd4, 0xad, 0x9b, 0x99, 0x82, 0x30, 0x34, 0xc3, 0x78, - 0x1a, 0xda, 0x01, 0xf3, 0x23, 0xc6, 0x3d, 0xad, 
0x2c, 0x8c, 0x05, 0x0c, 0x75, 0xa0, 0xe1, 0xd0, - 0x03, 0x45, 0x16, 0x94, 0x63, 0x08, 0x69, 0x50, 0xa3, 0x1e, 0x99, 0xba, 0xd4, 0xd1, 0x94, 0x8e, - 0xd4, 0x55, 0xcd, 0x9d, 0x8a, 0xae, 0xa0, 0x99, 0x9f, 0xd1, 0x7a, 0x45, 0xd7, 0xa1, 0x56, 0xe9, - 0xc8, 0xdd, 0xc6, 0xc5, 0x67, 0xc6, 0xc9, 0x2a, 0x8c, 0xfc, 0xac, 0xcf, 0xe8, 0xba, 0xaf, 0xbc, - 0x4e, 0xf4, 0x92, 0xd9, 0xf0, 0xf7, 0x48, 0x88, 0xbe, 0x87, 0x26, 0x71, 0x16, 0xcc, 0xb3, 0x7c, - 0xee, 0x32, 0x7b, 0xad, 0x55, 0x3b, 0x52, 0xb7, 0x71, 0xf1, 0xe8, 0x7d, 0xb1, 0x04, 0xc9, 0x6c, - 0x08, 0x97, 0x4c, 0x41, 0xf7, 0xa1, 0x16, 0x87, 0xd4, 0xb1, 0xec, 0x58, 0xab, 0x75, 0xa4, 0xae, - 0x62, 0x56, 0x53, 0xf5, 0x32, 0x46, 0x63, 0x38, 0x3f, 0x2e, 0x79, 0x97, 0x41, 0xfd, 0x90, 0x0c, - 0xe8, 0xd8, 0x33, 0x4f, 0xf4, 0x00, 0xd4, 0xd0, 0x23, 0x7e, 0x38, 0xe7, 0x91, 0x56, 0x17, 0x99, - 0xf6, 0x3a, 0xfe, 0x43, 0x02, 0x38, 0x14, 0x8a, 0x5a, 0x20, 0xbf, 0xa2, 0xeb, 0xbc, 0x2b, 0xa9, - 0x88, 0x9e, 0x42, 0x25, 0x5a, 0xfb, 0x34, 0xd4, 0xca, 0x1d, 0xb9, 0x7b, 0x76, 0xf1, 0xe5, 0xff, - 0x5e, 0x96, 0xf1, 0x6c, 0x78, 0x6d, 0xbd, 0xbc, 0x9e, 0x0c, 0xf3, 0x5b, 0xcb, 0xdc, 0xb1, 0x01, - 0xea, 0xce, 0x80, 0x54, 0x50, 0xc6, 0x2f, 0xc6, 0xc3, 0x56, 0x09, 0xd5, 0xa1, 0xf2, 0x64, 0xf0, - 0xc3, 0x68, 0xdc, 0x92, 0xd0, 0x1d, 0xa8, 0x0f, 0x86, 0x3f, 0x0e, 0x9f, 0xbf, 0x98, 0x0c, 0xcd, - 0x56, 0xf9, 0x4a, 0x51, 0xe5, 0x96, 0x82, 0x7f, 0x57, 0xa0, 0x9a, 0x57, 0xe1, 0xc3, 0x99, 0x3d, - 0x27, 0xbb, 0x0b, 0x67, 0x34, 0xd4, 0x24, 0xd1, 0x3e, 0xfc, 0x9e, 0x13, 0x5d, 0xa6, 0xe4, 0xcc, - 0xb7, 0xff, 0x45, 0x7a, 0x92, 0x6d, 0xa2, 0x3f, 0x5a, 0x10, 0x3f, 0x8c, 0x82, 0xd8, 0x8e, 0xe2, - 0x80, 0x7e, 0x8b, 0x8b, 0xf1, 0xb0, 0x79, 0xc7, 0xde, 0xfb, 0x30, 0x1a, 0x22, 0x0f, 0xce, 0x67, - 0x94, 0xbb, 0xdc, 0x26, 0x59, 0x1b, 0x02, 0xfe, 0x33, 0x73, 0xa9, 0x98, 0x4a, 0xa5, 0xff, 0xdd, - 0x6d, 0xa2, 0x9f, 0x32, 0x6f, 0x13, 0x1d, 0x17, 0xb3, 0x9c, 0x20, 0x61, 0x13, 0x1d, 0xa1, 0x93, - 0x0c, 0x44, 0xd7, 0x70, 0x16, 0xf1, 0x88, 0xb8, 0x96, 0x1d, 0x5b, 0x2e, 0x5b, 0xb0, 
0x48, 0x4c, - 0xb7, 0xd2, 0x7f, 0x7c, 0x9b, 0xe8, 0x6f, 0x59, 0xde, 0xad, 0xa5, 0x68, 0xc7, 0x66, 0x53, 0x00, - 0x97, 0xf1, 0xf3, 0x54, 0x4d, 0x43, 0x53, 0x9f, 0xdb, 0xf3, 0x43, 0x68, 0xe5, 0x10, 0xba, 0x68, - 0x79, 0x37, 0x74, 0xd1, 0x8e, 0xcd, 0xa6, 0x00, 0x76, 0xa1, 0x23, 0xb8, 0xb7, 0x20, 0xab, 0xb4, - 0xb2, 0x25, 0x73, 0x68, 0x10, 0x5a, 0x11, 0xb7, 0x7c, 0xc2, 0x02, 0xad, 0x22, 0x32, 0x3c, 0xb9, - 0x4d, 0xf4, 0xd3, 0x84, 0x6d, 0xa2, 0x7f, 0x5e, 0x4c, 0x74, 0x92, 0x86, 0x4d, 0xb4, 0x20, 0xab, - 0xc9, 0x0e, 0x7e, 0xc9, 0x27, 0x29, 0xb8, 0x84, 0xc6, 0x51, 0x83, 0xd1, 0xd7, 0xa0, 0x66, 0xcd, - 0x64, 0x4e, 0x36, 0xbc, 0xfd, 0x87, 0xdb, 0x44, 0xbf, 0x7f, 0xaa, 0xdd, 0xcc, 0xc1, 0x66, 0x4d, - 0x88, 0x23, 0x07, 0xf5, 0x40, 0x21, 0x3e, 0xcb, 0x86, 0xbb, 0xde, 0x7f, 0x98, 0x8f, 0xc9, 0x79, - 0xd1, 0x2f, 0x65, 0x60, 0x53, 0x10, 0xf1, 0x37, 0x80, 0x26, 0xe9, 0x36, 0x1b, 0xd0, 0x25, 0x75, - 0xb9, 0x4f, 0x83, 0x01, 0x89, 0x08, 0xfa, 0x14, 0xea, 0xf9, 0xdc, 0x8d, 0x06, 0xf9, 0xcf, 0x73, - 0x00, 0xae, 0x14, 0xb5, 0xdc, 0x92, 0xf1, 0xdf, 0x12, 0x34, 0xf2, 0xbf, 0x44, 0xf8, 0x20, 0x50, - 0x3c, 0xb2, 0xa0, 0x39, 0x5d, 0xc8, 0x6f, 0x2f, 0xb7, 0xf2, 0x7f, 0x2e, 0x37, 0xb9, 0xb8, 0xdc, - 0x46, 0x70, 0xbc, 0x9f, 0x34, 0xe5, 0x23, 0x76, 0xdb, 0x57, 0x50, 0xcd, 0x77, 0x4e, 0xe5, 0x43, - 0x76, 0x4e, 0x4e, 0xee, 0x3f, 0xfd, 0x73, 0xd3, 0x96, 0x5e, 0x6f, 0xda, 0xd2, 0x9b, 0x4d, 0x5b, - 0xfa, 0x67, 0xd3, 0x96, 0x7e, 0xbd, 0x69, 0x97, 0xde, 0xdc, 0xb4, 0x4b, 0x7f, 0xdd, 0xb4, 0x4b, - 0x3f, 0x75, 0x67, 0x2c, 0x9a, 0xc7, 0x53, 0xc3, 0xe6, 0x8b, 0x5e, 0x1e, 0x4e, 0x7c, 0x7b, 0xab, - 0xde, 0xfe, 0x75, 0x11, 0xab, 0x62, 0x5a, 0x15, 0xef, 0xc5, 0xe3, 0x7f, 0x03, 0x00, 0x00, 0xff, - 0xff, 0xc4, 0x11, 0xe0, 0xc1, 0x76, 0x06, 0x00, 0x00, -} - -func (this *Project) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Project) - if !ok { - that2, ok := that.(Project) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return 
this == nil - } else if this == nil { - return false - } - if this.Index != that1.Index { - return false - } - if this.Subscription != that1.Subscription { - return false - } - if this.Description != that1.Description { - return false - } - if this.Enabled != that1.Enabled { - return false - } - if len(this.ProjectKeys) != len(that1.ProjectKeys) { - return false - } - for i := range this.ProjectKeys { - if !this.ProjectKeys[i].Equal(&that1.ProjectKeys[i]) { - return false - } - } - if !this.AdminPolicy.Equal(that1.AdminPolicy) { - return false - } - if this.UsedCu != that1.UsedCu { - return false - } - if !this.SubscriptionPolicy.Equal(that1.SubscriptionPolicy) { - return false - } - if this.Snapshot != that1.Snapshot { - return false - } - return true -} -func (this *ProjectKey) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ProjectKey) - if !ok { - that2, ok := that.(ProjectKey) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Key != that1.Key { - return false - } - if len(this.Types) != len(that1.Types) { - return false - } - for i := range this.Types { - if this.Types[i] != that1.Types[i] { - return false - } - } - return true -} -func (this *Policy) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Policy) - if !ok { - that2, ok := that.(Policy) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.ChainPolicies) != len(that1.ChainPolicies) { - return false - } - for i := range this.ChainPolicies { - if !this.ChainPolicies[i].Equal(&that1.ChainPolicies[i]) { - return false - } - } - if this.GeolocationProfile != that1.GeolocationProfile { - return false - } - if this.TotalCuLimit != that1.TotalCuLimit { - return false - } - if this.EpochCuLimit != 
that1.EpochCuLimit { - return false - } - if this.MaxProvidersToPair != that1.MaxProvidersToPair { - return false - } - return true -} -func (this *ChainPolicy) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ChainPolicy) - if !ok { - that2, ok := that.(ChainPolicy) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.ChainId != that1.ChainId { - return false - } - if len(this.Apis) != len(that1.Apis) { - return false - } - for i := range this.Apis { - if this.Apis[i] != that1.Apis[i] { - return false - } - } - return true -} -func (this *ProtoDeveloperData) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ProtoDeveloperData) - if !ok { - that2, ok := that.(ProtoDeveloperData) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.ProjectID != that1.ProjectID { - return false - } - return true -} -func (this *ProjectData) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ProjectData) - if !ok { - that2, ok := that.(ProjectData) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Name != that1.Name { - return false - } - if this.Description != that1.Description { - return false - } - if this.Enabled != that1.Enabled { - return false - } - if len(this.ProjectKeys) != len(that1.ProjectKeys) { - return false - } - for i := range this.ProjectKeys { - if !this.ProjectKeys[i].Equal(&that1.ProjectKeys[i]) { - return false - } - } - if !this.Policy.Equal(that1.Policy) { - return false - } - return true -} -func (m *Project) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Project) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Project) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Snapshot != 0 { - i = encodeVarintProject(dAtA, i, uint64(m.Snapshot)) - i-- - dAtA[i] = 0x48 - } - if m.SubscriptionPolicy != nil { - { - size, err := m.SubscriptionPolicy.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProject(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - if m.UsedCu != 0 { - i = encodeVarintProject(dAtA, i, uint64(m.UsedCu)) - i-- - dAtA[i] = 0x38 - } - if m.AdminPolicy != nil { - { - size, err := m.AdminPolicy.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProject(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - if len(m.ProjectKeys) > 0 { - for iNdEx := len(m.ProjectKeys) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ProjectKeys[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProject(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - } - if m.Enabled { - i-- - if m.Enabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if len(m.Description) > 0 { - i -= len(m.Description) - copy(dAtA[i:], m.Description) - i = encodeVarintProject(dAtA, i, uint64(len(m.Description))) - i-- - dAtA[i] = 0x1a - } - if len(m.Subscription) > 0 { - i -= len(m.Subscription) - copy(dAtA[i:], m.Subscription) - i = encodeVarintProject(dAtA, i, uint64(len(m.Subscription))) - i-- - dAtA[i] = 0x12 - } - if len(m.Index) > 0 { - i -= len(m.Index) - copy(dAtA[i:], m.Index) - i = encodeVarintProject(dAtA, i, uint64(len(m.Index))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ProjectKey) Marshal() (dAtA 
[]byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ProjectKey) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ProjectKey) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Types) > 0 { - dAtA4 := make([]byte, len(m.Types)*10) - var j3 int - for _, num := range m.Types { - for num >= 1<<7 { - dAtA4[j3] = uint8(uint64(num)&0x7f | 0x80) - num >>= 7 - j3++ - } - dAtA4[j3] = uint8(num) - j3++ - } - i -= j3 - copy(dAtA[i:], dAtA4[:j3]) - i = encodeVarintProject(dAtA, i, uint64(j3)) - i-- - dAtA[i] = 0x12 - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintProject(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Policy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Policy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Policy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.MaxProvidersToPair != 0 { - i = encodeVarintProject(dAtA, i, uint64(m.MaxProvidersToPair)) - i-- - dAtA[i] = 0x28 - } - if m.EpochCuLimit != 0 { - i = encodeVarintProject(dAtA, i, uint64(m.EpochCuLimit)) - i-- - dAtA[i] = 0x20 - } - if m.TotalCuLimit != 0 { - i = encodeVarintProject(dAtA, i, uint64(m.TotalCuLimit)) - i-- - dAtA[i] = 0x18 - } - if m.GeolocationProfile != 0 { - i = encodeVarintProject(dAtA, i, uint64(m.GeolocationProfile)) - i-- - dAtA[i] = 0x10 - } - if len(m.ChainPolicies) > 0 { - for iNdEx := len(m.ChainPolicies) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := 
m.ChainPolicies[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProject(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ChainPolicy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ChainPolicy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ChainPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Apis) > 0 { - for iNdEx := len(m.Apis) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Apis[iNdEx]) - copy(dAtA[i:], m.Apis[iNdEx]) - i = encodeVarintProject(dAtA, i, uint64(len(m.Apis[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if len(m.ChainId) > 0 { - i -= len(m.ChainId) - copy(dAtA[i:], m.ChainId) - i = encodeVarintProject(dAtA, i, uint64(len(m.ChainId))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ProtoDeveloperData) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ProtoDeveloperData) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ProtoDeveloperData) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ProjectID) > 0 { - i -= len(m.ProjectID) - copy(dAtA[i:], m.ProjectID) - i = encodeVarintProject(dAtA, i, uint64(len(m.ProjectID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ProjectData) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - 
} - return dAtA[:n], nil -} - -func (m *ProjectData) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ProjectData) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Policy != nil { - { - size, err := m.Policy.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProject(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if len(m.ProjectKeys) > 0 { - for iNdEx := len(m.ProjectKeys) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ProjectKeys[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProject(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - if m.Enabled { - i-- - if m.Enabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if len(m.Description) > 0 { - i -= len(m.Description) - copy(dAtA[i:], m.Description) - i = encodeVarintProject(dAtA, i, uint64(len(m.Description))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintProject(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintProject(dAtA []byte, offset int, v uint64) int { - offset -= sovProject(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Project) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Index) - if l > 0 { - n += 1 + l + sovProject(uint64(l)) - } - l = len(m.Subscription) - if l > 0 { - n += 1 + l + sovProject(uint64(l)) - } - l = len(m.Description) - if l > 0 { - n += 1 + l + sovProject(uint64(l)) - } - if m.Enabled { - n += 2 - } - if len(m.ProjectKeys) > 0 { - for _, e := range m.ProjectKeys { - l = e.Size() - n += 1 + l + sovProject(uint64(l)) - } - } - if m.AdminPolicy != nil { - l = 
m.AdminPolicy.Size() - n += 1 + l + sovProject(uint64(l)) - } - if m.UsedCu != 0 { - n += 1 + sovProject(uint64(m.UsedCu)) - } - if m.SubscriptionPolicy != nil { - l = m.SubscriptionPolicy.Size() - n += 1 + l + sovProject(uint64(l)) - } - if m.Snapshot != 0 { - n += 1 + sovProject(uint64(m.Snapshot)) - } - return n -} - -func (m *ProjectKey) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovProject(uint64(l)) - } - if len(m.Types) > 0 { - l = 0 - for _, e := range m.Types { - l += sovProject(uint64(e)) - } - n += 1 + sovProject(uint64(l)) + l - } - return n -} - -func (m *Policy) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ChainPolicies) > 0 { - for _, e := range m.ChainPolicies { - l = e.Size() - n += 1 + l + sovProject(uint64(l)) - } - } - if m.GeolocationProfile != 0 { - n += 1 + sovProject(uint64(m.GeolocationProfile)) - } - if m.TotalCuLimit != 0 { - n += 1 + sovProject(uint64(m.TotalCuLimit)) - } - if m.EpochCuLimit != 0 { - n += 1 + sovProject(uint64(m.EpochCuLimit)) - } - if m.MaxProvidersToPair != 0 { - n += 1 + sovProject(uint64(m.MaxProvidersToPair)) - } - return n -} - -func (m *ChainPolicy) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ChainId) - if l > 0 { - n += 1 + l + sovProject(uint64(l)) - } - if len(m.Apis) > 0 { - for _, s := range m.Apis { - l = len(s) - n += 1 + l + sovProject(uint64(l)) - } - } - return n -} - -func (m *ProtoDeveloperData) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ProjectID) - if l > 0 { - n += 1 + l + sovProject(uint64(l)) - } - return n -} - -func (m *ProjectData) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovProject(uint64(l)) - } - l = len(m.Description) - if l > 0 { - n += 1 + l + sovProject(uint64(l)) - } - if m.Enabled { - n += 2 - } - if len(m.ProjectKeys) > 0 { - for _, e := 
range m.ProjectKeys { - l = e.Size() - n += 1 + l + sovProject(uint64(l)) - } - } - if m.Policy != nil { - l = m.Policy.Size() - n += 1 + l + sovProject(uint64(l)) - } - return n -} - -func sovProject(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozProject(x uint64) (n int) { - return sovProject(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Project) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Project: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Project: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Index = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subscription", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } 
- b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Subscription = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Enabled = bool(v != 0) - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProjectKeys", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return 
ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ProjectKeys = append(m.ProjectKeys, ProjectKey{}) - if err := m.ProjectKeys[len(m.ProjectKeys)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AdminPolicy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AdminPolicy == nil { - m.AdminPolicy = &Policy{} - } - if err := m.AdminPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UsedCu", wireType) - } - m.UsedCu = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UsedCu |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SubscriptionPolicy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SubscriptionPolicy == nil { - 
m.SubscriptionPolicy = &Policy{} - } - if err := m.SubscriptionPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Snapshot", wireType) - } - m.Snapshot = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Snapshot |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipProject(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProject - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ProjectKey) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ProjectKey: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ProjectKey: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return 
ErrInvalidLengthProject - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType == 0 { - var v ProjectKey_KEY_TYPE - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= ProjectKey_KEY_TYPE(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Types = append(m.Types, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - if elementCount != 0 && len(m.Types) == 0 { - m.Types = make([]ProjectKey_KEY_TYPE, 0, elementCount) - } - for iNdEx < postIndex { - var v ProjectKey_KEY_TYPE - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= ProjectKey_KEY_TYPE(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Types = append(m.Types, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field Types", wireType) - } - default: - iNdEx = preIndex - skippy, err := skipProject(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProject - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Policy) 
Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Policy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Policy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ChainPolicies", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ChainPolicies = append(m.ChainPolicies, ChainPolicy{}) - if err := m.ChainPolicies[len(m.ChainPolicies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GeolocationProfile", wireType) - } - m.GeolocationProfile = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GeolocationProfile |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalCuLimit", wireType) - } - m.TotalCuLimit = 0 - for shift := uint(0); ; shift += 7 { - if 
shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalCuLimit |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EpochCuLimit", wireType) - } - m.EpochCuLimit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.EpochCuLimit |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxProvidersToPair", wireType) - } - m.MaxProvidersToPair = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxProvidersToPair |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipProject(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProject - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ChainPolicy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ChainPolicy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ChainPolicy: illegal tag %d (wire type %d)", fieldNum, wire) - } - 
switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ChainId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Apis", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Apis = append(m.Apis, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProject(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProject - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ProtoDeveloperData) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { 
- return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ProtoDeveloperData: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ProtoDeveloperData: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProjectID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ProjectID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProject(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProject - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ProjectData) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ProjectData: wiretype end group for 
non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ProjectData: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Enabled = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
ProjectKeys", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ProjectKeys = append(m.ProjectKeys, ProjectKey{}) - if err := m.ProjectKeys[len(m.ProjectKeys)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Policy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Policy == nil { - m.Policy = &Policy{} - } - if err := m.Policy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProject(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProject - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipProject(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowProject - } - if iNdEx >= l { - return 0, 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowProject - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowProject - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthProject - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupProject - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthProject - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthProject = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowProject = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupProject = fmt.Errorf("proto: unexpected end of group") -) diff --git a/x/projects/migrations/v4/project.pb.go b/x/projects/migrations/v4/project.pb.go deleted file mode 100644 index f1c0d4cdba..0000000000 --- a/x/projects/migrations/v4/project.pb.go +++ /dev/null @@ -1,2375 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: projects/project.proto - -package types - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type ProjectKey_KEY_TYPE int32 - -const ( - ProjectKey_NONE ProjectKey_KEY_TYPE = 0 - ProjectKey_ADMIN ProjectKey_KEY_TYPE = 1 - ProjectKey_DEVELOPER ProjectKey_KEY_TYPE = 2 -) - -var ProjectKey_KEY_TYPE_name = map[int32]string{ - 0: "NONE", - 1: "ADMIN", - 2: "DEVELOPER", -} - -var ProjectKey_KEY_TYPE_value = map[string]int32{ - "NONE": 0, - "ADMIN": 1, - "DEVELOPER": 2, -} - -func (x ProjectKey_KEY_TYPE) String() string { - return proto.EnumName(ProjectKey_KEY_TYPE_name, int32(x)) -} - -func (ProjectKey_KEY_TYPE) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_9f89a31663a330ce, []int{1, 0} -} - -type Project struct { - Index string `protobuf:"bytes,1,opt,name=index,proto3" json:"index,omitempty"` - Subscription string `protobuf:"bytes,2,opt,name=subscription,proto3" json:"subscription,omitempty"` - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - Enabled bool `protobuf:"varint,4,opt,name=enabled,proto3" json:"enabled,omitempty"` - ProjectKeys []ProjectKey `protobuf:"bytes,5,rep,name=project_keys,json=projectKeys,proto3" json:"project_keys"` - AdminPolicy *Policy `protobuf:"bytes,6,opt,name=admin_policy,json=adminPolicy,proto3" json:"admin_policy,omitempty"` - UsedCu uint64 `protobuf:"varint,7,opt,name=used_cu,json=usedCu,proto3" json:"used_cu,omitempty"` - SubscriptionPolicy *Policy `protobuf:"bytes,8,opt,name=subscription_policy,json=subscriptionPolicy,proto3" json:"subscription_policy,omitempty"` - Snapshot uint64 `protobuf:"varint,9,opt,name=snapshot,proto3" json:"snapshot,omitempty"` -} - -func (m *Project) 
Reset() { *m = Project{} } -func (m *Project) String() string { return proto.CompactTextString(m) } -func (*Project) ProtoMessage() {} -func (*Project) Descriptor() ([]byte, []int) { - return fileDescriptor_9f89a31663a330ce, []int{0} -} -func (m *Project) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Project) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Project.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Project) XXX_Merge(src proto.Message) { - xxx_messageInfo_Project.Merge(m, src) -} -func (m *Project) XXX_Size() int { - return m.Size() -} -func (m *Project) XXX_DiscardUnknown() { - xxx_messageInfo_Project.DiscardUnknown(m) -} - -var xxx_messageInfo_Project proto.InternalMessageInfo - -func (m *Project) GetIndex() string { - if m != nil { - return m.Index - } - return "" -} - -func (m *Project) GetSubscription() string { - if m != nil { - return m.Subscription - } - return "" -} - -func (m *Project) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Project) GetEnabled() bool { - if m != nil { - return m.Enabled - } - return false -} - -func (m *Project) GetProjectKeys() []ProjectKey { - if m != nil { - return m.ProjectKeys - } - return nil -} - -func (m *Project) GetAdminPolicy() *Policy { - if m != nil { - return m.AdminPolicy - } - return nil -} - -func (m *Project) GetUsedCu() uint64 { - if m != nil { - return m.UsedCu - } - return 0 -} - -func (m *Project) GetSubscriptionPolicy() *Policy { - if m != nil { - return m.SubscriptionPolicy - } - return nil -} - -func (m *Project) GetSnapshot() uint64 { - if m != nil { - return m.Snapshot - } - return 0 -} - -type ProjectKey struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Types []ProjectKey_KEY_TYPE 
`protobuf:"varint,2,rep,packed,name=types,proto3,enum=lavanet.lava.projects.ProjectKey_KEY_TYPE" json:"types,omitempty"` -} - -func (m *ProjectKey) Reset() { *m = ProjectKey{} } -func (m *ProjectKey) String() string { return proto.CompactTextString(m) } -func (*ProjectKey) ProtoMessage() {} -func (*ProjectKey) Descriptor() ([]byte, []int) { - return fileDescriptor_9f89a31663a330ce, []int{1} -} -func (m *ProjectKey) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ProjectKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ProjectKey.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ProjectKey) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProjectKey.Merge(m, src) -} -func (m *ProjectKey) XXX_Size() int { - return m.Size() -} -func (m *ProjectKey) XXX_DiscardUnknown() { - xxx_messageInfo_ProjectKey.DiscardUnknown(m) -} - -var xxx_messageInfo_ProjectKey proto.InternalMessageInfo - -func (m *ProjectKey) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *ProjectKey) GetTypes() []ProjectKey_KEY_TYPE { - if m != nil { - return m.Types - } - return nil -} - -// protobuf expected in YAML format: used "moretags" to simplify parsing -type Policy struct { - ChainPolicies []ChainPolicy `protobuf:"bytes,1,rep,name=chain_policies,json=chainPolicies,proto3" json:"chain_policies" mapstructure:"chain_policies"` - GeolocationProfile uint64 `protobuf:"varint,2,opt,name=geolocation_profile,json=geolocationProfile,proto3" json:"geolocation_profile" mapstructure:"geolocation_profile"` - TotalCuLimit uint64 `protobuf:"varint,3,opt,name=total_cu_limit,json=totalCuLimit,proto3" json:"total_cu_limit" mapstructure:"total_cu_limit"` - EpochCuLimit uint64 `protobuf:"varint,4,opt,name=epoch_cu_limit,json=epochCuLimit,proto3" json:"epoch_cu_limit" 
mapstructure:"epoch_cu_limit"` - MaxProvidersToPair uint64 `protobuf:"varint,5,opt,name=max_providers_to_pair,json=maxProvidersToPair,proto3" json:"max_providers_to_pair" mapstructure:"max_providers_to_pair"` -} - -func (m *Policy) Reset() { *m = Policy{} } -func (m *Policy) String() string { return proto.CompactTextString(m) } -func (*Policy) ProtoMessage() {} -func (*Policy) Descriptor() ([]byte, []int) { - return fileDescriptor_9f89a31663a330ce, []int{2} -} -func (m *Policy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Policy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Policy.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Policy) XXX_Merge(src proto.Message) { - xxx_messageInfo_Policy.Merge(m, src) -} -func (m *Policy) XXX_Size() int { - return m.Size() -} -func (m *Policy) XXX_DiscardUnknown() { - xxx_messageInfo_Policy.DiscardUnknown(m) -} - -var xxx_messageInfo_Policy proto.InternalMessageInfo - -func (m *Policy) GetChainPolicies() []ChainPolicy { - if m != nil { - return m.ChainPolicies - } - return nil -} - -func (m *Policy) GetGeolocationProfile() uint64 { - if m != nil { - return m.GeolocationProfile - } - return 0 -} - -func (m *Policy) GetTotalCuLimit() uint64 { - if m != nil { - return m.TotalCuLimit - } - return 0 -} - -func (m *Policy) GetEpochCuLimit() uint64 { - if m != nil { - return m.EpochCuLimit - } - return 0 -} - -func (m *Policy) GetMaxProvidersToPair() uint64 { - if m != nil { - return m.MaxProvidersToPair - } - return 0 -} - -type ChainPolicy struct { - ChainId string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty" mapstructure:"chain_id"` - Apis []string `protobuf:"bytes,2,rep,name=apis,proto3" json:"apis,omitempty" mapstructure:"apis"` -} - -func (m *ChainPolicy) Reset() { *m = ChainPolicy{} } 
-func (m *ChainPolicy) String() string { return proto.CompactTextString(m) } -func (*ChainPolicy) ProtoMessage() {} -func (*ChainPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_9f89a31663a330ce, []int{3} -} -func (m *ChainPolicy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ChainPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ChainPolicy.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ChainPolicy) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChainPolicy.Merge(m, src) -} -func (m *ChainPolicy) XXX_Size() int { - return m.Size() -} -func (m *ChainPolicy) XXX_DiscardUnknown() { - xxx_messageInfo_ChainPolicy.DiscardUnknown(m) -} - -var xxx_messageInfo_ChainPolicy proto.InternalMessageInfo - -func (m *ChainPolicy) GetChainId() string { - if m != nil { - return m.ChainId - } - return "" -} - -func (m *ChainPolicy) GetApis() []string { - if m != nil { - return m.Apis - } - return nil -} - -type ProtoDeveloperData struct { - ProjectID string `protobuf:"bytes,1,opt,name=projectID,proto3" json:"projectID,omitempty"` -} - -func (m *ProtoDeveloperData) Reset() { *m = ProtoDeveloperData{} } -func (m *ProtoDeveloperData) String() string { return proto.CompactTextString(m) } -func (*ProtoDeveloperData) ProtoMessage() {} -func (*ProtoDeveloperData) Descriptor() ([]byte, []int) { - return fileDescriptor_9f89a31663a330ce, []int{4} -} -func (m *ProtoDeveloperData) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ProtoDeveloperData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ProtoDeveloperData.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m 
*ProtoDeveloperData) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProtoDeveloperData.Merge(m, src) -} -func (m *ProtoDeveloperData) XXX_Size() int { - return m.Size() -} -func (m *ProtoDeveloperData) XXX_DiscardUnknown() { - xxx_messageInfo_ProtoDeveloperData.DiscardUnknown(m) -} - -var xxx_messageInfo_ProtoDeveloperData proto.InternalMessageInfo - -func (m *ProtoDeveloperData) GetProjectID() string { - if m != nil { - return m.ProjectID - } - return "" -} - -// used as a container struct for the subscription module -type ProjectData struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - Enabled bool `protobuf:"varint,3,opt,name=enabled,proto3" json:"enabled,omitempty"` - ProjectKeys []ProjectKey `protobuf:"bytes,4,rep,name=projectKeys,proto3" json:"projectKeys"` - Policy *Policy `protobuf:"bytes,5,opt,name=policy,proto3" json:"policy,omitempty"` -} - -func (m *ProjectData) Reset() { *m = ProjectData{} } -func (m *ProjectData) String() string { return proto.CompactTextString(m) } -func (*ProjectData) ProtoMessage() {} -func (*ProjectData) Descriptor() ([]byte, []int) { - return fileDescriptor_9f89a31663a330ce, []int{5} -} -func (m *ProjectData) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ProjectData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ProjectData.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ProjectData) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProjectData.Merge(m, src) -} -func (m *ProjectData) XXX_Size() int { - return m.Size() -} -func (m *ProjectData) XXX_DiscardUnknown() { - xxx_messageInfo_ProjectData.DiscardUnknown(m) -} - -var xxx_messageInfo_ProjectData proto.InternalMessageInfo - -func (m 
*ProjectData) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *ProjectData) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *ProjectData) GetEnabled() bool { - if m != nil { - return m.Enabled - } - return false -} - -func (m *ProjectData) GetProjectKeys() []ProjectKey { - if m != nil { - return m.ProjectKeys - } - return nil -} - -func (m *ProjectData) GetPolicy() *Policy { - if m != nil { - return m.Policy - } - return nil -} - -func init() { - proto.RegisterEnum("lavanet.lava.projects.ProjectKey_KEY_TYPE_V4", ProjectKey_KEY_TYPE_name, ProjectKey_KEY_TYPE_value) - proto.RegisterType((*Project)(nil), "lavanet.lava.projects.Project_V4") - proto.RegisterType((*ProjectKey)(nil), "lavanet.lava.projects.ProjectKey_V4") - proto.RegisterType((*Policy)(nil), "lavanet.lava.projects.Policy_V4") - proto.RegisterType((*ChainPolicy)(nil), "lavanet.lava.projects.ChainPolicy_V4") - proto.RegisterType((*ProtoDeveloperData)(nil), "lavanet.lava.projects.ProtoDeveloperData_V4") - proto.RegisterType((*ProjectData)(nil), "lavanet.lava.projects.ProjectData_V4") -} - -func init() { proto.RegisterFile("projects/project.proto", fileDescriptor_9f89a31663a330ce) } - -var fileDescriptor_9f89a31663a330ce = []byte{ - // 777 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x4f, 0xab, 0xdb, 0x46, - 0x10, 0xb7, 0x2c, 0xd9, 0x96, 0xc7, 0xce, 0xc3, 0xec, 0x4b, 0x1a, 0x91, 0x34, 0x96, 0xbb, 0xb4, - 0x60, 0x7a, 0x90, 0xe1, 0x85, 0x96, 0x52, 0x28, 0x34, 0x7e, 0x76, 0xc0, 0x2f, 0xa9, 0x63, 0x44, - 0x28, 0xbc, 0x5e, 0xc4, 0x5a, 0xda, 0xda, 0xdb, 0xc8, 0x5a, 0xa1, 0x3f, 0xc6, 0xfe, 0x16, 0xa5, - 0x87, 0x9e, 0xfa, 0x01, 0xfa, 0x51, 0x72, 0xcc, 0xb1, 0x27, 0xb5, 0xf8, 0xdd, 0xde, 0xd1, 0x9f, - 0xa0, 0x68, 0x25, 0xff, 0x51, 0xe2, 0xb4, 0x81, 0x9c, 0x34, 0xf3, 0x9b, 0xdf, 0xcc, 0xec, 0xec, - 0x8c, 0x66, 0xe1, 0x13, 0x3f, 0xe0, 0xbf, 0x50, 0x3b, 0x0a, 0x7b, 0xb9, 
0x60, 0xf8, 0x01, 0x8f, - 0x38, 0xba, 0xe7, 0x92, 0x25, 0xf1, 0x68, 0x64, 0xa4, 0x5f, 0x63, 0x47, 0x7a, 0x70, 0x77, 0xc6, - 0x67, 0x5c, 0x30, 0x7a, 0xa9, 0x94, 0x91, 0xf1, 0x6f, 0x32, 0xd4, 0x26, 0x19, 0x05, 0xdd, 0x85, - 0x0a, 0xf3, 0x1c, 0xba, 0xd2, 0xa4, 0x8e, 0xd4, 0xad, 0x9b, 0x99, 0x82, 0x30, 0x34, 0xc3, 0x78, - 0x1a, 0xda, 0x01, 0xf3, 0x23, 0xc6, 0x3d, 0xad, 0x2c, 0x8c, 0x05, 0x0c, 0x75, 0xa0, 0xe1, 0xd0, - 0x03, 0x45, 0x16, 0x94, 0x63, 0x08, 0x69, 0x50, 0xa3, 0x1e, 0x99, 0xba, 0xd4, 0xd1, 0x94, 0x8e, - 0xd4, 0x55, 0xcd, 0x9d, 0x8a, 0xae, 0xa0, 0x99, 0x9f, 0xd1, 0x7a, 0x45, 0xd7, 0xa1, 0x56, 0xe9, - 0xc8, 0xdd, 0xc6, 0xc5, 0x67, 0xc6, 0xc9, 0x2a, 0x8c, 0xfc, 0xac, 0xcf, 0xe8, 0xba, 0xaf, 0xbc, - 0x4e, 0xf4, 0x92, 0xd9, 0xf0, 0xf7, 0x48, 0x88, 0xbe, 0x87, 0x26, 0x71, 0x16, 0xcc, 0xb3, 0x7c, - 0xee, 0x32, 0x7b, 0xad, 0x55, 0x3b, 0x52, 0xb7, 0x71, 0xf1, 0xe8, 0x7d, 0xb1, 0x04, 0xc9, 0x6c, - 0x08, 0x97, 0x4c, 0x41, 0xf7, 0xa1, 0x16, 0x87, 0xd4, 0xb1, 0xec, 0x58, 0xab, 0x75, 0xa4, 0xae, - 0x62, 0x56, 0x53, 0xf5, 0x32, 0x46, 0x63, 0x38, 0x3f, 0x2e, 0x79, 0x97, 0x41, 0xfd, 0x90, 0x0c, - 0xe8, 0xd8, 0x33, 0x4f, 0xf4, 0x00, 0xd4, 0xd0, 0x23, 0x7e, 0x38, 0xe7, 0x91, 0x56, 0x17, 0x99, - 0xf6, 0x3a, 0xfe, 0x43, 0x02, 0x38, 0x14, 0x8a, 0x5a, 0x20, 0xbf, 0xa2, 0xeb, 0xbc, 0x2b, 0xa9, - 0x88, 0x9e, 0x42, 0x25, 0x5a, 0xfb, 0x34, 0xd4, 0xca, 0x1d, 0xb9, 0x7b, 0x76, 0xf1, 0xe5, 0xff, - 0x5e, 0x96, 0xf1, 0x6c, 0x78, 0x6d, 0xbd, 0xbc, 0x9e, 0x0c, 0xf3, 0x5b, 0xcb, 0xdc, 0xb1, 0x01, - 0xea, 0xce, 0x80, 0x54, 0x50, 0xc6, 0x2f, 0xc6, 0xc3, 0x56, 0x09, 0xd5, 0xa1, 0xf2, 0x64, 0xf0, - 0xc3, 0x68, 0xdc, 0x92, 0xd0, 0x1d, 0xa8, 0x0f, 0x86, 0x3f, 0x0e, 0x9f, 0xbf, 0x98, 0x0c, 0xcd, - 0x56, 0xf9, 0x4a, 0x51, 0xe5, 0x96, 0x82, 0x7f, 0x57, 0xa0, 0x9a, 0x57, 0xe1, 0xc3, 0x99, 0x3d, - 0x27, 0xbb, 0x0b, 0x67, 0x34, 0xd4, 0x24, 0xd1, 0x3e, 0xfc, 0x9e, 0x13, 0x5d, 0xa6, 0xe4, 0xcc, - 0xb7, 0xff, 0x45, 0x7a, 0x92, 0x6d, 0xa2, 0x3f, 0x5a, 0x10, 0x3f, 0x8c, 0x82, 0xd8, 0x8e, 0xe2, - 0x80, 0x7e, 
0x8b, 0x8b, 0xf1, 0xb0, 0x79, 0xc7, 0xde, 0xfb, 0x30, 0x1a, 0x22, 0x0f, 0xce, 0x67, - 0x94, 0xbb, 0xdc, 0x26, 0x59, 0x1b, 0x02, 0xfe, 0x33, 0x73, 0xa9, 0x98, 0x4a, 0xa5, 0xff, 0xdd, - 0x6d, 0xa2, 0x9f, 0x32, 0x6f, 0x13, 0x1d, 0x17, 0xb3, 0x9c, 0x20, 0x61, 0x13, 0x1d, 0xa1, 0x93, - 0x0c, 0x44, 0xd7, 0x70, 0x16, 0xf1, 0x88, 0xb8, 0x96, 0x1d, 0x5b, 0x2e, 0x5b, 0xb0, 0x48, 0x4c, - 0xb7, 0xd2, 0x7f, 0x7c, 0x9b, 0xe8, 0x6f, 0x59, 0xde, 0xad, 0xa5, 0x68, 0xc7, 0x66, 0x53, 0x00, - 0x97, 0xf1, 0xf3, 0x54, 0x4d, 0x43, 0x53, 0x9f, 0xdb, 0xf3, 0x43, 0x68, 0xe5, 0x10, 0xba, 0x68, - 0x79, 0x37, 0x74, 0xd1, 0x8e, 0xcd, 0xa6, 0x00, 0x76, 0xa1, 0x23, 0xb8, 0xb7, 0x20, 0xab, 0xb4, - 0xb2, 0x25, 0x73, 0x68, 0x10, 0x5a, 0x11, 0xb7, 0x7c, 0xc2, 0x02, 0xad, 0x22, 0x32, 0x3c, 0xb9, - 0x4d, 0xf4, 0xd3, 0x84, 0x6d, 0xa2, 0x7f, 0x5e, 0x4c, 0x74, 0x92, 0x86, 0x4d, 0xb4, 0x20, 0xab, - 0xc9, 0x0e, 0x7e, 0xc9, 0x27, 0x29, 0xb8, 0x84, 0xc6, 0x51, 0x83, 0xd1, 0xd7, 0xa0, 0x66, 0xcd, - 0x64, 0x4e, 0x36, 0xbc, 0xfd, 0x87, 0xdb, 0x44, 0xbf, 0x7f, 0xaa, 0xdd, 0xcc, 0xc1, 0x66, 0x4d, - 0x88, 0x23, 0x07, 0xf5, 0x40, 0x21, 0x3e, 0xcb, 0x86, 0xbb, 0xde, 0x7f, 0x98, 0x8f, 0xc9, 0x79, - 0xd1, 0x2f, 0x65, 0x60, 0x53, 0x10, 0xf1, 0x37, 0x80, 0x26, 0xe9, 0x36, 0x1b, 0xd0, 0x25, 0x75, - 0xb9, 0x4f, 0x83, 0x01, 0x89, 0x08, 0xfa, 0x14, 0xea, 0xf9, 0xdc, 0x8d, 0x06, 0xf9, 0xcf, 0x73, - 0x00, 0xae, 0x14, 0xb5, 0xdc, 0x92, 0xf1, 0xdf, 0x12, 0x34, 0xf2, 0xbf, 0x44, 0xf8, 0x20, 0x50, - 0x3c, 0xb2, 0xa0, 0x39, 0x5d, 0xc8, 0x6f, 0x2f, 0xb7, 0xf2, 0x7f, 0x2e, 0x37, 0xb9, 0xb8, 0xdc, - 0x46, 0x70, 0xbc, 0x9f, 0x34, 0xe5, 0x23, 0x76, 0xdb, 0x57, 0x50, 0xcd, 0x77, 0x4e, 0xe5, 0x43, - 0x76, 0x4e, 0x4e, 0xee, 0x3f, 0xfd, 0x73, 0xd3, 0x96, 0x5e, 0x6f, 0xda, 0xd2, 0x9b, 0x4d, 0x5b, - 0xfa, 0x67, 0xd3, 0x96, 0x7e, 0xbd, 0x69, 0x97, 0xde, 0xdc, 0xb4, 0x4b, 0x7f, 0xdd, 0xb4, 0x4b, - 0x3f, 0x75, 0x67, 0x2c, 0x9a, 0xc7, 0x53, 0xc3, 0xe6, 0x8b, 0x5e, 0x1e, 0x4e, 0x7c, 0x7b, 0xab, - 0xde, 0xfe, 0x75, 0x11, 0xab, 0x62, 0x5a, 0x15, 
0xef, 0xc5, 0xe3, 0x7f, 0x03, 0x00, 0x00, 0xff, - 0xff, 0xc4, 0x11, 0xe0, 0xc1, 0x76, 0x06, 0x00, 0x00, -} - -func (this *Project) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Project) - if !ok { - that2, ok := that.(Project) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Index != that1.Index { - return false - } - if this.Subscription != that1.Subscription { - return false - } - if this.Description != that1.Description { - return false - } - if this.Enabled != that1.Enabled { - return false - } - if len(this.ProjectKeys) != len(that1.ProjectKeys) { - return false - } - for i := range this.ProjectKeys { - if !this.ProjectKeys[i].Equal(&that1.ProjectKeys[i]) { - return false - } - } - if !this.AdminPolicy.Equal(that1.AdminPolicy) { - return false - } - if this.UsedCu != that1.UsedCu { - return false - } - if !this.SubscriptionPolicy.Equal(that1.SubscriptionPolicy) { - return false - } - if this.Snapshot != that1.Snapshot { - return false - } - return true -} -func (this *ProjectKey) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ProjectKey) - if !ok { - that2, ok := that.(ProjectKey) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Key != that1.Key { - return false - } - if len(this.Types) != len(that1.Types) { - return false - } - for i := range this.Types { - if this.Types[i] != that1.Types[i] { - return false - } - } - return true -} -func (this *Policy) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Policy) - if !ok { - that2, ok := that.(Policy) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - 
if len(this.ChainPolicies) != len(that1.ChainPolicies) { - return false - } - for i := range this.ChainPolicies { - if !this.ChainPolicies[i].Equal(&that1.ChainPolicies[i]) { - return false - } - } - if this.GeolocationProfile != that1.GeolocationProfile { - return false - } - if this.TotalCuLimit != that1.TotalCuLimit { - return false - } - if this.EpochCuLimit != that1.EpochCuLimit { - return false - } - if this.MaxProvidersToPair != that1.MaxProvidersToPair { - return false - } - return true -} -func (this *ChainPolicy) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ChainPolicy) - if !ok { - that2, ok := that.(ChainPolicy) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.ChainId != that1.ChainId { - return false - } - if len(this.Apis) != len(that1.Apis) { - return false - } - for i := range this.Apis { - if this.Apis[i] != that1.Apis[i] { - return false - } - } - return true -} -func (this *ProtoDeveloperData) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ProtoDeveloperData) - if !ok { - that2, ok := that.(ProtoDeveloperData) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.ProjectID != that1.ProjectID { - return false - } - return true -} -func (this *ProjectData) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ProjectData) - if !ok { - that2, ok := that.(ProjectData) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Name != that1.Name { - return false - } - if this.Description != that1.Description { - return false - } - if this.Enabled != that1.Enabled { - return false - } - if 
len(this.ProjectKeys) != len(that1.ProjectKeys) { - return false - } - for i := range this.ProjectKeys { - if !this.ProjectKeys[i].Equal(&that1.ProjectKeys[i]) { - return false - } - } - if !this.Policy.Equal(that1.Policy) { - return false - } - return true -} -func (m *Project) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Project) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Project) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Snapshot != 0 { - i = encodeVarintProject(dAtA, i, uint64(m.Snapshot)) - i-- - dAtA[i] = 0x48 - } - if m.SubscriptionPolicy != nil { - { - size, err := m.SubscriptionPolicy.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProject(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - if m.UsedCu != 0 { - i = encodeVarintProject(dAtA, i, uint64(m.UsedCu)) - i-- - dAtA[i] = 0x38 - } - if m.AdminPolicy != nil { - { - size, err := m.AdminPolicy.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProject(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - if len(m.ProjectKeys) > 0 { - for iNdEx := len(m.ProjectKeys) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ProjectKeys[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProject(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - } - if m.Enabled { - i-- - if m.Enabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if len(m.Description) > 0 { - i -= len(m.Description) - copy(dAtA[i:], m.Description) - i = encodeVarintProject(dAtA, i, uint64(len(m.Description))) - i-- - dAtA[i] = 0x1a - } - if len(m.Subscription) > 0 { - 
i -= len(m.Subscription) - copy(dAtA[i:], m.Subscription) - i = encodeVarintProject(dAtA, i, uint64(len(m.Subscription))) - i-- - dAtA[i] = 0x12 - } - if len(m.Index) > 0 { - i -= len(m.Index) - copy(dAtA[i:], m.Index) - i = encodeVarintProject(dAtA, i, uint64(len(m.Index))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ProjectKey) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ProjectKey) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ProjectKey) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Types) > 0 { - dAtA4 := make([]byte, len(m.Types)*10) - var j3 int - for _, num := range m.Types { - for num >= 1<<7 { - dAtA4[j3] = uint8(uint64(num)&0x7f | 0x80) - num >>= 7 - j3++ - } - dAtA4[j3] = uint8(num) - j3++ - } - i -= j3 - copy(dAtA[i:], dAtA4[:j3]) - i = encodeVarintProject(dAtA, i, uint64(j3)) - i-- - dAtA[i] = 0x12 - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintProject(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Policy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Policy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Policy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.MaxProvidersToPair != 0 { - i = encodeVarintProject(dAtA, i, uint64(m.MaxProvidersToPair)) - i-- - dAtA[i] = 0x28 - } - if m.EpochCuLimit != 0 { - i = encodeVarintProject(dAtA, i, uint64(m.EpochCuLimit)) - i-- 
- dAtA[i] = 0x20 - } - if m.TotalCuLimit != 0 { - i = encodeVarintProject(dAtA, i, uint64(m.TotalCuLimit)) - i-- - dAtA[i] = 0x18 - } - if m.GeolocationProfile != 0 { - i = encodeVarintProject(dAtA, i, uint64(m.GeolocationProfile)) - i-- - dAtA[i] = 0x10 - } - if len(m.ChainPolicies) > 0 { - for iNdEx := len(m.ChainPolicies) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ChainPolicies[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProject(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ChainPolicy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ChainPolicy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ChainPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Apis) > 0 { - for iNdEx := len(m.Apis) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Apis[iNdEx]) - copy(dAtA[i:], m.Apis[iNdEx]) - i = encodeVarintProject(dAtA, i, uint64(len(m.Apis[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if len(m.ChainId) > 0 { - i -= len(m.ChainId) - copy(dAtA[i:], m.ChainId) - i = encodeVarintProject(dAtA, i, uint64(len(m.ChainId))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ProtoDeveloperData) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ProtoDeveloperData) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ProtoDeveloperData) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ProjectID) 
> 0 { - i -= len(m.ProjectID) - copy(dAtA[i:], m.ProjectID) - i = encodeVarintProject(dAtA, i, uint64(len(m.ProjectID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ProjectData) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ProjectData) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ProjectData) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Policy != nil { - { - size, err := m.Policy.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProject(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if len(m.ProjectKeys) > 0 { - for iNdEx := len(m.ProjectKeys) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ProjectKeys[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProject(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - if m.Enabled { - i-- - if m.Enabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if len(m.Description) > 0 { - i -= len(m.Description) - copy(dAtA[i:], m.Description) - i = encodeVarintProject(dAtA, i, uint64(len(m.Description))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintProject(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintProject(dAtA []byte, offset int, v uint64) int { - offset -= sovProject(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Project) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Index) - if l > 0 { - n += 1 + l + 
sovProject(uint64(l)) - } - l = len(m.Subscription) - if l > 0 { - n += 1 + l + sovProject(uint64(l)) - } - l = len(m.Description) - if l > 0 { - n += 1 + l + sovProject(uint64(l)) - } - if m.Enabled { - n += 2 - } - if len(m.ProjectKeys) > 0 { - for _, e := range m.ProjectKeys { - l = e.Size() - n += 1 + l + sovProject(uint64(l)) - } - } - if m.AdminPolicy != nil { - l = m.AdminPolicy.Size() - n += 1 + l + sovProject(uint64(l)) - } - if m.UsedCu != 0 { - n += 1 + sovProject(uint64(m.UsedCu)) - } - if m.SubscriptionPolicy != nil { - l = m.SubscriptionPolicy.Size() - n += 1 + l + sovProject(uint64(l)) - } - if m.Snapshot != 0 { - n += 1 + sovProject(uint64(m.Snapshot)) - } - return n -} - -func (m *ProjectKey) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovProject(uint64(l)) - } - if len(m.Types) > 0 { - l = 0 - for _, e := range m.Types { - l += sovProject(uint64(e)) - } - n += 1 + sovProject(uint64(l)) + l - } - return n -} - -func (m *Policy) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ChainPolicies) > 0 { - for _, e := range m.ChainPolicies { - l = e.Size() - n += 1 + l + sovProject(uint64(l)) - } - } - if m.GeolocationProfile != 0 { - n += 1 + sovProject(uint64(m.GeolocationProfile)) - } - if m.TotalCuLimit != 0 { - n += 1 + sovProject(uint64(m.TotalCuLimit)) - } - if m.EpochCuLimit != 0 { - n += 1 + sovProject(uint64(m.EpochCuLimit)) - } - if m.MaxProvidersToPair != 0 { - n += 1 + sovProject(uint64(m.MaxProvidersToPair)) - } - return n -} - -func (m *ChainPolicy) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ChainId) - if l > 0 { - n += 1 + l + sovProject(uint64(l)) - } - if len(m.Apis) > 0 { - for _, s := range m.Apis { - l = len(s) - n += 1 + l + sovProject(uint64(l)) - } - } - return n -} - -func (m *ProtoDeveloperData) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ProjectID) - if l > 0 { 
- n += 1 + l + sovProject(uint64(l)) - } - return n -} - -func (m *ProjectData) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovProject(uint64(l)) - } - l = len(m.Description) - if l > 0 { - n += 1 + l + sovProject(uint64(l)) - } - if m.Enabled { - n += 2 - } - if len(m.ProjectKeys) > 0 { - for _, e := range m.ProjectKeys { - l = e.Size() - n += 1 + l + sovProject(uint64(l)) - } - } - if m.Policy != nil { - l = m.Policy.Size() - n += 1 + l + sovProject(uint64(l)) - } - return n -} - -func sovProject(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozProject(x uint64) (n int) { - return sovProject(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Project) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Project: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Project: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - 
return io.ErrUnexpectedEOF - } - m.Index = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subscription", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Subscription = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Enabled = bool(v != 0) - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProjectKeys", wireType) - } - var msglen 
int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ProjectKeys = append(m.ProjectKeys, ProjectKey{}) - if err := m.ProjectKeys[len(m.ProjectKeys)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AdminPolicy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AdminPolicy == nil { - m.AdminPolicy = &Policy{} - } - if err := m.AdminPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UsedCu", wireType) - } - m.UsedCu = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UsedCu |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SubscriptionPolicy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SubscriptionPolicy == nil { - m.SubscriptionPolicy = &Policy{} - } - if err := m.SubscriptionPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Snapshot", wireType) - } - m.Snapshot = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Snapshot |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipProject(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProject - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ProjectKey) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ProjectKey: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ProjectKey: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", 
wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType == 0 { - var v ProjectKey_KEY_TYPE - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= ProjectKey_KEY_TYPE(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Types = append(m.Types, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - if elementCount != 0 && len(m.Types) == 0 { - m.Types = make([]ProjectKey_KEY_TYPE, 0, elementCount) - } - for iNdEx < postIndex { - var v ProjectKey_KEY_TYPE - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= ProjectKey_KEY_TYPE(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Types = append(m.Types, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field Types", wireType) - } - default: - iNdEx = 
preIndex - skippy, err := skipProject(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProject - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Policy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Policy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Policy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ChainPolicies", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ChainPolicies = append(m.ChainPolicies, ChainPolicy{}) - if err := m.ChainPolicies[len(m.ChainPolicies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GeolocationProfile", wireType) - } - m.GeolocationProfile = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GeolocationProfile |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalCuLimit", wireType) - } - m.TotalCuLimit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalCuLimit |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EpochCuLimit", wireType) - } - m.EpochCuLimit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.EpochCuLimit |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxProvidersToPair", wireType) - } - m.MaxProvidersToPair = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxProvidersToPair |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipProject(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProject - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ChainPolicy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - 
wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ChainPolicy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ChainPolicy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ChainId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Apis", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Apis = append(m.Apis, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProject(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProject - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx 
+= skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ProtoDeveloperData) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ProtoDeveloperData: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ProtoDeveloperData: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProjectID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ProjectID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProject(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProject - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ProjectData) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if 
shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ProjectData: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ProjectData: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType) - } - var v int - for 
shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Enabled = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProjectKeys", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ProjectKeys = append(m.ProjectKeys, ProjectKey{}) - if err := m.ProjectKeys[len(m.ProjectKeys)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Policy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Policy == nil { - m.Policy = &Policy{} - } - if err := m.Policy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProject(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProject - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - 
} - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipProject(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowProject - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowProject - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowProject - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthProject - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupProject - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthProject - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthProject = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowProject = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupProject = fmt.Errorf("proto: unexpected end of group") -) diff --git a/x/projects/migrations/v5/project.go b/x/projects/migrations/v5/project.go deleted file mode 100644 index f261e17272..0000000000 --- a/x/projects/migrations/v5/project.go +++ /dev/null @@ -1,10 +0,0 @@ -package types - -func NewProjectKey(key string, kinds uint32) ProjectKey { - return ProjectKey{Key: key, 
Kinds: kinds} -} - -func (projectKey ProjectKey) AddType(kind ProjectKey_Type) ProjectKey { - projectKey.Kinds |= uint32(kind) - return projectKey -} diff --git a/x/projects/migrations/v5/project.pb.go b/x/projects/migrations/v5/project.pb.go deleted file mode 100644 index 92fc162a18..0000000000 --- a/x/projects/migrations/v5/project.pb.go +++ /dev/null @@ -1,2302 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: projects/project.proto - -package types - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type ProjectKey_Type int32 - -const ( - ProjectKey_NONE ProjectKey_Type = 0 - ProjectKey_ADMIN ProjectKey_Type = 1 - ProjectKey_DEVELOPER ProjectKey_Type = 2 -) - -var ProjectKey_Type_name = map[int32]string{ - 0: "NONE", - 1: "ADMIN", - 2: "DEVELOPER", -} - -var ProjectKey_Type_value = map[string]int32{ - "NONE": 0, - "ADMIN": 1, - "DEVELOPER": 2, -} - -func (x ProjectKey_Type) String() string { - return proto.EnumName(ProjectKey_Type_name, int32(x)) -} - -func (ProjectKey_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_9f89a31663a330ce, []int{1, 0} -} - -type Project struct { - Index string `protobuf:"bytes,1,opt,name=index,proto3" json:"index,omitempty"` - Subscription string `protobuf:"bytes,2,opt,name=subscription,proto3" json:"subscription,omitempty"` - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - Enabled bool `protobuf:"varint,4,opt,name=enabled,proto3" json:"enabled,omitempty"` - ProjectKeys []ProjectKey `protobuf:"bytes,5,rep,name=project_keys,json=projectKeys,proto3" json:"project_keys"` - AdminPolicy *Policy `protobuf:"bytes,6,opt,name=admin_policy,json=adminPolicy,proto3" json:"admin_policy,omitempty"` - UsedCu uint64 `protobuf:"varint,7,opt,name=used_cu,json=usedCu,proto3" json:"used_cu,omitempty"` - SubscriptionPolicy *Policy `protobuf:"bytes,8,opt,name=subscription_policy,json=subscriptionPolicy,proto3" json:"subscription_policy,omitempty"` - Snapshot uint64 `protobuf:"varint,9,opt,name=snapshot,proto3" json:"snapshot,omitempty"` -} - -func (m *Project) Reset() { *m = Project{} } -func (m *Project) String() string { return proto.CompactTextString(m) } -func (*Project) ProtoMessage() {} -func (*Project) Descriptor() ([]byte, []int) { - return fileDescriptor_9f89a31663a330ce, []int{0} -} -func (m *Project) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Project) XXX_Marshal(b 
[]byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Project.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Project) XXX_Merge(src proto.Message) { - xxx_messageInfo_Project.Merge(m, src) -} -func (m *Project) XXX_Size() int { - return m.Size() -} -func (m *Project) XXX_DiscardUnknown() { - xxx_messageInfo_Project.DiscardUnknown(m) -} - -var xxx_messageInfo_Project proto.InternalMessageInfo - -func (m *Project) GetIndex() string { - if m != nil { - return m.Index - } - return "" -} - -func (m *Project) GetSubscription() string { - if m != nil { - return m.Subscription - } - return "" -} - -func (m *Project) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Project) GetEnabled() bool { - if m != nil { - return m.Enabled - } - return false -} - -func (m *Project) GetProjectKeys() []ProjectKey { - if m != nil { - return m.ProjectKeys - } - return nil -} - -func (m *Project) GetAdminPolicy() *Policy { - if m != nil { - return m.AdminPolicy - } - return nil -} - -func (m *Project) GetUsedCu() uint64 { - if m != nil { - return m.UsedCu - } - return 0 -} - -func (m *Project) GetSubscriptionPolicy() *Policy { - if m != nil { - return m.SubscriptionPolicy - } - return nil -} - -func (m *Project) GetSnapshot() uint64 { - if m != nil { - return m.Snapshot - } - return 0 -} - -type ProjectKey struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Kinds uint32 `protobuf:"varint,4,opt,name=kinds,proto3" json:"kinds,omitempty"` -} - -func (m *ProjectKey) Reset() { *m = ProjectKey{} } -func (m *ProjectKey) String() string { return proto.CompactTextString(m) } -func (*ProjectKey) ProtoMessage() {} -func (*ProjectKey) Descriptor() ([]byte, []int) { - return fileDescriptor_9f89a31663a330ce, []int{1} -} -func (m *ProjectKey) XXX_Unmarshal(b []byte) 
error { - return m.Unmarshal(b) -} -func (m *ProjectKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ProjectKey.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ProjectKey) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProjectKey.Merge(m, src) -} -func (m *ProjectKey) XXX_Size() int { - return m.Size() -} -func (m *ProjectKey) XXX_DiscardUnknown() { - xxx_messageInfo_ProjectKey.DiscardUnknown(m) -} - -var xxx_messageInfo_ProjectKey proto.InternalMessageInfo - -func (m *ProjectKey) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *ProjectKey) GetKinds() uint32 { - if m != nil { - return m.Kinds - } - return 0 -} - -// protobuf expected in YAML format: used "moretags" to simplify parsing -type Policy struct { - ChainPolicies []ChainPolicy `protobuf:"bytes,1,rep,name=chain_policies,json=chainPolicies,proto3" json:"chain_policies" mapstructure:"chain_policies"` - GeolocationProfile uint64 `protobuf:"varint,2,opt,name=geolocation_profile,json=geolocationProfile,proto3" json:"geolocation_profile" mapstructure:"geolocation_profile"` - TotalCuLimit uint64 `protobuf:"varint,3,opt,name=total_cu_limit,json=totalCuLimit,proto3" json:"total_cu_limit" mapstructure:"total_cu_limit"` - EpochCuLimit uint64 `protobuf:"varint,4,opt,name=epoch_cu_limit,json=epochCuLimit,proto3" json:"epoch_cu_limit" mapstructure:"epoch_cu_limit"` - MaxProvidersToPair uint64 `protobuf:"varint,5,opt,name=max_providers_to_pair,json=maxProvidersToPair,proto3" json:"max_providers_to_pair" mapstructure:"max_providers_to_pair"` -} - -func (m *Policy) Reset() { *m = Policy{} } -func (m *Policy) String() string { return proto.CompactTextString(m) } -func (*Policy) ProtoMessage() {} -func (*Policy) Descriptor() ([]byte, []int) { - return fileDescriptor_9f89a31663a330ce, []int{2} -} -func (m 
*Policy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Policy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Policy.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Policy) XXX_Merge(src proto.Message) { - xxx_messageInfo_Policy.Merge(m, src) -} -func (m *Policy) XXX_Size() int { - return m.Size() -} -func (m *Policy) XXX_DiscardUnknown() { - xxx_messageInfo_Policy.DiscardUnknown(m) -} - -var xxx_messageInfo_Policy proto.InternalMessageInfo - -func (m *Policy) GetChainPolicies() []ChainPolicy { - if m != nil { - return m.ChainPolicies - } - return nil -} - -func (m *Policy) GetGeolocationProfile() uint64 { - if m != nil { - return m.GeolocationProfile - } - return 0 -} - -func (m *Policy) GetTotalCuLimit() uint64 { - if m != nil { - return m.TotalCuLimit - } - return 0 -} - -func (m *Policy) GetEpochCuLimit() uint64 { - if m != nil { - return m.EpochCuLimit - } - return 0 -} - -func (m *Policy) GetMaxProvidersToPair() uint64 { - if m != nil { - return m.MaxProvidersToPair - } - return 0 -} - -type ChainPolicy struct { - ChainId string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty" mapstructure:"chain_id"` - Apis []string `protobuf:"bytes,2,rep,name=apis,proto3" json:"apis,omitempty" mapstructure:"apis"` -} - -func (m *ChainPolicy) Reset() { *m = ChainPolicy{} } -func (m *ChainPolicy) String() string { return proto.CompactTextString(m) } -func (*ChainPolicy) ProtoMessage() {} -func (*ChainPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_9f89a31663a330ce, []int{3} -} -func (m *ChainPolicy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ChainPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ChainPolicy.Marshal(b, m, deterministic) 
- } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ChainPolicy) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChainPolicy.Merge(m, src) -} -func (m *ChainPolicy) XXX_Size() int { - return m.Size() -} -func (m *ChainPolicy) XXX_DiscardUnknown() { - xxx_messageInfo_ChainPolicy.DiscardUnknown(m) -} - -var xxx_messageInfo_ChainPolicy proto.InternalMessageInfo - -func (m *ChainPolicy) GetChainId() string { - if m != nil { - return m.ChainId - } - return "" -} - -func (m *ChainPolicy) GetApis() []string { - if m != nil { - return m.Apis - } - return nil -} - -type ProtoDeveloperData struct { - ProjectID string `protobuf:"bytes,1,opt,name=projectID,proto3" json:"projectID,omitempty"` -} - -func (m *ProtoDeveloperData) Reset() { *m = ProtoDeveloperData{} } -func (m *ProtoDeveloperData) String() string { return proto.CompactTextString(m) } -func (*ProtoDeveloperData) ProtoMessage() {} -func (*ProtoDeveloperData) Descriptor() ([]byte, []int) { - return fileDescriptor_9f89a31663a330ce, []int{4} -} -func (m *ProtoDeveloperData) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ProtoDeveloperData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ProtoDeveloperData.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ProtoDeveloperData) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProtoDeveloperData.Merge(m, src) -} -func (m *ProtoDeveloperData) XXX_Size() int { - return m.Size() -} -func (m *ProtoDeveloperData) XXX_DiscardUnknown() { - xxx_messageInfo_ProtoDeveloperData.DiscardUnknown(m) -} - -var xxx_messageInfo_ProtoDeveloperData proto.InternalMessageInfo - -func (m *ProtoDeveloperData) GetProjectID() string { - if m != nil { - return m.ProjectID - } - return "" -} - -// used as a 
container struct for the subscription module -type ProjectData struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - Enabled bool `protobuf:"varint,3,opt,name=enabled,proto3" json:"enabled,omitempty"` - ProjectKeys []ProjectKey `protobuf:"bytes,4,rep,name=projectKeys,proto3" json:"projectKeys"` - Policy *Policy `protobuf:"bytes,5,opt,name=policy,proto3" json:"policy,omitempty"` -} - -func (m *ProjectData) Reset() { *m = ProjectData{} } -func (m *ProjectData) String() string { return proto.CompactTextString(m) } -func (*ProjectData) ProtoMessage() {} -func (*ProjectData) Descriptor() ([]byte, []int) { - return fileDescriptor_9f89a31663a330ce, []int{5} -} -func (m *ProjectData) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ProjectData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ProjectData.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ProjectData) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProjectData.Merge(m, src) -} -func (m *ProjectData) XXX_Size() int { - return m.Size() -} -func (m *ProjectData) XXX_DiscardUnknown() { - xxx_messageInfo_ProjectData.DiscardUnknown(m) -} - -var xxx_messageInfo_ProjectData proto.InternalMessageInfo - -func (m *ProjectData) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *ProjectData) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *ProjectData) GetEnabled() bool { - if m != nil { - return m.Enabled - } - return false -} - -func (m *ProjectData) GetProjectKeys() []ProjectKey { - if m != nil { - return m.ProjectKeys - } - return nil -} - -func (m *ProjectData) GetPolicy() *Policy { - if m != nil { - return 
m.Policy - } - return nil -} - -func init() { - proto.RegisterEnum("lavanet.lava.projects.ProjectKey_Type_V5", ProjectKey_Type_name, ProjectKey_Type_value) - proto.RegisterType((*Project)(nil), "lavanet.lava.projects.Project_V5") - proto.RegisterType((*ProjectKey)(nil), "lavanet.lava.projects.ProjectKey_V5") - proto.RegisterType((*Policy)(nil), "lavanet.lava.projects.Policy_V5") - proto.RegisterType((*ChainPolicy)(nil), "lavanet.lava.projects.ChainPolicy_V5") - proto.RegisterType((*ProtoDeveloperData)(nil), "lavanet.lava.projects.ProtoDeveloperData_V5") - proto.RegisterType((*ProjectData)(nil), "lavanet.lava.projects.ProjectData_V5") -} - -func init() { proto.RegisterFile("projects/project.proto", fileDescriptor_9f89a31663a330ce) } - -var fileDescriptor_9f89a31663a330ce = []byte{ - // 760 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x4d, 0x6b, 0xf3, 0x46, - 0x10, 0xb6, 0x2c, 0xf9, 0x6b, 0xec, 0xbc, 0x98, 0xcd, 0x9b, 0x46, 0xa4, 0x8d, 0xe5, 0x2e, 0x2d, - 0x98, 0x1e, 0x6c, 0x48, 0x68, 0x29, 0x85, 0x42, 0xe3, 0xd8, 0x05, 0xa7, 0xa9, 0x63, 0x44, 0x28, - 0xb4, 0x17, 0xb3, 0x96, 0xb6, 0xf6, 0x36, 0xb2, 0x56, 0xe8, 0xc3, 0xd8, 0xff, 0xa2, 0xf4, 0xd0, - 0xdf, 0xd0, 0x9f, 0x92, 0x63, 0x8e, 0x3d, 0xa9, 0xc5, 0xb9, 0xe5, 0xe8, 0x5f, 0x50, 0xb4, 0x92, - 0x3f, 0x94, 0x38, 0x25, 0xf0, 0x9e, 0x34, 0xf3, 0xcc, 0x33, 0x33, 0x3b, 0x3b, 0xb3, 0x23, 0xf8, - 0xc8, 0x71, 0xf9, 0x6f, 0xd4, 0xf0, 0xbd, 0x56, 0x22, 0x34, 0x1d, 0x97, 0xfb, 0x1c, 0x1d, 0x59, - 0x64, 0x46, 0x6c, 0xea, 0x37, 0xa3, 0x6f, 0x73, 0x4d, 0x3a, 0x79, 0x3f, 0xe6, 0x63, 0x2e, 0x18, - 0xad, 0x48, 0x8a, 0xc9, 0xf8, 0x0f, 0x19, 0x0a, 0x83, 0x98, 0x82, 0xde, 0x43, 0x8e, 0xd9, 0x26, - 0x9d, 0xab, 0x52, 0x5d, 0x6a, 0x94, 0xf4, 0x58, 0x41, 0x18, 0x2a, 0x5e, 0x30, 0xf2, 0x0c, 0x97, - 0x39, 0x3e, 0xe3, 0xb6, 0x9a, 0x15, 0xc6, 0x14, 0x86, 0xea, 0x50, 0x36, 0xe9, 0x96, 0x22, 0x0b, - 0xca, 0x2e, 0x84, 0x54, 0x28, 0x50, 0x9b, 0x8c, 0x2c, 0x6a, 0xaa, 0x4a, 0x5d, 
0x6a, 0x14, 0xf5, - 0xb5, 0x8a, 0xae, 0xa0, 0x92, 0x9c, 0x71, 0x78, 0x47, 0x17, 0x9e, 0x9a, 0xab, 0xcb, 0x8d, 0xf2, - 0xd9, 0xa7, 0xcd, 0xbd, 0x55, 0x34, 0x93, 0xb3, 0xfe, 0x40, 0x17, 0x6d, 0xe5, 0x3e, 0xd4, 0x32, - 0x7a, 0xd9, 0xd9, 0x20, 0x1e, 0xfa, 0x0e, 0x2a, 0xc4, 0x9c, 0x32, 0x7b, 0xe8, 0x70, 0x8b, 0x19, - 0x0b, 0x35, 0x5f, 0x97, 0x1a, 0xe5, 0xb3, 0xd3, 0xd7, 0x62, 0x09, 0x92, 0x5e, 0x16, 0x2e, 0xb1, - 0x82, 0x8e, 0xa1, 0x10, 0x78, 0xd4, 0x1c, 0x1a, 0x81, 0x5a, 0xa8, 0x4b, 0x0d, 0x45, 0xcf, 0x47, - 0xea, 0x65, 0x80, 0xfa, 0x70, 0xb8, 0x5b, 0xf2, 0x3a, 0x43, 0xf1, 0x2d, 0x19, 0xd0, 0xae, 0x67, - 0x92, 0xe8, 0x04, 0x8a, 0x9e, 0x4d, 0x1c, 0x6f, 0xc2, 0x7d, 0xb5, 0x24, 0x32, 0x6d, 0x74, 0x6c, - 0x01, 0x6c, 0xeb, 0x44, 0x55, 0x90, 0xef, 0xe8, 0x22, 0x69, 0x4a, 0x24, 0x46, 0x8d, 0xba, 0x63, - 0xb6, 0xe9, 0x89, 0xab, 0x3c, 0xd0, 0x63, 0x05, 0x7f, 0x01, 0xca, 0xed, 0xc2, 0xa1, 0xa8, 0x08, - 0x4a, 0xff, 0xa6, 0xdf, 0xad, 0x66, 0x50, 0x09, 0x72, 0x17, 0x9d, 0x1f, 0x7b, 0xfd, 0xaa, 0x84, - 0x0e, 0xa0, 0xd4, 0xe9, 0xfe, 0xd4, 0xbd, 0xbe, 0x19, 0x74, 0xf5, 0x6a, 0xf6, 0x4a, 0x29, 0x66, - 0xab, 0xf2, 0x95, 0x52, 0x94, 0xab, 0x0a, 0xfe, 0x53, 0x81, 0x7c, 0x72, 0x28, 0x07, 0xde, 0x19, - 0x13, 0xb2, 0xbe, 0x3f, 0x46, 0x3d, 0x55, 0x12, 0xdd, 0xc0, 0xaf, 0xd4, 0x77, 0x19, 0x91, 0x63, - 0xdf, 0xf6, 0xe7, 0x51, 0x3b, 0x56, 0xa1, 0x76, 0x3a, 0x25, 0x8e, 0xe7, 0xbb, 0x81, 0xe1, 0x07, - 0x2e, 0xfd, 0x06, 0xa7, 0xe3, 0x61, 0xfd, 0xc0, 0xd8, 0xf8, 0x30, 0xea, 0x21, 0x1b, 0x0e, 0xc7, - 0x94, 0x5b, 0xdc, 0x20, 0xf1, 0xad, 0xba, 0xfc, 0x57, 0x66, 0x51, 0x31, 0x64, 0x4a, 0xfb, 0xdb, - 0xa7, 0x50, 0xdb, 0x67, 0x5e, 0x85, 0x1a, 0x4e, 0x67, 0xd9, 0x43, 0xc2, 0x3a, 0xda, 0x41, 0x07, - 0x31, 0x88, 0x7e, 0x86, 0x77, 0x3e, 0xf7, 0x89, 0x35, 0x34, 0x82, 0xa1, 0xc5, 0xa6, 0xcc, 0x17, - 0xc3, 0xaa, 0xb4, 0xcf, 0x9f, 0x42, 0xed, 0x99, 0xe5, 0x65, 0x2d, 0x69, 0x3b, 0xd6, 0x2b, 0x02, - 0xb8, 0x0c, 0xae, 0x23, 0x35, 0x0a, 0x4d, 0x1d, 0x6e, 0x4c, 0xb6, 0xa1, 0x95, 0x6d, 0xe8, 0xb4, - 0xe5, 0x65, 0xe8, 
0xb4, 0x1d, 0xeb, 0x15, 0x01, 0xac, 0x43, 0xfb, 0x70, 0x34, 0x25, 0xf3, 0xa8, - 0xb2, 0x19, 0x33, 0xa9, 0xeb, 0x0d, 0x7d, 0x3e, 0x74, 0x08, 0x73, 0xd5, 0x9c, 0xc8, 0x70, 0xf1, - 0x14, 0x6a, 0xfb, 0x09, 0xab, 0x50, 0xfb, 0x2c, 0x9d, 0x68, 0x2f, 0x0d, 0xeb, 0x68, 0x4a, 0xe6, - 0x83, 0x35, 0x7c, 0xcb, 0x07, 0x11, 0x38, 0x83, 0xf2, 0x4e, 0x83, 0xd1, 0x57, 0x50, 0x8c, 0x9b, - 0xc9, 0xcc, 0x78, 0x18, 0xdb, 0x1f, 0xaf, 0x42, 0xed, 0x78, 0x5f, 0xbb, 0x99, 0x89, 0xf5, 0x82, - 0x10, 0x7b, 0x26, 0x6a, 0x81, 0x42, 0x1c, 0xe6, 0xa9, 0xd9, 0xba, 0x1c, 0xf9, 0x24, 0x63, 0x72, - 0x98, 0xf6, 0x8b, 0x18, 0x58, 0x17, 0x44, 0xfc, 0x35, 0xa0, 0x41, 0xb4, 0x9c, 0x3a, 0x74, 0x46, - 0x2d, 0xee, 0x50, 0xb7, 0x43, 0x7c, 0x82, 0x3e, 0x81, 0x52, 0x32, 0x77, 0xbd, 0x4e, 0xf2, 0x18, - 0xb6, 0x40, 0x3c, 0xd0, 0xf8, 0x1f, 0x09, 0xca, 0xc9, 0xcb, 0x11, 0x3e, 0x08, 0x14, 0x9b, 0x4c, - 0x69, 0x42, 0x17, 0xf2, 0xf3, 0x5d, 0x95, 0xfd, 0xdf, 0x5d, 0x25, 0xa7, 0x77, 0x55, 0x0f, 0x76, - 0xd7, 0x8d, 0xaa, 0x7c, 0xc0, 0xaa, 0xfa, 0x12, 0xf2, 0xc9, 0x0a, 0xc9, 0xbd, 0x65, 0x85, 0x24, - 0xe4, 0xf6, 0xf7, 0x7f, 0x2d, 0x6b, 0xd2, 0xfd, 0xb2, 0x26, 0x3d, 0x2c, 0x6b, 0xd2, 0xbf, 0xcb, - 0x9a, 0xf4, 0xfb, 0x63, 0x2d, 0xf3, 0xf0, 0x58, 0xcb, 0xfc, 0xfd, 0x58, 0xcb, 0xfc, 0xd2, 0x18, - 0x33, 0x7f, 0x12, 0x8c, 0x9a, 0x06, 0x9f, 0xb6, 0x92, 0x70, 0xe2, 0xdb, 0x9a, 0xb7, 0x36, 0x3f, - 0x0b, 0x7f, 0xe1, 0x50, 0x6f, 0x94, 0x17, 0xeb, 0xff, 0xfc, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, - 0xcb, 0x00, 0xd5, 0xe0, 0x45, 0x06, 0x00, 0x00, -} - -func (this *Project) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Project) - if !ok { - that2, ok := that.(Project) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Index != that1.Index { - return false - } - if this.Subscription != that1.Subscription { - return false - } - if this.Description != that1.Description { - return 
false - } - if this.Enabled != that1.Enabled { - return false - } - if len(this.ProjectKeys) != len(that1.ProjectKeys) { - return false - } - for i := range this.ProjectKeys { - if !this.ProjectKeys[i].Equal(&that1.ProjectKeys[i]) { - return false - } - } - if !this.AdminPolicy.Equal(that1.AdminPolicy) { - return false - } - if this.UsedCu != that1.UsedCu { - return false - } - if !this.SubscriptionPolicy.Equal(that1.SubscriptionPolicy) { - return false - } - if this.Snapshot != that1.Snapshot { - return false - } - return true -} -func (this *ProjectKey) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ProjectKey) - if !ok { - that2, ok := that.(ProjectKey) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Key != that1.Key { - return false - } - if this.Kinds != that1.Kinds { - return false - } - return true -} -func (this *Policy) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Policy) - if !ok { - that2, ok := that.(Policy) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.ChainPolicies) != len(that1.ChainPolicies) { - return false - } - for i := range this.ChainPolicies { - if !this.ChainPolicies[i].Equal(&that1.ChainPolicies[i]) { - return false - } - } - if this.GeolocationProfile != that1.GeolocationProfile { - return false - } - if this.TotalCuLimit != that1.TotalCuLimit { - return false - } - if this.EpochCuLimit != that1.EpochCuLimit { - return false - } - if this.MaxProvidersToPair != that1.MaxProvidersToPair { - return false - } - return true -} -func (this *ChainPolicy) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ChainPolicy) - if !ok { - that2, ok := that.(ChainPolicy) - if ok { - 
that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.ChainId != that1.ChainId { - return false - } - if len(this.Apis) != len(that1.Apis) { - return false - } - for i := range this.Apis { - if this.Apis[i] != that1.Apis[i] { - return false - } - } - return true -} -func (this *ProtoDeveloperData) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ProtoDeveloperData) - if !ok { - that2, ok := that.(ProtoDeveloperData) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.ProjectID != that1.ProjectID { - return false - } - return true -} -func (this *ProjectData) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ProjectData) - if !ok { - that2, ok := that.(ProjectData) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Name != that1.Name { - return false - } - if this.Description != that1.Description { - return false - } - if this.Enabled != that1.Enabled { - return false - } - if len(this.ProjectKeys) != len(that1.ProjectKeys) { - return false - } - for i := range this.ProjectKeys { - if !this.ProjectKeys[i].Equal(&that1.ProjectKeys[i]) { - return false - } - } - if !this.Policy.Equal(that1.Policy) { - return false - } - return true -} -func (m *Project) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Project) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Project) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l 
int - _ = l - if m.Snapshot != 0 { - i = encodeVarintProject(dAtA, i, uint64(m.Snapshot)) - i-- - dAtA[i] = 0x48 - } - if m.SubscriptionPolicy != nil { - { - size, err := m.SubscriptionPolicy.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProject(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - if m.UsedCu != 0 { - i = encodeVarintProject(dAtA, i, uint64(m.UsedCu)) - i-- - dAtA[i] = 0x38 - } - if m.AdminPolicy != nil { - { - size, err := m.AdminPolicy.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProject(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - if len(m.ProjectKeys) > 0 { - for iNdEx := len(m.ProjectKeys) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ProjectKeys[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProject(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - } - if m.Enabled { - i-- - if m.Enabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if len(m.Description) > 0 { - i -= len(m.Description) - copy(dAtA[i:], m.Description) - i = encodeVarintProject(dAtA, i, uint64(len(m.Description))) - i-- - dAtA[i] = 0x1a - } - if len(m.Subscription) > 0 { - i -= len(m.Subscription) - copy(dAtA[i:], m.Subscription) - i = encodeVarintProject(dAtA, i, uint64(len(m.Subscription))) - i-- - dAtA[i] = 0x12 - } - if len(m.Index) > 0 { - i -= len(m.Index) - copy(dAtA[i:], m.Index) - i = encodeVarintProject(dAtA, i, uint64(len(m.Index))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ProjectKey) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ProjectKey) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m 
*ProjectKey) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Kinds != 0 { - i = encodeVarintProject(dAtA, i, uint64(m.Kinds)) - i-- - dAtA[i] = 0x20 - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintProject(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Policy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Policy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Policy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.MaxProvidersToPair != 0 { - i = encodeVarintProject(dAtA, i, uint64(m.MaxProvidersToPair)) - i-- - dAtA[i] = 0x28 - } - if m.EpochCuLimit != 0 { - i = encodeVarintProject(dAtA, i, uint64(m.EpochCuLimit)) - i-- - dAtA[i] = 0x20 - } - if m.TotalCuLimit != 0 { - i = encodeVarintProject(dAtA, i, uint64(m.TotalCuLimit)) - i-- - dAtA[i] = 0x18 - } - if m.GeolocationProfile != 0 { - i = encodeVarintProject(dAtA, i, uint64(m.GeolocationProfile)) - i-- - dAtA[i] = 0x10 - } - if len(m.ChainPolicies) > 0 { - for iNdEx := len(m.ChainPolicies) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ChainPolicies[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProject(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ChainPolicy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ChainPolicy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} 
- -func (m *ChainPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Apis) > 0 { - for iNdEx := len(m.Apis) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Apis[iNdEx]) - copy(dAtA[i:], m.Apis[iNdEx]) - i = encodeVarintProject(dAtA, i, uint64(len(m.Apis[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if len(m.ChainId) > 0 { - i -= len(m.ChainId) - copy(dAtA[i:], m.ChainId) - i = encodeVarintProject(dAtA, i, uint64(len(m.ChainId))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ProtoDeveloperData) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ProtoDeveloperData) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ProtoDeveloperData) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ProjectID) > 0 { - i -= len(m.ProjectID) - copy(dAtA[i:], m.ProjectID) - i = encodeVarintProject(dAtA, i, uint64(len(m.ProjectID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ProjectData) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ProjectData) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ProjectData) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Policy != nil { - { - size, err := m.Policy.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProject(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if len(m.ProjectKeys) > 0 { - for iNdEx := len(m.ProjectKeys) - 1; iNdEx >= 0; 
iNdEx-- { - { - size, err := m.ProjectKeys[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProject(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - if m.Enabled { - i-- - if m.Enabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if len(m.Description) > 0 { - i -= len(m.Description) - copy(dAtA[i:], m.Description) - i = encodeVarintProject(dAtA, i, uint64(len(m.Description))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintProject(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintProject(dAtA []byte, offset int, v uint64) int { - offset -= sovProject(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Project) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Index) - if l > 0 { - n += 1 + l + sovProject(uint64(l)) - } - l = len(m.Subscription) - if l > 0 { - n += 1 + l + sovProject(uint64(l)) - } - l = len(m.Description) - if l > 0 { - n += 1 + l + sovProject(uint64(l)) - } - if m.Enabled { - n += 2 - } - if len(m.ProjectKeys) > 0 { - for _, e := range m.ProjectKeys { - l = e.Size() - n += 1 + l + sovProject(uint64(l)) - } - } - if m.AdminPolicy != nil { - l = m.AdminPolicy.Size() - n += 1 + l + sovProject(uint64(l)) - } - if m.UsedCu != 0 { - n += 1 + sovProject(uint64(m.UsedCu)) - } - if m.SubscriptionPolicy != nil { - l = m.SubscriptionPolicy.Size() - n += 1 + l + sovProject(uint64(l)) - } - if m.Snapshot != 0 { - n += 1 + sovProject(uint64(m.Snapshot)) - } - return n -} - -func (m *ProjectKey) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovProject(uint64(l)) - } - if m.Kinds != 0 { - n += 1 + sovProject(uint64(m.Kinds)) - } - return n -} - -func (m 
*Policy) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ChainPolicies) > 0 { - for _, e := range m.ChainPolicies { - l = e.Size() - n += 1 + l + sovProject(uint64(l)) - } - } - if m.GeolocationProfile != 0 { - n += 1 + sovProject(uint64(m.GeolocationProfile)) - } - if m.TotalCuLimit != 0 { - n += 1 + sovProject(uint64(m.TotalCuLimit)) - } - if m.EpochCuLimit != 0 { - n += 1 + sovProject(uint64(m.EpochCuLimit)) - } - if m.MaxProvidersToPair != 0 { - n += 1 + sovProject(uint64(m.MaxProvidersToPair)) - } - return n -} - -func (m *ChainPolicy) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ChainId) - if l > 0 { - n += 1 + l + sovProject(uint64(l)) - } - if len(m.Apis) > 0 { - for _, s := range m.Apis { - l = len(s) - n += 1 + l + sovProject(uint64(l)) - } - } - return n -} - -func (m *ProtoDeveloperData) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ProjectID) - if l > 0 { - n += 1 + l + sovProject(uint64(l)) - } - return n -} - -func (m *ProjectData) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovProject(uint64(l)) - } - l = len(m.Description) - if l > 0 { - n += 1 + l + sovProject(uint64(l)) - } - if m.Enabled { - n += 2 - } - if len(m.ProjectKeys) > 0 { - for _, e := range m.ProjectKeys { - l = e.Size() - n += 1 + l + sovProject(uint64(l)) - } - } - if m.Policy != nil { - l = m.Policy.Size() - n += 1 + l + sovProject(uint64(l)) - } - return n -} - -func sovProject(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozProject(x uint64) (n int) { - return sovProject(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Project) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF 
- } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Project: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Project: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Index = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subscription", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Subscription = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - 
if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Enabled = bool(v != 0) - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProjectKeys", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ProjectKeys = append(m.ProjectKeys, ProjectKey{}) - if err := m.ProjectKeys[len(m.ProjectKeys)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AdminPolicy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return 
ErrInvalidLengthProject - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AdminPolicy == nil { - m.AdminPolicy = &Policy{} - } - if err := m.AdminPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UsedCu", wireType) - } - m.UsedCu = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UsedCu |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SubscriptionPolicy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SubscriptionPolicy == nil { - m.SubscriptionPolicy = &Policy{} - } - if err := m.SubscriptionPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Snapshot", wireType) - } - m.Snapshot = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Snapshot |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipProject(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 
0 { - return ErrInvalidLengthProject - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ProjectKey) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ProjectKey: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ProjectKey: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Kinds", wireType) - } - m.Kinds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Kinds |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipProject(dAtA[iNdEx:]) - if err != 
nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProject - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Policy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Policy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Policy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ChainPolicies", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ChainPolicies = append(m.ChainPolicies, ChainPolicy{}) - if err := m.ChainPolicies[len(m.ChainPolicies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GeolocationProfile", wireType) - } - m.GeolocationProfile = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - m.GeolocationProfile |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalCuLimit", wireType) - } - m.TotalCuLimit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalCuLimit |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EpochCuLimit", wireType) - } - m.EpochCuLimit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.EpochCuLimit |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxProvidersToPair", wireType) - } - m.MaxProvidersToPair = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxProvidersToPair |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipProject(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProject - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ChainPolicy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - 
fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ChainPolicy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ChainPolicy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ChainId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Apis", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Apis = append(m.Apis, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProject(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProject - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - 
} - return nil -} -func (m *ProtoDeveloperData) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ProtoDeveloperData: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ProtoDeveloperData: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProjectID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ProjectID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProject(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProject - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ProjectData) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ProjectData: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ProjectData: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Enabled = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProjectKeys", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ProjectKeys = append(m.ProjectKeys, ProjectKey{}) - if err := m.ProjectKeys[len(m.ProjectKeys)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Policy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProject - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProject - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProject - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Policy == nil { - m.Policy = &Policy{} - } - if err := m.Policy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProject(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProject - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - 
return nil -} -func skipProject(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowProject - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowProject - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowProject - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthProject - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupProject - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthProject - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthProject = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowProject = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupProject = fmt.Errorf("proto: unexpected end of group") -) diff --git a/x/projects/module.go b/x/projects/module.go index 2e78ef4414..50e6962d03 100644 --- a/x/projects/module.go +++ b/x/projects/module.go @@ -127,26 +127,6 @@ func (am AppModule) RegisterServices(cfg module.Configurator) { migrator := keeper.NewMigrator(am.keeper) - // register v2 -> v3 migration - if err := cfg.RegisterMigration(types.ModuleName, 2, 
migrator.Migrate2to3); err != nil { - // panic:ok: at start up, migration cannot proceed anyhow - panic(fmt.Errorf("%s: failed to register migration to v3: %w", types.ModuleName, err)) - } - // register v3 -> v4 migration - if err := cfg.RegisterMigration(types.ModuleName, 3, migrator.Migrate3to4); err != nil { - // panic:ok: at start up, migration cannot proceed anyhow - panic(fmt.Errorf("%s: failed to register migration to v4: %w", types.ModuleName, err)) - } - // register v4 -> v5 migration - if err := cfg.RegisterMigration(types.ModuleName, 4, migrator.Migrate4to5); err != nil { - // panic:ok: at start up, migration cannot proceed anyhow - panic(fmt.Errorf("%s: failed to register migration to v5: %w", types.ModuleName, err)) - } - // register v5 -> v6 migration - if err := cfg.RegisterMigration(types.ModuleName, 5, migrator.Migrate5to6); err != nil { - // panic:ok: at start up, migration cannot proceed anyhow - panic(fmt.Errorf("%s: failed to register migration to v5: %w", types.ModuleName, err)) - } // register v6 -> v7 migration if err := cfg.RegisterMigration(types.ModuleName, 6, migrator.Migrate6to7); err != nil { // panic:ok: at start up, migration cannot proceed anyhow diff --git a/x/projects/module_simulation.go b/x/projects/module_simulation.go deleted file mode 100644 index b2039803ff..0000000000 --- a/x/projects/module_simulation.go +++ /dev/null @@ -1,119 +0,0 @@ -package projects - -import ( - "math/rand" - - "github.com/cosmos/cosmos-sdk/testutil/sims" - - "github.com/cosmos/cosmos-sdk/baseapp" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/module" - simtypes "github.com/cosmos/cosmos-sdk/types/simulation" - "github.com/cosmos/cosmos-sdk/x/simulation" - "github.com/lavanet/lava/v4/testutil/sample" - projectssimulation "github.com/lavanet/lava/v4/x/projects/simulation" - "github.com/lavanet/lava/v4/x/projects/types" -) - -// avoid unused import issue -var ( - _ = sample.AccAddress - _ = 
projectssimulation.FindAccount - _ = sims.StakePerAccount - _ = simulation.MsgEntryKind - _ = baseapp.Paramspace -) - -const ( - opWeightMsgAddKeys = "op_weight_msg_add_keys" - // TODO: Determine the simulation weight value - defaultWeightMsgAddKeys int = 100 - - opWeightMsgDelKeys = "op_weight_msg_del_keys" - // TODO: Determine the simulation weight value - defaultWeightMsgDelKeys int = 100 - - opWeightMsgSetPolicy = "op_weight_msg_set_admin_policy" - // TODO: Determine the simulation weight value - defaultWeightMsgSetPolicy int = 100 - - opWeightMsgSetSubscriptionPolicy = "op_weight_msg_set_subscription_policy" - // TODO: Determine the simulation weight value - defaultWeightMsgSetSubscriptionPolicy int = 100 - - // this line is used by starport scaffolding # simapp/module/const -) - -// GenerateGenesisState creates a randomized GenState of the module -func (AppModule) GenerateGenesisState(simState *module.SimulationState) { - accs := make([]string, len(simState.Accounts)) - for i, acc := range simState.Accounts { - accs[i] = acc.Address.String() - } - projectsGenesis := types.GenesisState{ - Params: types.DefaultParams(), - // this line is used by starport scaffolding # simapp/module/genesisState - } - simState.GenState[types.ModuleName] = simState.Cdc.MustMarshalJSON(&projectsGenesis) -} - -// ProposalContents doesn't return any content functions for governance proposals -func (AppModule) ProposalContents(_ module.SimulationState) []simtypes.WeightedProposalMsg { - return nil -} - -// RegisterStoreDecoder registers a decoder -func (am AppModule) RegisterStoreDecoder(_ sdk.StoreDecoderRegistry) {} - -// WeightedOperations returns the all the gov module operations with their respective weights. 
-func (am AppModule) WeightedOperations(simState module.SimulationState) []simtypes.WeightedOperation { - operations := make([]simtypes.WeightedOperation, 0) - - var weightMsgAddKeys int - simState.AppParams.GetOrGenerate(simState.Cdc, opWeightMsgAddKeys, &weightMsgAddKeys, nil, - func(_ *rand.Rand) { - weightMsgAddKeys = defaultWeightMsgAddKeys - }, - ) - operations = append(operations, simulation.NewWeightedOperation( - weightMsgAddKeys, - projectssimulation.SimulateMsgAddKeys(am.keeper), - )) - - var weightMsgDelKeys int - simState.AppParams.GetOrGenerate(simState.Cdc, opWeightMsgDelKeys, &weightMsgDelKeys, nil, - func(_ *rand.Rand) { - weightMsgDelKeys = defaultWeightMsgDelKeys - }, - ) - operations = append(operations, simulation.NewWeightedOperation( - weightMsgDelKeys, - projectssimulation.SimulateMsgDelKeys(am.keeper), - )) - - var weightMsgSetPolicy int - simState.AppParams.GetOrGenerate(simState.Cdc, opWeightMsgSetPolicy, &weightMsgSetPolicy, nil, - func(_ *rand.Rand) { - weightMsgSetPolicy = defaultWeightMsgSetPolicy - }, - ) - operations = append(operations, simulation.NewWeightedOperation( - weightMsgSetPolicy, - projectssimulation.SimulateMsgSetPolicy(am.keeper), - )) - - var weightMsgSetSubscriptionPolicy int - simState.AppParams.GetOrGenerate(simState.Cdc, opWeightMsgSetSubscriptionPolicy, &weightMsgSetSubscriptionPolicy, nil, - func(_ *rand.Rand) { - weightMsgSetSubscriptionPolicy = defaultWeightMsgSetSubscriptionPolicy - }, - ) - operations = append(operations, simulation.NewWeightedOperation( - weightMsgSetSubscriptionPolicy, - projectssimulation.SimulateMsgSetSubscriptionPolicy(am.keeper), - )) - - // this line is used by starport scaffolding # simapp/module/operation - - return operations -} diff --git a/x/projects/simulation/add_project_keys.go b/x/projects/simulation/add_project_keys.go deleted file mode 100644 index 98f1b06a10..0000000000 --- a/x/projects/simulation/add_project_keys.go +++ /dev/null @@ -1,27 +0,0 @@ -package simulation - 
-import ( - "math/rand" - - "github.com/cosmos/cosmos-sdk/baseapp" - sdk "github.com/cosmos/cosmos-sdk/types" - simtypes "github.com/cosmos/cosmos-sdk/types/simulation" - "github.com/lavanet/lava/v4/x/projects/keeper" - "github.com/lavanet/lava/v4/x/projects/types" -) - -func SimulateMsgAddKeys( - k keeper.Keeper, -) simtypes.Operation { - return func(r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string, - ) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { - simAccount, _ := simtypes.RandomAcc(r, accs) - msg := &types.MsgAddKeys{ - Creator: simAccount.Address.String(), - } - - // TODO: Handling the AddKeys simulation - - return simtypes.NoOpMsg(types.ModuleName, msg.Type(), "AddKeys simulation not implemented"), nil, nil - } -} diff --git a/x/projects/simulation/del_project_keys.go b/x/projects/simulation/del_project_keys.go deleted file mode 100644 index 272b112a64..0000000000 --- a/x/projects/simulation/del_project_keys.go +++ /dev/null @@ -1,27 +0,0 @@ -package simulation - -import ( - "math/rand" - - "github.com/cosmos/cosmos-sdk/baseapp" - sdk "github.com/cosmos/cosmos-sdk/types" - simtypes "github.com/cosmos/cosmos-sdk/types/simulation" - "github.com/lavanet/lava/v4/x/projects/keeper" - "github.com/lavanet/lava/v4/x/projects/types" -) - -func SimulateMsgDelKeys( - k keeper.Keeper, -) simtypes.Operation { - return func(r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string, - ) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { - simAccount, _ := simtypes.RandomAcc(r, accs) - msg := &types.MsgDelKeys{ - Creator: simAccount.Address.String(), - } - - // TODO: Handling the DelKeys simulation - - return simtypes.NoOpMsg(types.ModuleName, msg.Type(), "DelKeys simulation not implemented"), nil, nil - } -} diff --git a/x/projects/simulation/set_admin_policy.go b/x/projects/simulation/set_admin_policy.go deleted file mode 100644 index 90a10e8bc4..0000000000 --- 
a/x/projects/simulation/set_admin_policy.go +++ /dev/null @@ -1,27 +0,0 @@ -package simulation - -import ( - "math/rand" - - "github.com/cosmos/cosmos-sdk/baseapp" - sdk "github.com/cosmos/cosmos-sdk/types" - simtypes "github.com/cosmos/cosmos-sdk/types/simulation" - "github.com/lavanet/lava/v4/x/projects/keeper" - "github.com/lavanet/lava/v4/x/projects/types" -) - -func SimulateMsgSetPolicy( - k keeper.Keeper, -) simtypes.Operation { - return func(r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string, - ) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { - simAccount, _ := simtypes.RandomAcc(r, accs) - msg := &types.MsgSetPolicy{ - Creator: simAccount.Address.String(), - } - - // TODO: Handling the SetPolicy simulation - - return simtypes.NoOpMsg(types.ModuleName, msg.Type(), "SetPolicy simulation not implemented"), nil, nil - } -} diff --git a/x/projects/simulation/set_subscription_policy.go b/x/projects/simulation/set_subscription_policy.go deleted file mode 100644 index 21fecf1405..0000000000 --- a/x/projects/simulation/set_subscription_policy.go +++ /dev/null @@ -1,27 +0,0 @@ -package simulation - -import ( - "math/rand" - - "github.com/cosmos/cosmos-sdk/baseapp" - sdk "github.com/cosmos/cosmos-sdk/types" - simtypes "github.com/cosmos/cosmos-sdk/types/simulation" - "github.com/lavanet/lava/v4/x/projects/keeper" - "github.com/lavanet/lava/v4/x/projects/types" -) - -func SimulateMsgSetSubscriptionPolicy( - k keeper.Keeper, -) simtypes.Operation { - return func(r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string, - ) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { - simAccount, _ := simtypes.RandomAcc(r, accs) - msg := &types.MsgSetSubscriptionPolicy{ - Creator: simAccount.Address.String(), - } - - // TODO: Handling the SetSubscriptionPolicy simulation - - return simtypes.NoOpMsg(types.ModuleName, msg.Type(), "SetSubscriptionPolicy simulation not implemented"), 
nil, nil - } -} diff --git a/x/projects/simulation/simap.go b/x/projects/simulation/simap.go deleted file mode 100644 index 92c437c0d1..0000000000 --- a/x/projects/simulation/simap.go +++ /dev/null @@ -1,15 +0,0 @@ -package simulation - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - simtypes "github.com/cosmos/cosmos-sdk/types/simulation" -) - -// FindAccount find a specific address from an account list -func FindAccount(accs []simtypes.Account, address string) (simtypes.Account, bool) { - creator, err := sdk.AccAddressFromBech32(address) - if err != nil { - panic(err) - } - return simtypes.FindAccount(accs, creator) -} diff --git a/x/protocol/module_simulation.go b/x/protocol/module_simulation.go deleted file mode 100644 index 60a67b4d0f..0000000000 --- a/x/protocol/module_simulation.go +++ /dev/null @@ -1,79 +0,0 @@ -package protocol - -import ( - "math/rand" - - "github.com/cosmos/cosmos-sdk/testutil/sims" - types2 "github.com/cosmos/cosmos-sdk/x/auth/types" - - "github.com/cosmos/cosmos-sdk/baseapp" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/module" - simtypes "github.com/cosmos/cosmos-sdk/types/simulation" - "github.com/cosmos/cosmos-sdk/x/simulation" - "github.com/lavanet/lava/v4/testutil/sample" - protocolsimulation "github.com/lavanet/lava/v4/x/protocol/simulation" - "github.com/lavanet/lava/v4/x/protocol/types" -) - -// avoid unused import issue -var ( - _ = sample.AccAddress - _ = protocolsimulation.FindAccount - _ = sims.StakePerAccount - _ = simulation.MsgEntryKind - _ = baseapp.Paramspace -) - -const ( -// this line is used by starport scaffolding # simapp/module/const -) - -// GenerateGenesisState creates a randomized GenState of the module -func (AppModule) GenerateGenesisState(simState *module.SimulationState) { - accs := make([]string, len(simState.Accounts)) - for i, acc := range simState.Accounts { - accs[i] = acc.Address.String() - } - protocolGenesis := types.GenesisState{ - Params: 
types.DefaultParams(), - // this line is used by starport scaffolding # simapp/module/genesisState - } - simState.GenState[types.ModuleName] = simState.Cdc.MustMarshalJSON(&protocolGenesis) -} - -// ProposalContents doesn't return any content functions for governance proposals -func (AppModule) ProposalContents(_ module.SimulationState) []simtypes.WeightedProposalMsg { - return nil -} - -// TODO: Add weighted proposals -func (AppModule) ProposalMsgs(_ module.SimulationState) []simtypes.WeightedProposalMsg { - return []simtypes.WeightedProposalMsg{ - simulation.NewWeightedProposalMsg("op_weight_msg_update_params", 100, func(r *rand.Rand, ctx sdk.Context, accs []simtypes.Account) sdk.Msg { - return &types2.MsgUpdateParams{} - }), - } -} - -//// RandomizedParams creates randomized param changes for the simulator -// func (am AppModule) RandomizedParams(_ *rand.Rand) []simtypes.ParamChange { -// protocolParams := types.DefaultParams() -// return []simtypes.ParamChange{ -// simulation.NewSimParamChange(types.ModuleName, string(types.KeyVersion), func(r *rand.Rand) string { -// return string(types.Amino.MustMarshalJSON(protocolParams.Version)) -// }), -// } -// } - -// RegisterStoreDecoder registers a decoder -func (am AppModule) RegisterStoreDecoder(_ sdk.StoreDecoderRegistry) {} - -// WeightedOperations returns the all the gov module operations with their respective weights. 
-func (am AppModule) WeightedOperations(simState module.SimulationState) []simtypes.WeightedOperation { - operations := make([]simtypes.WeightedOperation, 0) - - // this line is used by starport scaffolding # simapp/module/operation - - return operations -} diff --git a/x/protocol/simulation/simap.go b/x/protocol/simulation/simap.go deleted file mode 100644 index 92c437c0d1..0000000000 --- a/x/protocol/simulation/simap.go +++ /dev/null @@ -1,15 +0,0 @@ -package simulation - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - simtypes "github.com/cosmos/cosmos-sdk/types/simulation" -) - -// FindAccount find a specific address from an account list -func FindAccount(accs []simtypes.Account, address string) (simtypes.Account, bool) { - creator, err := sdk.AccAddressFromBech32(address) - if err != nil { - panic(err) - } - return simtypes.FindAccount(accs, creator) -} diff --git a/x/protocol/types/params.go b/x/protocol/types/params.go index 772f2ab9e5..cc76214289 100644 --- a/x/protocol/types/params.go +++ b/x/protocol/types/params.go @@ -12,7 +12,7 @@ import ( var _ paramtypes.ParamSet = (*Params)(nil) const ( - TARGET_VERSION = "4.1.0" + TARGET_VERSION = "4.2.1" MIN_VERSION = "3.1.0" ) diff --git a/x/rewards/module_simulation.go b/x/rewards/module_simulation.go deleted file mode 100644 index a5f6613792..0000000000 --- a/x/rewards/module_simulation.go +++ /dev/null @@ -1,56 +0,0 @@ -package rewards - -import ( - "github.com/cosmos/cosmos-sdk/baseapp" - "github.com/cosmos/cosmos-sdk/testutil/sims" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/module" - simtypes "github.com/cosmos/cosmos-sdk/types/simulation" - "github.com/cosmos/cosmos-sdk/x/simulation" - "github.com/lavanet/lava/v4/testutil/sample" - rewardssimulation "github.com/lavanet/lava/v4/x/rewards/simulation" - "github.com/lavanet/lava/v4/x/rewards/types" -) - -// avoid unused import issue -var ( - _ = sample.AccAddress - _ = rewardssimulation.FindAccount - _ = 
sims.StakePerAccount - _ = simulation.MsgEntryKind - _ = baseapp.Paramspace -) - -const ( -// this line is used by starport scaffolding # simapp/module/const -) - -// GenerateGenesisState creates a randomized GenState of the module -func (AppModule) GenerateGenesisState(simState *module.SimulationState) { - accs := make([]string, len(simState.Accounts)) - for i, acc := range simState.Accounts { - accs[i] = acc.Address.String() - } - rewardsGenesis := types.GenesisState{ - Params: types.DefaultParams(), - // this line is used by starport scaffolding # simapp/module/genesisState - } - simState.GenState[types.ModuleName] = simState.Cdc.MustMarshalJSON(&rewardsGenesis) -} - -// ProposalContents doesn't return any content functions for governance proposals -func (AppModule) ProposalContents(_ module.SimulationState) []simtypes.WeightedProposalMsg { - return nil -} - -// RegisterStoreDecoder registers a decoder -func (am AppModule) RegisterStoreDecoder(_ sdk.StoreDecoderRegistry) {} - -// WeightedOperations returns the all the gov module operations with their respective weights. 
-func (am AppModule) WeightedOperations(simState module.SimulationState) []simtypes.WeightedOperation { - operations := make([]simtypes.WeightedOperation, 0) - - // this line is used by starport scaffolding # simapp/module/operation - - return operations -} diff --git a/x/rewards/simulation/simap.go b/x/rewards/simulation/simap.go deleted file mode 100644 index 92c437c0d1..0000000000 --- a/x/rewards/simulation/simap.go +++ /dev/null @@ -1,15 +0,0 @@ -package simulation - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - simtypes "github.com/cosmos/cosmos-sdk/types/simulation" -) - -// FindAccount find a specific address from an account list -func FindAccount(accs []simtypes.Account, address string) (simtypes.Account, bool) { - creator, err := sdk.AccAddressFromBech32(address) - if err != nil { - panic(err) - } - return simtypes.FindAccount(accs, creator) -} diff --git a/x/spec/README.md b/x/spec/README.md index 6f43a1f00d..9a6f09bd2f 100644 --- a/x/spec/README.md +++ b/x/spec/README.md @@ -53,8 +53,8 @@ type Spec struct { BlockLastUpdated uint64 // the last block this spec was updated on chain ReliabilityThreshold uint32 // this determines the probability of data reliability checks by the consumer DataReliabilityEnabled bool // enables/disables data reliability for the chain - BlockDistanceForFinalizedData uint32 - BlocksInFinalizationProof uint32 + BlockDistanceForFinalizedData uint32 // number of finalized blocks a provider keeps for data reliability + BlocksInFinalizationProof uint32 // number of blocks for finalization } ``` `Coin` type is from Cosmos-SDK (`cosmos.base.v1beta1.Coin`). @@ -157,9 +157,9 @@ This struct defines properties of an api. 
```go type SpecCategory struct { Deterministic bool // if this api have the same response across nodes - Local bool // TBD + Local bool // specific to the local node (like node info query) Subscription bool // subscription base api - Stateful uint32 // TBD + Stateful uint32 // true for transaction APIs HangingApi bool // marks this api with longer timeout } ``` diff --git a/x/spec/ante/ante_test.go b/x/spec/ante/ante_test.go index bc90cce63c..1080918ce1 100644 --- a/x/spec/ante/ante_test.go +++ b/x/spec/ante/ante_test.go @@ -178,9 +178,8 @@ func TestNewExpeditedProposalFilterAnteDecorator(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { + t.Parallel() k, ctx := specutils.SpecKeeper(t) params := spectypes.DefaultParams() params.AllowlistedExpeditedMsgs = []string{ diff --git a/x/spec/client/cli/query_spec_test.go b/x/spec/client/cli/query_spec_test.go index cf648aba25..5c187a7762 100644 --- a/x/spec/client/cli/query_spec_test.go +++ b/x/spec/client/cli/query_spec_test.go @@ -70,7 +70,6 @@ func TestShowSpec(t *testing.T) { err: status.Error(codes.InvalidArgument, "not found"), }, } { - tc := tc t.Run(tc.desc, func(t *testing.T) { args := []string{ tc.idIndex, diff --git a/x/spec/keeper/migrations.go b/x/spec/keeper/migrations.go index f39fdc02f5..d9126c23ad 100644 --- a/x/spec/keeper/migrations.go +++ b/x/spec/keeper/migrations.go @@ -1,8 +1,6 @@ package keeper import ( - "strings" - "github.com/cosmos/cosmos-sdk/store/prefix" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/lavanet/lava/v4/x/spec/types" @@ -17,15 +15,6 @@ func NewMigrator(keeper Keeper) Migrator { return Migrator{keeper: keeper} } -func (m Migrator) Migrate2to3(ctx sdk.Context) error { - specs := m.keeper.GetAllSpec(ctx) - for _, spec := range specs { - spec.Name = strings.ToLower(spec.Name) - m.keeper.SetSpec(ctx, spec) - } - return nil -} - func (m Migrator) Migrate3to4(ctx sdk.Context) error { store := 
prefix.NewStore(ctx.KVStore(m.keeper.storeKey), types.KeyPrefix(types.SpecKeyPrefix)) iterator := sdk.KVStorePrefixIterator(store, []byte{}) diff --git a/x/spec/keeper/spec_test.go b/x/spec/keeper/spec_test.go index 1fda3f3e03..49ccedf14b 100644 --- a/x/spec/keeper/spec_test.go +++ b/x/spec/keeper/spec_test.go @@ -819,48 +819,88 @@ func TestCookbookSpecs(t *testing.T) { ts := newTester(t) getToTopMostPath := "../../.././cookbook/specs/" - // base specs needs to be proposed first - baseSpecs := []string{"ibc.json", "tendermint.json", "ethermint.json", "cosmoswasm.json", "cosmossdk.json", "cosmossdk_45.json", "cosmossdk_full.json", "ethereum.json", "solana.json"} - Specs, err := getAllFilesInDirectory(getToTopMostPath) + specsFiles, err := getAllFilesInDirectory(getToTopMostPath) require.NoError(t, err) - // remove the base specs so there wont be a duplicate - Specs = removeSetFromSet(baseSpecs, Specs) - Specs = append(baseSpecs, Specs...) - for _, fileName := range Specs { - proposal := utils.SpecAddProposalJSON{} + // Sort specs by hierarchy - specs that are imported by others should come first + specImports := make(map[string][]string) + specProposals := make(map[string]types.Spec) + // First read all spec contents + for _, fileName := range specsFiles { contents, err := os.ReadFile(getToTopMostPath + fileName) require.NoError(t, err) + // Parse imports from spec + var proposal utils.SpecAddProposalJSON decoder := json.NewDecoder(bytes.NewReader(contents)) decoder.DisallowUnknownFields() // This will make the unmarshal fail if there are unused fields err = decoder.Decode(&proposal) require.NoError(t, err, fileName) - for _, sp := range proposal.Proposal.Specs { - ts.setSpec(sp) - fullspec, err := ts.expandSpec(sp) - require.NoError(t, err) - require.NotNil(t, fullspec) - verifications := []*types.Verification{} - for _, apiCol := range fullspec.ApiCollections { - for _, verification := range apiCol.Verifications { - require.NotNil(t, verification.ParseDirective) - 
if verification.ParseDirective.FunctionTag == types.FUNCTION_TAG_VERIFICATION { - require.NotEqual(t, "", verification.ParseDirective.ApiName) - } - } - verifications = append(verifications, apiCol.Verifications...) + imports := []string{} + for _, spec := range proposal.Proposal.Specs { + if spec.Imports != nil { + imports = append(imports, spec.Imports...) } - if fullspec.Enabled { - // all specs need to have verifications - require.Greater(t, len(verifications), 0, fullspec.Index) + specImports[spec.Index] = imports + specProposals[spec.Index] = spec + } + } + + // Topologically sort based on imports + var sortedSpecs []string + visited := make(map[string]bool) + visiting := make(map[string]bool) + + var visit func(string) + visit = func(spec string) { + if visiting[spec] { + require.Fail(t, "Circular dependency detected") + } + if visited[spec] { + return + } + visiting[spec] = true + for _, imp := range specImports[spec] { + visit(imp) + } + visiting[spec] = false + visited[spec] = true + sortedSpecs = append(sortedSpecs, spec) + } + + for spec := range specImports { + if !visited[spec] { + visit(spec) + } + } + + for _, specName := range sortedSpecs { + sp, found := specProposals[specName] + require.True(t, found, specName) + + ts.setSpec(sp) + fullspec, err := ts.expandSpec(sp) + require.NoError(t, err) + require.NotNil(t, fullspec) + verifications := []*types.Verification{} + for _, apiCol := range fullspec.ApiCollections { + for _, verification := range apiCol.Verifications { + require.NotNil(t, verification.ParseDirective) + if verification.ParseDirective.FunctionTag == types.FUNCTION_TAG_VERIFICATION { + require.NotEqual(t, "", verification.ParseDirective.ApiName) + } } - _, err = fullspec.ValidateSpec(10000000) - require.NoError(t, err, sp.Name) + verifications = append(verifications, apiCol.Verifications...) 
+ } + if fullspec.Enabled { + // all specs need to have verifications + require.Greater(t, len(verifications), 0, fullspec.Index) } + _, err = fullspec.ValidateSpec(10000000) + require.NoError(t, err, sp.Name) } } diff --git a/x/spec/module.go b/x/spec/module.go index 7fbe7731ce..d456e4313c 100644 --- a/x/spec/module.go +++ b/x/spec/module.go @@ -130,12 +130,6 @@ func (am AppModule) RegisterServices(cfg module.Configurator) { migrator := keeper.NewMigrator(am.keeper) - // register v2 -> v3 migration - if err := cfg.RegisterMigration(types.ModuleName, 2, migrator.Migrate2to3); err != nil { - // panic:ok: at start up, migration cannot proceed anyhow - panic(fmt.Errorf("%s: failed to register migration to v3: %w", types.ModuleName, err)) - } - // register v3 -> v4 migration if err := cfg.RegisterMigration(types.ModuleName, 3, migrator.Migrate3to4); err != nil { // panic:ok: at start up, migration cannot proceed anyhow diff --git a/x/spec/module_simulation.go b/x/spec/module_simulation.go deleted file mode 100644 index 63eb972e93..0000000000 --- a/x/spec/module_simulation.go +++ /dev/null @@ -1,55 +0,0 @@ -package spec - -import ( - "github.com/cosmos/cosmos-sdk/baseapp" - "github.com/cosmos/cosmos-sdk/testutil/sims" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/module" - simtypes "github.com/cosmos/cosmos-sdk/types/simulation" - "github.com/cosmos/cosmos-sdk/x/simulation" - "github.com/lavanet/lava/v4/testutil/sample" - specsimulation "github.com/lavanet/lava/v4/x/spec/simulation" - "github.com/lavanet/lava/v4/x/spec/types" -) - -// avoid unused import issue -var ( - _ = sample.AccAddress - _ = specsimulation.FindAccount - _ = sims.StakePerAccount - _ = simulation.MsgEntryKind - _ = baseapp.Paramspace -) - -const ( -// this line is used by starport scaffolding # simapp/module/const -) - -// GenerateGenesisState creates a randomized GenState of the module -func (AppModule) GenerateGenesisState(simState *module.SimulationState) { - 
accs := make([]string, len(simState.Accounts)) - for i, acc := range simState.Accounts { - accs[i] = acc.Address.String() - } - specGenesis := types.GenesisState{ - // this line is used by starport scaffolding # simapp/module/genesisState - } - simState.GenState[types.ModuleName] = simState.Cdc.MustMarshalJSON(&specGenesis) -} - -// ProposalContents doesn't return any content functions for governance proposals -func (AppModule) ProposalContents(_ module.SimulationState) []simtypes.WeightedProposalMsg { - return nil -} - -// RegisterStoreDecoder registers a decoder -func (am AppModule) RegisterStoreDecoder(_ sdk.StoreDecoderRegistry) {} - -// WeightedOperations returns the all the gov module operations with their respective weights. -func (am AppModule) WeightedOperations(simState module.SimulationState) []simtypes.WeightedOperation { - operations := make([]simtypes.WeightedOperation, 0) - - // this line is used by starport scaffolding # simapp/module/operation - - return operations -} diff --git a/x/spec/simulation/simap.go b/x/spec/simulation/simap.go deleted file mode 100644 index 92c437c0d1..0000000000 --- a/x/spec/simulation/simap.go +++ /dev/null @@ -1,15 +0,0 @@ -package simulation - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - simtypes "github.com/cosmos/cosmos-sdk/types/simulation" -) - -// FindAccount find a specific address from an account list -func FindAccount(accs []simtypes.Account, address string) (simtypes.Account, bool) { - creator, err := sdk.AccAddressFromBech32(address) - if err != nil { - panic(err) - } - return simtypes.FindAccount(accs, creator) -} diff --git a/x/subscription/keeper/migrations.go b/x/subscription/keeper/migrations.go index 22aebf0452..56168236ed 100644 --- a/x/subscription/keeper/migrations.go +++ b/x/subscription/keeper/migrations.go @@ -2,16 +2,10 @@ package keeper import ( "fmt" - "time" - "github.com/cosmos/cosmos-sdk/store/prefix" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/lavanet/lava/v4/utils" - v2 
"github.com/lavanet/lava/v4/x/subscription/migrations/v2" - v5 "github.com/lavanet/lava/v4/x/subscription/migrations/v5" - v6 "github.com/lavanet/lava/v4/x/subscription/migrations/v6" v8 "github.com/lavanet/lava/v4/x/subscription/migrations/v8" - "github.com/lavanet/lava/v4/x/subscription/types" ) type Migrator struct { @@ -22,168 +16,6 @@ func NewMigrator(keeper Keeper) Migrator { return Migrator{keeper: keeper} } -// Migrate2to3 implements store migration from v2 to v3: -// - Convert subscription store to fixation store and use timers -func (m Migrator) Migrate2to3(ctx sdk.Context) error { - utils.LavaFormatDebug("migrate: subscriptions") - - keeper := m.keeper - - store := prefix.NewStore( - ctx.KVStore(keeper.storeKey), - v2.KeyPrefix(v2.SubscriptionKeyPrefix), - ) - - iterator := sdk.KVStorePrefixIterator(store, []byte{}) - defer iterator.Close() - - for ; iterator.Valid(); iterator.Next() { - var sub_V2 v2.Subscription - keeper.cdc.MustUnmarshal(iterator.Value(), &sub_V2) - - utils.LavaFormatDebug("migrate:", - utils.Attribute{Key: "subscription", Value: sub_V2.Consumer}) - - sub_V3 := types.Subscription{ - Creator: sub_V2.Creator, - Consumer: sub_V2.Consumer, - Block: sub_V2.Block, - PlanIndex: sub_V2.PlanIndex, - PlanBlock: sub_V2.PlanBlock, - DurationTotal: sub_V2.DurationTotal, - DurationLeft: sub_V2.DurationLeft, - MonthExpiryTime: sub_V2.MonthExpiryTime, - MonthCuTotal: sub_V2.MonthCuTotal, - MonthCuLeft: sub_V2.MonthCuLeft, - } - - // each subscription entry in V2 store should have an entry in V3 store - err := keeper.subsFS.AppendEntry(ctx, sub_V3.Consumer, sub_V3.Block, &sub_V3) - if err != nil { - return fmt.Errorf("%w: subscriptions %s", err, sub_V3.Consumer) - } - - // if the subscription has expired, then delete the entry from V3 store to induce - // stale-period state (use the block of last expiry as the block for deletion). 
- // otherwise, the subscription is alive, but the current month may have expired - // between since the upgrade proposal took effect (and until now); if indeed so, - // then invoke advanceMonth() since the current block is the (month) expiry block. - // otherwise, set the timer for the monthly expiry as already was set in V2. - if sub_V3.DurationLeft > 0 { - expiry := sub_V2.MonthExpiryTime - if expiry <= uint64(ctx.BlockTime().UTC().Unix()) { - utils.LavaFormatDebug(" subscription live, month expired", - utils.Attribute{Key: "expiry", Value: time.Unix(int64(expiry), 0)}, - utils.Attribute{Key: "blockTime", Value: ctx.BlockTime().UTC()}, - ) - keeper.advanceMonth(ctx, []byte(sub_V3.Consumer)) - } else { - utils.LavaFormatDebug(" subscription live, future expiry", - utils.Attribute{Key: "expiry", Value: time.Unix(int64(expiry), 0)}, - utils.Attribute{Key: "blockTime", Value: ctx.BlockTime().UTC()}, - ) - keeper.subsTS.AddTimerByBlockTime(ctx, expiry, []byte(sub_V3.Consumer), []byte{}) - } - } else { - utils.LavaFormatDebug(" subscription deleted", - utils.Attribute{Key: "block", Value: sub_V2.PrevExpiryBlock}) - keeper.subsFS.DelEntry(ctx, sub_V3.Consumer, sub_V2.PrevExpiryBlock) - } - - store.Delete(iterator.Key()) - } - - return nil -} - -// Migrate3to4 implements store migration from v3 to v4: -// -- trigger fixation migration (v4->v5), initialize IsLatest field -func (m Migrator) Migrate3to4(ctx sdk.Context) error { - // This migration used to call a deprecated fixationstore function called MigrateVersionAndPrefix - - return nil -} - -// Migrate4to5 implements store migration from v4 to v5: -// -- rename the DurationTotal field to DurationBought -// -- introduce two new fields: DurationTotal (with new meaning) and cluster -// -- assign the subscription's cluster -func (m Migrator) Migrate4to5(ctx sdk.Context) error { - utils.LavaFormatDebug("migrate 4->5: subscriptions") - - keeper := m.keeper - - indices := keeper.subsFS.AllEntryIndicesFilter(ctx, "", nil) - 
for _, ind := range indices { - blocks := keeper.subsFS.GetAllEntryVersions(ctx, ind) - - for _, block := range blocks { - var sub_V5 v5.Subscription - keeper.subsFS.ReadEntry(ctx, ind, block, &sub_V5) - utils.LavaFormatDebug("migrate:", - utils.Attribute{Key: "subscription", Value: sub_V5.Consumer}) - - sub_V5.Cluster = v5.GetClusterKey(sub_V5) - - keeper.subsFS.ModifyEntry(ctx, ind, block, &sub_V5) - } - } - return nil -} - -// Migrate5to6 implements store migration from v5 to v6: -// -- find old subscriptions and trigger advance month to make them expire -func (m Migrator) Migrate5to6(ctx sdk.Context) error { - indices := m.keeper.GetAllSubscriptionsIndices(ctx) - currentTime := ctx.BlockTime().UTC().Unix() - for _, ind := range indices { - sub, found := m.keeper.GetSubscription(ctx, ind) - if !found { - utils.LavaFormatError("cannot migrate sub", fmt.Errorf("sub not found"), - utils.Attribute{Key: "sub", Value: sub}, - ) - } - - if sub.MonthExpiryTime < uint64(currentTime) { - m.keeper.advanceMonth(ctx, []byte(ind)) - } - } - - return nil -} - -// Migrate6to7 implements store migration from v6 to v7: -// -- if subscription's auto_renewal = true, set auto_renewal_next_plan to the current's subscription plan -func (m Migrator) Migrate6to7(ctx sdk.Context) error { - utils.LavaFormatDebug("migrate 6->7: subscriptions") - - for _, index := range m.keeper.subsFS.GetAllEntryIndices(ctx) { - for _, block := range m.keeper.subsFS.GetAllEntryVersions(ctx, index) { - var subscriptionV6 v6.Subscription - var subscriptionV7 types.Subscription - foundOld := m.keeper.subsFS.FindEntry(ctx, index, block, &subscriptionV6) - foundNew := m.keeper.subsFS.FindEntry(ctx, index, block, &subscriptionV7) - if !foundOld || !foundNew { - utils.LavaFormatError("cannot migrate sub", fmt.Errorf("sub not found"), - utils.Attribute{Key: "index", Value: index}, - utils.Attribute{Key: "block", Value: block}, - ) - continue - } - - if subscriptionV6.AutoRenewal { - 
subscriptionV7.AutoRenewalNextPlan = subscriptionV7.PlanIndex - } else { - subscriptionV7.AutoRenewalNextPlan = types.AUTO_RENEWAL_PLAN_NONE - } - - m.keeper.subsFS.ModifyEntry(ctx, index, block, &subscriptionV7) - } - } - - return nil -} - // Migrate7to8 implements store migration from v7 to v8: // init new credit field func (m Migrator) Migrate7to8(ctx sdk.Context) error { diff --git a/x/subscription/migrations/v2/keys.go b/x/subscription/migrations/v2/keys.go deleted file mode 100644 index c2d634384d..0000000000 --- a/x/subscription/migrations/v2/keys.go +++ /dev/null @@ -1,10 +0,0 @@ -package types - -const ( - // SubscriptionKeyPrefix is the prefix to retrieve all Subscription - SubscriptionKeyPrefix = "Subscribe/value/" -) - -func KeyPrefix(p string) []byte { - return []byte(p) -} diff --git a/x/subscription/migrations/v2/subscription.pb.go b/x/subscription/migrations/v2/subscription.pb.go deleted file mode 100644 index 6ff3bdcf4a..0000000000 --- a/x/subscription/migrations/v2/subscription.pb.go +++ /dev/null @@ -1,744 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: subscription/subscription.proto - -package types - -import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type Subscription struct { - Creator string `protobuf:"bytes,1,opt,name=creator,proto3" json:"creator,omitempty"` - Consumer string `protobuf:"bytes,2,opt,name=consumer,proto3" json:"consumer,omitempty"` - Block uint64 `protobuf:"varint,3,opt,name=block,proto3" json:"block,omitempty"` - PlanIndex string `protobuf:"bytes,4,opt,name=plan_index,json=planIndex,proto3" json:"plan_index,omitempty"` - PlanBlock uint64 `protobuf:"varint,5,opt,name=plan_block,json=planBlock,proto3" json:"plan_block,omitempty"` - DurationTotal uint64 `protobuf:"varint,6,opt,name=duration_total,json=durationTotal,proto3" json:"duration_total,omitempty"` - DurationLeft uint64 `protobuf:"varint,7,opt,name=duration_left,json=durationLeft,proto3" json:"duration_left,omitempty"` - MonthExpiryTime uint64 `protobuf:"varint,8,opt,name=month_expiry_time,json=monthExpiryTime,proto3" json:"month_expiry_time,omitempty"` - PrevExpiryBlock uint64 `protobuf:"varint,9,opt,name=prev_expiry_block,json=prevExpiryBlock,proto3" json:"prev_expiry_block,omitempty"` - MonthCuTotal uint64 `protobuf:"varint,10,opt,name=month_cu_total,json=monthCuTotal,proto3" json:"month_cu_total,omitempty"` - MonthCuLeft uint64 `protobuf:"varint,11,opt,name=month_cu_left,json=monthCuLeft,proto3" json:"month_cu_left,omitempty"` - PrevCuLeft uint64 `protobuf:"varint,12,opt,name=prev_cu_left,json=prevCuLeft,proto3" json:"prev_cu_left,omitempty"` -} - -func (m *Subscription) Reset() { *m = Subscription{} } -func (m *Subscription) String() string { return proto.CompactTextString(m) } -func (*Subscription) ProtoMessage() {} -func (*Subscription) Descriptor() ([]byte, []int) { - return fileDescriptor_ac47bc0f89224537, []int{0} -} -func (m *Subscription) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Subscription) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return 
xxx_messageInfo_Subscription.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Subscription) XXX_Merge(src proto.Message) { - xxx_messageInfo_Subscription.Merge(m, src) -} -func (m *Subscription) XXX_Size() int { - return m.Size() -} -func (m *Subscription) XXX_DiscardUnknown() { - xxx_messageInfo_Subscription.DiscardUnknown(m) -} - -var xxx_messageInfo_Subscription proto.InternalMessageInfo - -func (m *Subscription) GetCreator() string { - if m != nil { - return m.Creator - } - return "" -} - -func (m *Subscription) GetConsumer() string { - if m != nil { - return m.Consumer - } - return "" -} - -func (m *Subscription) GetBlock() uint64 { - if m != nil { - return m.Block - } - return 0 -} - -func (m *Subscription) GetPlanIndex() string { - if m != nil { - return m.PlanIndex - } - return "" -} - -func (m *Subscription) GetPlanBlock() uint64 { - if m != nil { - return m.PlanBlock - } - return 0 -} - -func (m *Subscription) GetDurationTotal() uint64 { - if m != nil { - return m.DurationTotal - } - return 0 -} - -func (m *Subscription) GetDurationLeft() uint64 { - if m != nil { - return m.DurationLeft - } - return 0 -} - -func (m *Subscription) GetMonthExpiryTime() uint64 { - if m != nil { - return m.MonthExpiryTime - } - return 0 -} - -func (m *Subscription) GetPrevExpiryBlock() uint64 { - if m != nil { - return m.PrevExpiryBlock - } - return 0 -} - -func (m *Subscription) GetMonthCuTotal() uint64 { - if m != nil { - return m.MonthCuTotal - } - return 0 -} - -func (m *Subscription) GetMonthCuLeft() uint64 { - if m != nil { - return m.MonthCuLeft - } - return 0 -} - -func (m *Subscription) GetPrevCuLeft() uint64 { - if m != nil { - return m.PrevCuLeft - } - return 0 -} - -func init() { - proto.RegisterType((*Subscription)(nil), "lavanet.lava.subscription.Subscription_V2") -} - -func init() { proto.RegisterFile("subscription/subscription.proto", 
fileDescriptor_ac47bc0f89224537) } - -var fileDescriptor_ac47bc0f89224537 = []byte{ - // 353 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x92, 0xcf, 0x4a, 0xeb, 0x40, - 0x14, 0xc6, 0x9b, 0xdb, 0xff, 0xa7, 0x69, 0x2f, 0x77, 0xb8, 0x8b, 0x51, 0x30, 0x96, 0xaa, 0x50, - 0x44, 0x92, 0x85, 0x6f, 0x50, 0x51, 0x10, 0x5c, 0xd5, 0xae, 0xdc, 0x84, 0x24, 0x9d, 0xda, 0xc1, - 0x24, 0x13, 0x26, 0x93, 0xd2, 0xbe, 0x85, 0x8f, 0xe1, 0xa3, 0xb8, 0xec, 0xd2, 0xa5, 0xb4, 0x2f, - 0x22, 0x73, 0xa6, 0x09, 0x75, 0x35, 0x9c, 0xdf, 0xf7, 0x3b, 0x99, 0x0f, 0x32, 0x70, 0x9e, 0x17, - 0x61, 0x1e, 0x49, 0x9e, 0x29, 0x2e, 0x52, 0xef, 0x78, 0x70, 0x33, 0x29, 0x94, 0x20, 0x27, 0x71, - 0xb0, 0x0a, 0x52, 0xa6, 0x5c, 0x7d, 0xba, 0xc7, 0xc2, 0xe8, 0xa3, 0x0e, 0xf6, 0xf3, 0x11, 0x20, - 0x14, 0xda, 0x91, 0x64, 0x81, 0x12, 0x92, 0x5a, 0x43, 0x6b, 0xdc, 0x9d, 0x96, 0x23, 0x39, 0x85, - 0x4e, 0x24, 0xd2, 0xbc, 0x48, 0x98, 0xa4, 0x7f, 0x30, 0xaa, 0x66, 0xf2, 0x1f, 0x9a, 0x61, 0x2c, - 0xa2, 0x37, 0x5a, 0x1f, 0x5a, 0xe3, 0xc6, 0xd4, 0x0c, 0xe4, 0x0c, 0x20, 0x8b, 0x83, 0xd4, 0xe7, - 0xe9, 0x9c, 0xad, 0x69, 0x03, 0x77, 0xba, 0x9a, 0x3c, 0x6a, 0x50, 0xc5, 0x66, 0xb3, 0x89, 0x9b, - 0x18, 0x4f, 0x70, 0xfb, 0x0a, 0x06, 0xf3, 0x42, 0x06, 0xba, 0x95, 0xaf, 0x84, 0x0a, 0x62, 0xda, - 0x42, 0xa5, 0x5f, 0xd2, 0x99, 0x86, 0xe4, 0x02, 0x2a, 0xe0, 0xc7, 0x6c, 0xa1, 0x68, 0x1b, 0x2d, - 0xbb, 0x84, 0x4f, 0x6c, 0xa1, 0xc8, 0x35, 0xfc, 0x4b, 0x44, 0xaa, 0x96, 0x3e, 0x5b, 0x67, 0x5c, - 0x6e, 0x7c, 0xc5, 0x13, 0x46, 0x3b, 0x28, 0xfe, 0xc5, 0xe0, 0x1e, 0xf9, 0x8c, 0x27, 0x4c, 0xbb, - 0x99, 0x64, 0xab, 0x52, 0x35, 0xed, 0xba, 0xc6, 0xd5, 0x81, 0x51, 0x4d, 0xc7, 0x4b, 0x18, 0x98, - 0xef, 0x46, 0xc5, 0xa1, 0x23, 0x98, 0xdb, 0x91, 0xde, 0x15, 0xa6, 0xe2, 0x08, 0xfa, 0x95, 0x85, - 0x15, 0x7b, 0x28, 0xf5, 0x0e, 0x12, 0x36, 0x1c, 0x82, 0x8d, 0xb7, 0x96, 0x8a, 0x8d, 0x0a, 0x68, - 0x66, 0x8c, 0xc9, 0xc3, 0xe7, 0xce, 0xb1, 0xb6, 0x3b, 0xc7, 0xfa, 0xde, 0x39, 0xd6, 0xfb, 0xde, - 
0xa9, 0x6d, 0xf7, 0x4e, 0xed, 0x6b, 0xef, 0xd4, 0x5e, 0x6e, 0x5e, 0xb9, 0x5a, 0x16, 0xa1, 0x1b, - 0x89, 0xc4, 0x3b, 0xfc, 0x6a, 0x3c, 0xbd, 0xf5, 0xaf, 0xd7, 0xe0, 0xa9, 0x4d, 0xc6, 0xf2, 0xb0, - 0x85, 0x8f, 0xe2, 0xf6, 0x27, 0x00, 0x00, 0xff, 0xff, 0xd7, 0x6e, 0x85, 0xd9, 0x37, 0x02, 0x00, - 0x00, -} - -func (m *Subscription) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Subscription) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Subscription) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.PrevCuLeft != 0 { - i = encodeVarintSubscription(dAtA, i, uint64(m.PrevCuLeft)) - i-- - dAtA[i] = 0x60 - } - if m.MonthCuLeft != 0 { - i = encodeVarintSubscription(dAtA, i, uint64(m.MonthCuLeft)) - i-- - dAtA[i] = 0x58 - } - if m.MonthCuTotal != 0 { - i = encodeVarintSubscription(dAtA, i, uint64(m.MonthCuTotal)) - i-- - dAtA[i] = 0x50 - } - if m.PrevExpiryBlock != 0 { - i = encodeVarintSubscription(dAtA, i, uint64(m.PrevExpiryBlock)) - i-- - dAtA[i] = 0x48 - } - if m.MonthExpiryTime != 0 { - i = encodeVarintSubscription(dAtA, i, uint64(m.MonthExpiryTime)) - i-- - dAtA[i] = 0x40 - } - if m.DurationLeft != 0 { - i = encodeVarintSubscription(dAtA, i, uint64(m.DurationLeft)) - i-- - dAtA[i] = 0x38 - } - if m.DurationTotal != 0 { - i = encodeVarintSubscription(dAtA, i, uint64(m.DurationTotal)) - i-- - dAtA[i] = 0x30 - } - if m.PlanBlock != 0 { - i = encodeVarintSubscription(dAtA, i, uint64(m.PlanBlock)) - i-- - dAtA[i] = 0x28 - } - if len(m.PlanIndex) > 0 { - i -= len(m.PlanIndex) - copy(dAtA[i:], m.PlanIndex) - i = encodeVarintSubscription(dAtA, i, uint64(len(m.PlanIndex))) - i-- - dAtA[i] = 0x22 - } - if m.Block != 0 { - i = encodeVarintSubscription(dAtA, i, uint64(m.Block)) - i-- - dAtA[i] = 0x18 
- } - if len(m.Consumer) > 0 { - i -= len(m.Consumer) - copy(dAtA[i:], m.Consumer) - i = encodeVarintSubscription(dAtA, i, uint64(len(m.Consumer))) - i-- - dAtA[i] = 0x12 - } - if len(m.Creator) > 0 { - i -= len(m.Creator) - copy(dAtA[i:], m.Creator) - i = encodeVarintSubscription(dAtA, i, uint64(len(m.Creator))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintSubscription(dAtA []byte, offset int, v uint64) int { - offset -= sovSubscription(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Subscription) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Creator) - if l > 0 { - n += 1 + l + sovSubscription(uint64(l)) - } - l = len(m.Consumer) - if l > 0 { - n += 1 + l + sovSubscription(uint64(l)) - } - if m.Block != 0 { - n += 1 + sovSubscription(uint64(m.Block)) - } - l = len(m.PlanIndex) - if l > 0 { - n += 1 + l + sovSubscription(uint64(l)) - } - if m.PlanBlock != 0 { - n += 1 + sovSubscription(uint64(m.PlanBlock)) - } - if m.DurationTotal != 0 { - n += 1 + sovSubscription(uint64(m.DurationTotal)) - } - if m.DurationLeft != 0 { - n += 1 + sovSubscription(uint64(m.DurationLeft)) - } - if m.MonthExpiryTime != 0 { - n += 1 + sovSubscription(uint64(m.MonthExpiryTime)) - } - if m.PrevExpiryBlock != 0 { - n += 1 + sovSubscription(uint64(m.PrevExpiryBlock)) - } - if m.MonthCuTotal != 0 { - n += 1 + sovSubscription(uint64(m.MonthCuTotal)) - } - if m.MonthCuLeft != 0 { - n += 1 + sovSubscription(uint64(m.MonthCuLeft)) - } - if m.PrevCuLeft != 0 { - n += 1 + sovSubscription(uint64(m.PrevCuLeft)) - } - return n -} - -func sovSubscription(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozSubscription(x uint64) (n int) { - return sovSubscription(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Subscription) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - 
preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Subscription: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Subscription: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Creator", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSubscription - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSubscription - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Creator = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Consumer", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSubscription - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSubscription - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Consumer = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if 
wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Block", wireType) - } - m.Block = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Block |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PlanIndex", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSubscription - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSubscription - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PlanIndex = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PlanBlock", wireType) - } - m.PlanBlock = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PlanBlock |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DurationTotal", wireType) - } - m.DurationTotal = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DurationTotal |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DurationLeft", wireType) - } - m.DurationLeft = 0 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DurationLeft |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MonthExpiryTime", wireType) - } - m.MonthExpiryTime = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MonthExpiryTime |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PrevExpiryBlock", wireType) - } - m.PrevExpiryBlock = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PrevExpiryBlock |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MonthCuTotal", wireType) - } - m.MonthCuTotal = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MonthCuTotal |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MonthCuLeft", wireType) - } - m.MonthCuLeft = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MonthCuLeft |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PrevCuLeft", wireType) - } - m.PrevCuLeft = 0 - for shift := uint(0); ; shift += 
7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PrevCuLeft |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipSubscription(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSubscription - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipSubscription(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSubscription - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSubscription - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSubscription - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthSubscription - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupSubscription - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthSubscription - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - 
ErrInvalidLengthSubscription = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowSubscription = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupSubscription = fmt.Errorf("proto: unexpected end of group") -) diff --git a/x/subscription/migrations/v5/cluster.go b/x/subscription/migrations/v5/cluster.go deleted file mode 100644 index 2af2fd59d4..0000000000 --- a/x/subscription/migrations/v5/cluster.go +++ /dev/null @@ -1,44 +0,0 @@ -package v5 - -// QoS clustering divides the QoS monitoring into a discrete set of clusters -// such that QoS is maintained separately for each Provider x Cluster. -// -// The clusters are determined based on certain subscription owner properties, -// such as past or recent activity (e.g. aggregate subscription periods), the -// current plan used, etc. Each consumer (subscription owner or project developer) -// QoS report about some provider will be considered only in the cluster matching -// that consumer’s properties. During pairing selection for a particular consumer, -// the QoS data for the pairing calculation will be taken from the cluster matching -// that consumer’s properties. -// Cluster assignment is updated when a subscription renews (every month). - -// To add a new cluster criterion, update the Cluster struct, create an array with -// the criterion values (like PLAN_CRITERION) and add it to ConstructAllClusters() -// -// All clusters: -// 1. For each plan (except "free") a cluster for each subUsage -// 2. 
"free" cluster (without regarding subUsage) - -import "strconv" - -const FREE_PLAN = "free" // gets its own const because it's treated differently - -func GetSubUsageCriterion(sub Subscription) uint64 { - switch { - case sub.DurationTotal == 0: - return 0 - case sub.DurationTotal > 6: - return 7 - default: - return 6 - } -} - -// GetClusterKey returns the subscription's best-fit cluster -func GetClusterKey(sub Subscription) string { - if sub.PlanIndex == FREE_PLAN { - return FREE_PLAN - } - - return sub.PlanIndex + "_" + strconv.FormatUint(GetSubUsageCriterion(sub), 10) -} diff --git a/x/subscription/migrations/v5/keys.go b/x/subscription/migrations/v5/keys.go deleted file mode 100644 index fa1c4b93c9..0000000000 --- a/x/subscription/migrations/v5/keys.go +++ /dev/null @@ -1,24 +0,0 @@ -package v5 - -const ( - // ModuleName defines the module name - ModuleName = "subscription" - - // StoreKey defines the primary module store key - StoreKey = ModuleName - - // RouterKey is the message route for slashing - RouterKey = ModuleName - - // QuerierRoute defines the module's query routing key - QuerierRoute = ModuleName - - // MemStoreKey defines the in-memory store key - MemStoreKey = "mem_subscription" - - // prefix for the subscription fixation store - SubsFixationPrefix = "subs-fs" - - // prefix for the subscription fixation store - SubsTimerPrefix = "subs-ts" -) diff --git a/x/subscription/migrations/v5/params.go b/x/subscription/migrations/v5/params.go deleted file mode 100644 index 246e2a01a5..0000000000 --- a/x/subscription/migrations/v5/params.go +++ /dev/null @@ -1,39 +0,0 @@ -package v5 - -import ( - paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" - "gopkg.in/yaml.v2" -) - -var _ paramtypes.ParamSet = (*Params)(nil) - -// ParamKeyTable the param key table for launch module -func ParamKeyTable() paramtypes.KeyTable { - return paramtypes.NewKeyTable().RegisterParamSet(&Params{}) -} - -// NewParams creates a new Params instance -func NewParams() Params { - 
return Params{} -} - -// DefaultParams returns a default set of parameters -func DefaultParams() Params { - return NewParams() -} - -// ParamSetPairs get the params.ParamSet -func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs { - return paramtypes.ParamSetPairs{} -} - -// Validate validates the set of params -func (p Params) Validate() error { - return nil -} - -// String implements the Stringer interface. -func (p Params) String() string { - out, _ := yaml.Marshal(p) - return string(out) -} diff --git a/x/subscription/migrations/v5/params.pb.go b/x/subscription/migrations/v5/params.pb.go deleted file mode 100644 index 7d2d1487b7..0000000000 --- a/x/subscription/migrations/v5/params.pb.go +++ /dev/null @@ -1,266 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: lavanet/lava/subscription/params.proto - -package v5 - -import ( - fmt "fmt" - _ "github.com/cosmos/gogoproto/gogoproto" - proto "github.com/cosmos/gogoproto/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// Params defines the parameters for the module. 
-type Params struct { -} - -func (m *Params) Reset() { *m = Params{} } -func (*Params) ProtoMessage() {} -func (*Params) Descriptor() ([]byte, []int) { - return fileDescriptor_8b1e38ca40b9ef74, []int{0} -} -func (m *Params) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Params.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Params) XXX_Merge(src proto.Message) { - xxx_messageInfo_Params.Merge(m, src) -} -func (m *Params) XXX_Size() int { - return m.Size() -} -func (m *Params) XXX_DiscardUnknown() { - xxx_messageInfo_Params.DiscardUnknown(m) -} - -var xxx_messageInfo_Params proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Params)(nil), "lavanet.lava.subscription.ParamsV5") -} - -func init() { - proto.RegisterFile("lavanet/lava/subscription/params.proto", fileDescriptor_8b1e38ca40b9ef74) -} - -var fileDescriptor_8b1e38ca40b9ef74 = []byte{ - // 154 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0xcb, 0x49, 0x2c, 0x4b, - 0xcc, 0x4b, 0x2d, 0xd1, 0x07, 0xd1, 0xfa, 0xc5, 0xa5, 0x49, 0xc5, 0xc9, 0x45, 0x99, 0x05, 0x25, - 0x99, 0xf9, 0x79, 0xfa, 0x05, 0x89, 0x45, 0x89, 0xb9, 0xc5, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, - 0x42, 0x92, 0x50, 0x75, 0x7a, 0x20, 0x5a, 0x0f, 0x59, 0x9d, 0x94, 0x48, 0x7a, 0x7e, 0x7a, 0x3e, - 0x58, 0x95, 0x3e, 0x88, 0x05, 0xd1, 0xa0, 0xc4, 0xc7, 0xc5, 0x16, 0x00, 0x36, 0xc0, 0x8a, 0x65, - 0xc6, 0x02, 0x79, 0x06, 0x27, 0xb7, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, - 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x88, - 0xd2, 0x49, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x47, 0x71, 0x4d, 0x05, - 0xaa, 0x7b, 0x4a, 0x2a, 0x0b, 0x52, 
0x8b, 0x93, 0xd8, 0xc0, 0xc6, 0x1b, 0x03, 0x02, 0x00, 0x00, - 0xff, 0xff, 0x51, 0x13, 0xf7, 0x58, 0xb9, 0x00, 0x00, 0x00, -} - -func (m *Params) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Params) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func encodeVarintParams(dAtA []byte, offset int, v uint64) int { - offset -= sovParams(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Params) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func sovParams(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozParams(x uint64) (n int) { - return sovParams(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Params) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowParams - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Params: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipParams(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthParams - } - if (iNdEx + skippy) > 
l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipParams(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowParams - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowParams - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowParams - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthParams - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupParams - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthParams - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthParams = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowParams = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupParams = fmt.Errorf("proto: unexpected end of group") -) diff --git a/x/subscription/migrations/v5/query.pb.go b/x/subscription/migrations/v5/query.pb.go deleted file mode 100644 index 9ba62c4de3..0000000000 --- a/x/subscription/migrations/v5/query.pb.go +++ /dev/null @@ -1,2154 +0,0 @@ -// Code generated by protoc-gen-gogo. 
DO NOT EDIT. -// source: lavanet/lava/subscription/query.proto - -package v5 - -import ( - context "context" - fmt "fmt" - _ "github.com/cosmos/cosmos-sdk/types/query" - _ "github.com/cosmos/gogoproto/gogoproto" - grpc1 "github.com/cosmos/gogoproto/grpc" - proto "github.com/cosmos/gogoproto/proto" - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// QueryParamsRequest is request type for the Query/Params RPC method. 
-type QueryParamsRequest struct { -} - -func (m *QueryParamsRequest) Reset() { *m = QueryParamsRequest{} } -func (m *QueryParamsRequest) String() string { return proto.CompactTextString(m) } -func (*QueryParamsRequest) ProtoMessage() {} -func (*QueryParamsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_e870698c9d8ccc09, []int{0} -} -func (m *QueryParamsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryParamsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryParamsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryParamsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryParamsRequest.Merge(m, src) -} -func (m *QueryParamsRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryParamsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryParamsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryParamsRequest proto.InternalMessageInfo - -// QueryParamsResponse is response type for the Query/Params RPC method. -type QueryParamsResponse struct { - // params holds all the parameters of this module. 
- Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params"` -} - -func (m *QueryParamsResponse) Reset() { *m = QueryParamsResponse{} } -func (m *QueryParamsResponse) String() string { return proto.CompactTextString(m) } -func (*QueryParamsResponse) ProtoMessage() {} -func (*QueryParamsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_e870698c9d8ccc09, []int{1} -} -func (m *QueryParamsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryParamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryParamsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryParamsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryParamsResponse.Merge(m, src) -} -func (m *QueryParamsResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryParamsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryParamsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryParamsResponse proto.InternalMessageInfo - -func (m *QueryParamsResponse) GetParams() Params { - if m != nil { - return m.Params - } - return Params{} -} - -type QueryCurrentRequest struct { - Consumer string `protobuf:"bytes,1,opt,name=consumer,proto3" json:"consumer,omitempty"` -} - -func (m *QueryCurrentRequest) Reset() { *m = QueryCurrentRequest{} } -func (m *QueryCurrentRequest) String() string { return proto.CompactTextString(m) } -func (*QueryCurrentRequest) ProtoMessage() {} -func (*QueryCurrentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_e870698c9d8ccc09, []int{2} -} -func (m *QueryCurrentRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryCurrentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryCurrentRequest.Marshal(b, m, 
deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryCurrentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryCurrentRequest.Merge(m, src) -} -func (m *QueryCurrentRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryCurrentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryCurrentRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryCurrentRequest proto.InternalMessageInfo - -func (m *QueryCurrentRequest) GetConsumer() string { - if m != nil { - return m.Consumer - } - return "" -} - -type QueryCurrentResponse struct { - Sub *Subscription `protobuf:"bytes,1,opt,name=sub,proto3" json:"sub,omitempty"` -} - -func (m *QueryCurrentResponse) Reset() { *m = QueryCurrentResponse{} } -func (m *QueryCurrentResponse) String() string { return proto.CompactTextString(m) } -func (*QueryCurrentResponse) ProtoMessage() {} -func (*QueryCurrentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_e870698c9d8ccc09, []int{3} -} -func (m *QueryCurrentResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryCurrentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryCurrentResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryCurrentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryCurrentResponse.Merge(m, src) -} -func (m *QueryCurrentResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryCurrentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryCurrentResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryCurrentResponse proto.InternalMessageInfo - -func (m *QueryCurrentResponse) GetSub() *Subscription { - if m != nil { - return m.Sub - } - return nil -} - -type 
QueryListProjectsRequest struct { - Subscription string `protobuf:"bytes,1,opt,name=subscription,proto3" json:"subscription,omitempty"` -} - -func (m *QueryListProjectsRequest) Reset() { *m = QueryListProjectsRequest{} } -func (m *QueryListProjectsRequest) String() string { return proto.CompactTextString(m) } -func (*QueryListProjectsRequest) ProtoMessage() {} -func (*QueryListProjectsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_e870698c9d8ccc09, []int{4} -} -func (m *QueryListProjectsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryListProjectsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryListProjectsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryListProjectsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryListProjectsRequest.Merge(m, src) -} -func (m *QueryListProjectsRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryListProjectsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryListProjectsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryListProjectsRequest proto.InternalMessageInfo - -func (m *QueryListProjectsRequest) GetSubscription() string { - if m != nil { - return m.Subscription - } - return "" -} - -type QueryListProjectsResponse struct { - Projects []string `protobuf:"bytes,1,rep,name=projects,proto3" json:"projects,omitempty"` -} - -func (m *QueryListProjectsResponse) Reset() { *m = QueryListProjectsResponse{} } -func (m *QueryListProjectsResponse) String() string { return proto.CompactTextString(m) } -func (*QueryListProjectsResponse) ProtoMessage() {} -func (*QueryListProjectsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_e870698c9d8ccc09, []int{5} -} -func (m *QueryListProjectsResponse) XXX_Unmarshal(b []byte) error { - return 
m.Unmarshal(b) -} -func (m *QueryListProjectsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryListProjectsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryListProjectsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryListProjectsResponse.Merge(m, src) -} -func (m *QueryListProjectsResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryListProjectsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryListProjectsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryListProjectsResponse proto.InternalMessageInfo - -func (m *QueryListProjectsResponse) GetProjects() []string { - if m != nil { - return m.Projects - } - return nil -} - -type QueryListRequest struct { -} - -func (m *QueryListRequest) Reset() { *m = QueryListRequest{} } -func (m *QueryListRequest) String() string { return proto.CompactTextString(m) } -func (*QueryListRequest) ProtoMessage() {} -func (*QueryListRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_e870698c9d8ccc09, []int{6} -} -func (m *QueryListRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryListRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryListRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryListRequest.Merge(m, src) -} -func (m *QueryListRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryListRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryListRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryListRequest proto.InternalMessageInfo - -type QueryListResponse struct { - 
SubsInfo []ListInfoStruct `protobuf:"bytes,1,rep,name=subs_info,json=subsInfo,proto3" json:"subs_info"` -} - -func (m *QueryListResponse) Reset() { *m = QueryListResponse{} } -func (m *QueryListResponse) String() string { return proto.CompactTextString(m) } -func (*QueryListResponse) ProtoMessage() {} -func (*QueryListResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_e870698c9d8ccc09, []int{7} -} -func (m *QueryListResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryListResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryListResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryListResponse.Merge(m, src) -} -func (m *QueryListResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryListResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryListResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryListResponse proto.InternalMessageInfo - -func (m *QueryListResponse) GetSubsInfo() []ListInfoStruct { - if m != nil { - return m.SubsInfo - } - return nil -} - -type ListInfoStruct struct { - Consumer string `protobuf:"bytes,1,opt,name=consumer,proto3" json:"consumer,omitempty"` - Plan string `protobuf:"bytes,2,opt,name=plan,proto3" json:"plan,omitempty"` - DurationBought uint64 `protobuf:"varint,3,opt,name=duration_bought,json=durationBought,proto3" json:"duration_bought,omitempty"` - DurationLeft uint64 `protobuf:"varint,4,opt,name=duration_left,json=durationLeft,proto3" json:"duration_left,omitempty"` - MonthExpiry uint64 `protobuf:"varint,5,opt,name=month_expiry,json=monthExpiry,proto3" json:"month_expiry,omitempty"` - MonthCuTotal uint64 `protobuf:"varint,6,opt,name=month_cu_total,json=monthCuTotal,proto3" json:"month_cu_total,omitempty"` - 
MonthCuLeft uint64 `protobuf:"varint,7,opt,name=month_cu_left,json=monthCuLeft,proto3" json:"month_cu_left,omitempty"` - Cluster string `protobuf:"bytes,8,opt,name=cluster,proto3" json:"cluster,omitempty"` - DurationTotal uint64 `protobuf:"varint,9,opt,name=duration_total,json=durationTotal,proto3" json:"duration_total,omitempty"` -} - -func (m *ListInfoStruct) Reset() { *m = ListInfoStruct{} } -func (m *ListInfoStruct) String() string { return proto.CompactTextString(m) } -func (*ListInfoStruct) ProtoMessage() {} -func (*ListInfoStruct) Descriptor() ([]byte, []int) { - return fileDescriptor_e870698c9d8ccc09, []int{8} -} -func (m *ListInfoStruct) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListInfoStruct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListInfoStruct.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ListInfoStruct) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListInfoStruct.Merge(m, src) -} -func (m *ListInfoStruct) XXX_Size() int { - return m.Size() -} -func (m *ListInfoStruct) XXX_DiscardUnknown() { - xxx_messageInfo_ListInfoStruct.DiscardUnknown(m) -} - -var xxx_messageInfo_ListInfoStruct proto.InternalMessageInfo - -func (m *ListInfoStruct) GetConsumer() string { - if m != nil { - return m.Consumer - } - return "" -} - -func (m *ListInfoStruct) GetPlan() string { - if m != nil { - return m.Plan - } - return "" -} - -func (m *ListInfoStruct) GetDurationBought() uint64 { - if m != nil { - return m.DurationBought - } - return 0 -} - -func (m *ListInfoStruct) GetDurationLeft() uint64 { - if m != nil { - return m.DurationLeft - } - return 0 -} - -func (m *ListInfoStruct) GetMonthExpiry() uint64 { - if m != nil { - return m.MonthExpiry - } - return 0 -} - -func (m *ListInfoStruct) GetMonthCuTotal() uint64 { - if m != nil { - return 
m.MonthCuTotal - } - return 0 -} - -func (m *ListInfoStruct) GetMonthCuLeft() uint64 { - if m != nil { - return m.MonthCuLeft - } - return 0 -} - -func (m *ListInfoStruct) GetCluster() string { - if m != nil { - return m.Cluster - } - return "" -} - -func (m *ListInfoStruct) GetDurationTotal() uint64 { - if m != nil { - return m.DurationTotal - } - return 0 -} - -func init() { - proto.RegisterType((*QueryParamsRequest)(nil), "lavanet.lava.subscription.QueryParamsRequestV5") - proto.RegisterType((*QueryParamsResponse)(nil), "lavanet.lava.subscription.QueryParamsResponseV5") - proto.RegisterType((*QueryCurrentRequest)(nil), "lavanet.lava.subscription.QueryCurrentRequestV5") - proto.RegisterType((*QueryCurrentResponse)(nil), "lavanet.lava.subscription.QueryCurrentResponseV5") - proto.RegisterType((*QueryListProjectsRequest)(nil), "lavanet.lava.subscription.QueryListProjectsRequestV5") - proto.RegisterType((*QueryListProjectsResponse)(nil), "lavanet.lava.subscription.QueryListProjectsResponseV5") - proto.RegisterType((*QueryListRequest)(nil), "lavanet.lava.subscription.QueryListRequestV5") - proto.RegisterType((*QueryListResponse)(nil), "lavanet.lava.subscription.QueryListResponseV5") - proto.RegisterType((*ListInfoStruct)(nil), "lavanet.lava.subscription.ListInfoStructV5") -} - -func init() { - proto.RegisterFile("lavanet/lava/subscription/query.proto", fileDescriptor_e870698c9d8ccc09) -} - -var fileDescriptor_e870698c9d8ccc09 = []byte{ - // 705 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xcf, 0x4e, 0xd4, 0x40, - 0x18, 0xdf, 0xc2, 0xb2, 0x2c, 0x1f, 0x0b, 0xea, 0xc8, 0xa1, 0x6c, 0xcc, 0xc2, 0x16, 0x11, 0x50, - 0xe8, 0x04, 0xd0, 0x10, 0x2f, 0x92, 0x40, 0x34, 0x31, 0xe1, 0x80, 0x8b, 0xd1, 0xc4, 0xcb, 0xa6, - 0xad, 0xb3, 0xa5, 0xa6, 0xdb, 0x29, 0x9d, 0x19, 0x02, 0x21, 0x5c, 0x3c, 0x72, 0x32, 0xfa, 0x04, - 0xbe, 0x86, 0xf1, 0x01, 0x38, 0x92, 0x78, 0xf1, 0x64, 0x0c, 0xf8, 0x20, 0xa6, 0xd3, 0x69, 0xd3, - 
0x46, 0xd8, 0x5d, 0x4f, 0xdb, 0xf9, 0xf5, 0xf7, 0x6f, 0xa6, 0xf3, 0x65, 0x61, 0xde, 0xb7, 0x0e, - 0xad, 0x80, 0x70, 0x1c, 0xff, 0x62, 0x26, 0x6c, 0xe6, 0x44, 0x5e, 0xc8, 0x3d, 0x1a, 0xe0, 0x03, - 0x41, 0xa2, 0x63, 0x33, 0x8c, 0x28, 0xa7, 0x68, 0x5a, 0xd1, 0xcc, 0xf8, 0xd7, 0xcc, 0xd3, 0xea, - 0x53, 0x2e, 0x75, 0xa9, 0x64, 0xe1, 0xf8, 0x29, 0x11, 0xd4, 0xef, 0xb9, 0x94, 0xba, 0x3e, 0xc1, - 0x56, 0xe8, 0x61, 0x2b, 0x08, 0x28, 0xb7, 0x62, 0x32, 0x53, 0x6f, 0x1f, 0x3a, 0x94, 0x75, 0x29, - 0xc3, 0xb6, 0xc5, 0x48, 0x92, 0x83, 0x0f, 0x57, 0x6d, 0xc2, 0xad, 0x55, 0x1c, 0x5a, 0xae, 0x17, - 0x48, 0xb2, 0xe2, 0x3e, 0xb8, 0xb9, 0x61, 0x68, 0x45, 0x56, 0x37, 0xf5, 0x5c, 0xbe, 0x99, 0x97, - 0x5f, 0x24, 0x6c, 0x63, 0x0a, 0xd0, 0xab, 0x38, 0x77, 0x57, 0x5a, 0xb4, 0xc8, 0x81, 0x20, 0x8c, - 0x1b, 0x6f, 0xe0, 0x6e, 0x01, 0x65, 0x21, 0x0d, 0x18, 0x41, 0x9b, 0x50, 0x49, 0xa2, 0x74, 0x6d, - 0x56, 0x5b, 0x1c, 0x5f, 0x6b, 0x9a, 0x37, 0x1e, 0x87, 0x99, 0x48, 0xb7, 0xca, 0xe7, 0xbf, 0x66, - 0x4a, 0x2d, 0x25, 0x33, 0x56, 0x95, 0xef, 0xb6, 0x88, 0x22, 0x12, 0x70, 0x15, 0x87, 0xea, 0x50, - 0x75, 0x68, 0xc0, 0x44, 0x97, 0x44, 0xd2, 0x79, 0xac, 0x95, 0xad, 0x8d, 0xb7, 0x30, 0x55, 0x94, - 0x64, 0x5d, 0x86, 0x99, 0xb0, 0x55, 0x91, 0x85, 0x1e, 0x45, 0xf6, 0x72, 0x0b, 0x59, 0x47, 0x6b, - 0xc5, 0x4a, 0xe3, 0x19, 0xe8, 0xd2, 0x78, 0xc7, 0x63, 0x7c, 0x37, 0xa2, 0x1f, 0x88, 0xc3, 0xd3, - 0xfd, 0x23, 0x03, 0x6a, 0x79, 0x0f, 0x55, 0xaa, 0x80, 0x19, 0x1b, 0x30, 0x7d, 0x8d, 0x5e, 0xb5, - 0xab, 0x43, 0x35, 0x54, 0x98, 0xae, 0xcd, 0x0e, 0xc7, 0x3b, 0x4a, 0xd7, 0x06, 0x82, 0xdb, 0x99, - 0x30, 0x3d, 0x70, 0x0b, 0xee, 0xe4, 0x30, 0x65, 0xb2, 0x03, 0x63, 0x71, 0x62, 0xdb, 0x0b, 0x3a, - 0x54, 0xba, 0x8c, 0xaf, 0x2d, 0xf5, 0xd8, 0x68, 0xac, 0x7d, 0x19, 0x74, 0xe8, 0x1e, 0x8f, 0x84, - 0xc3, 0xd5, 0xc9, 0x57, 0x63, 0x4a, 0x8c, 0x1a, 0xdf, 0x87, 0x60, 0xb2, 0x48, 0xe9, 0x75, 0xee, - 0x08, 0x41, 0x39, 0xf4, 0xad, 0x40, 0x1f, 0x92, 0xb8, 0x7c, 0x46, 0x0b, 0x70, 0xeb, 0xbd, 0x88, - 0xe4, 0xa5, 0x6c, 0xdb, 0x54, 0xb8, 
0xfb, 0x5c, 0x1f, 0x9e, 0xd5, 0x16, 0xcb, 0xad, 0xc9, 0x14, - 0xde, 0x92, 0x28, 0x9a, 0x83, 0x89, 0x8c, 0xe8, 0x93, 0x0e, 0xd7, 0xcb, 0x92, 0x56, 0x4b, 0xc1, - 0x1d, 0xd2, 0xe1, 0xa8, 0x09, 0xb5, 0x2e, 0x0d, 0xf8, 0x7e, 0x9b, 0x1c, 0x85, 0x5e, 0x74, 0xac, - 0x8f, 0x48, 0xce, 0xb8, 0xc4, 0x9e, 0x4b, 0x08, 0xdd, 0x87, 0xc9, 0x84, 0xe2, 0x88, 0x36, 0xa7, - 0xdc, 0xf2, 0xf5, 0x4a, 0x62, 0x24, 0xd1, 0x6d, 0xf1, 0x3a, 0xc6, 0x90, 0x01, 0x13, 0x19, 0x4b, - 0xa6, 0x8d, 0xe6, 0x9c, 0xb6, 0x85, 0x0c, 0xd3, 0x61, 0xd4, 0xf1, 0x05, 0xe3, 0x24, 0xd2, 0xab, - 0x72, 0x47, 0xe9, 0x12, 0xcd, 0x43, 0xd6, 0x5e, 0x65, 0x8c, 0x49, 0x79, 0xb6, 0x03, 0x19, 0xb2, - 0x76, 0x36, 0x02, 0x23, 0xf2, 0x13, 0xa1, 0xcf, 0x1a, 0x54, 0x92, 0xdb, 0x8d, 0x56, 0x7a, 0x7c, - 0x8e, 0x7f, 0xc7, 0xaa, 0x6e, 0x0e, 0x4a, 0x4f, 0x2e, 0x80, 0xb1, 0xf4, 0xf1, 0xc7, 0x9f, 0x2f, - 0x43, 0x73, 0xa8, 0x89, 0xfb, 0xcd, 0x3e, 0xfa, 0xaa, 0xc1, 0xa8, 0x1a, 0x11, 0xd4, 0x37, 0xa6, - 0x38, 0x7e, 0x75, 0x3c, 0x30, 0x5f, 0xf5, 0x7a, 0x22, 0x7b, 0x61, 0xb4, 0xd2, 0xa3, 0x97, 0x93, - 0x68, 0xf0, 0x49, 0x7a, 0xa3, 0x4e, 0xd1, 0x37, 0x0d, 0x6a, 0xf9, 0x69, 0x41, 0xeb, 0xfd, 0x82, - 0xaf, 0x99, 0xcd, 0xfa, 0xe3, 0xff, 0x13, 0xa9, 0xca, 0x9b, 0xb2, 0xf2, 0x53, 0xb4, 0xd1, 0xa3, - 0xb2, 0xef, 0x31, 0xde, 0x4e, 0xc7, 0x14, 0x9f, 0xe4, 0xdf, 0x9d, 0xa2, 0x33, 0x0d, 0xca, 0xb1, - 0x33, 0x7a, 0x34, 0x48, 0x7e, 0x5a, 0x76, 0x79, 0x30, 0xb2, 0x2a, 0xb9, 0x20, 0x4b, 0x36, 0xd1, - 0x4c, 0x9f, 0x92, 0x5b, 0x2f, 0xce, 0x2f, 0x1b, 0xda, 0xc5, 0x65, 0x43, 0xfb, 0x7d, 0xd9, 0xd0, - 0x3e, 0x5d, 0x35, 0x4a, 0x17, 0x57, 0x8d, 0xd2, 0xcf, 0xab, 0x46, 0xe9, 0xdd, 0xb2, 0xeb, 0xf1, - 0x7d, 0x61, 0x9b, 0x0e, 0xed, 0x16, 0x4d, 0x8e, 0x8a, 0x36, 0xfc, 0x38, 0x24, 0xcc, 0xae, 0xc8, - 0x3f, 0x81, 0xf5, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x59, 0xba, 0xfa, 0x52, 0xfe, 0x06, 0x00, - 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// QueryClient is the client API for Query service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type QueryClient interface { - // Parameters queries the parameters of the module. - Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) - // Queries a list of Current items. - Current(ctx context.Context, in *QueryCurrentRequest, opts ...grpc.CallOption) (*QueryCurrentResponse, error) - // Queries a list of ListProjects items. - ListProjects(ctx context.Context, in *QueryListProjectsRequest, opts ...grpc.CallOption) (*QueryListProjectsResponse, error) - // Queries a list of List items. - List(ctx context.Context, in *QueryListRequest, opts ...grpc.CallOption) (*QueryListResponse, error) -} - -type queryClient struct { - cc grpc1.ClientConn -} - -func NewQueryClient(cc grpc1.ClientConn) QueryClient { - return &queryClient{cc} -} - -func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) { - out := new(QueryParamsResponse) - err := c.cc.Invoke(ctx, "/lavanet.lava.subscription.Query/Params", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) Current(ctx context.Context, in *QueryCurrentRequest, opts ...grpc.CallOption) (*QueryCurrentResponse, error) { - out := new(QueryCurrentResponse) - err := c.cc.Invoke(ctx, "/lavanet.lava.subscription.Query/Current", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) ListProjects(ctx context.Context, in *QueryListProjectsRequest, opts ...grpc.CallOption) (*QueryListProjectsResponse, error) { - out := new(QueryListProjectsResponse) - err := c.cc.Invoke(ctx, "/lavanet.lava.subscription.Query/ListProjects", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) List(ctx context.Context, in *QueryListRequest, opts ...grpc.CallOption) (*QueryListResponse, error) { - out := new(QueryListResponse) - err := c.cc.Invoke(ctx, "/lavanet.lava.subscription.Query/List", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// QueryServer is the server API for Query service. -type QueryServer interface { - // Parameters queries the parameters of the module. - Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error) - // Queries a list of Current items. - Current(context.Context, *QueryCurrentRequest) (*QueryCurrentResponse, error) - // Queries a list of ListProjects items. - ListProjects(context.Context, *QueryListProjectsRequest) (*QueryListProjectsResponse, error) - // Queries a list of List items. - List(context.Context, *QueryListRequest) (*QueryListResponse, error) -} - -// UnimplementedQueryServer can be embedded to have forward compatible implementations. 
-type UnimplementedQueryServer struct { -} - -func (*UnimplementedQueryServer) Params(ctx context.Context, req *QueryParamsRequest) (*QueryParamsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Params not implemented") -} -func (*UnimplementedQueryServer) Current(ctx context.Context, req *QueryCurrentRequest) (*QueryCurrentResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Current not implemented") -} -func (*UnimplementedQueryServer) ListProjects(ctx context.Context, req *QueryListProjectsRequest) (*QueryListProjectsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListProjects not implemented") -} -func (*UnimplementedQueryServer) List(ctx context.Context, req *QueryListRequest) (*QueryListResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method List not implemented") -} - -func RegisterQueryServer(s grpc1.Server, srv QueryServer) { - s.RegisterService(&_Query_serviceDesc, srv) -} - -func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryParamsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Params(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lavanet.lava.subscription.Query/Params", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Params(ctx, req.(*QueryParamsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_Current_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryCurrentRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Current(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - 
FullMethod: "/lavanet.lava.subscription.Query/Current", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Current(ctx, req.(*QueryCurrentRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_ListProjects_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryListProjectsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).ListProjects(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lavanet.lava.subscription.Query/ListProjects", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).ListProjects(ctx, req.(*QueryListProjectsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryListRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).List(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lavanet.lava.subscription.Query/List", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).List(ctx, req.(*QueryListRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Query_serviceDesc = grpc.ServiceDesc{ - ServiceName: "lavanet.lava.subscription.Query", - HandlerType: (*QueryServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Params", - Handler: _Query_Params_Handler, - }, - { - MethodName: "Current", - Handler: _Query_Current_Handler, - }, - { - MethodName: "ListProjects", - Handler: _Query_ListProjects_Handler, - }, - { - MethodName: "List", - Handler: _Query_List_Handler, - }, - }, - Streams: 
[]grpc.StreamDesc{}, - Metadata: "lavanet/lava/subscription/query.proto", -} - -func (m *QueryParamsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryParamsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryParamsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *QueryParamsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryParamsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryCurrentRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryCurrentRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryCurrentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Consumer) > 0 { - i -= len(m.Consumer) - copy(dAtA[i:], m.Consumer) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Consumer))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} 
- -func (m *QueryCurrentResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryCurrentResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryCurrentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Sub != nil { - { - size, err := m.Sub.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *QueryListProjectsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryListProjectsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryListProjectsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Subscription) > 0 { - i -= len(m.Subscription) - copy(dAtA[i:], m.Subscription) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Subscription))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *QueryListProjectsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryListProjectsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryListProjectsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Projects) > 0 { - 
for iNdEx := len(m.Projects) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Projects[iNdEx]) - copy(dAtA[i:], m.Projects[iNdEx]) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Projects[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *QueryListRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryListRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *QueryListResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryListResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.SubsInfo) > 0 { - for iNdEx := len(m.SubsInfo) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.SubsInfo[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ListInfoStruct) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListInfoStruct) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListInfoStruct) MarshalToSizedBuffer(dAtA []byte) (int, 
error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.DurationTotal != 0 { - i = encodeVarintQuery(dAtA, i, uint64(m.DurationTotal)) - i-- - dAtA[i] = 0x48 - } - if len(m.Cluster) > 0 { - i -= len(m.Cluster) - copy(dAtA[i:], m.Cluster) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Cluster))) - i-- - dAtA[i] = 0x42 - } - if m.MonthCuLeft != 0 { - i = encodeVarintQuery(dAtA, i, uint64(m.MonthCuLeft)) - i-- - dAtA[i] = 0x38 - } - if m.MonthCuTotal != 0 { - i = encodeVarintQuery(dAtA, i, uint64(m.MonthCuTotal)) - i-- - dAtA[i] = 0x30 - } - if m.MonthExpiry != 0 { - i = encodeVarintQuery(dAtA, i, uint64(m.MonthExpiry)) - i-- - dAtA[i] = 0x28 - } - if m.DurationLeft != 0 { - i = encodeVarintQuery(dAtA, i, uint64(m.DurationLeft)) - i-- - dAtA[i] = 0x20 - } - if m.DurationBought != 0 { - i = encodeVarintQuery(dAtA, i, uint64(m.DurationBought)) - i-- - dAtA[i] = 0x18 - } - if len(m.Plan) > 0 { - i -= len(m.Plan) - copy(dAtA[i:], m.Plan) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Plan))) - i-- - dAtA[i] = 0x12 - } - if len(m.Consumer) > 0 { - i -= len(m.Consumer) - copy(dAtA[i:], m.Consumer) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Consumer))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { - offset -= sovQuery(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *QueryParamsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *QueryParamsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Params.Size() - n += 1 + l + sovQuery(uint64(l)) - return n -} - -func (m *QueryCurrentRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Consumer) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryCurrentResponse) Size() (n int) { - if m == nil { - 
return 0 - } - var l int - _ = l - if m.Sub != nil { - l = m.Sub.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryListProjectsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Subscription) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryListProjectsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Projects) > 0 { - for _, s := range m.Projects { - l = len(s) - n += 1 + l + sovQuery(uint64(l)) - } - } - return n -} - -func (m *QueryListRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *QueryListResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.SubsInfo) > 0 { - for _, e := range m.SubsInfo { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - return n -} - -func (m *ListInfoStruct) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Consumer) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - l = len(m.Plan) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - if m.DurationBought != 0 { - n += 1 + sovQuery(uint64(m.DurationBought)) - } - if m.DurationLeft != 0 { - n += 1 + sovQuery(uint64(m.DurationLeft)) - } - if m.MonthExpiry != 0 { - n += 1 + sovQuery(uint64(m.MonthExpiry)) - } - if m.MonthCuTotal != 0 { - n += 1 + sovQuery(uint64(m.MonthCuTotal)) - } - if m.MonthCuLeft != 0 { - n += 1 + sovQuery(uint64(m.MonthCuLeft)) - } - l = len(m.Cluster) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - if m.DurationTotal != 0 { - n += 1 + sovQuery(uint64(m.DurationTotal)) - } - return n -} - -func sovQuery(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozQuery(x uint64) (n int) { - return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *QueryParamsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var 
wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryParamsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryParamsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryParamsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryParamsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return 
ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryCurrentRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryCurrentRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryCurrentRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Consumer", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Consumer = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: 
- iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryCurrentResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryCurrentResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryCurrentResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Sub", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Sub == nil { - m.Sub = &Subscription{} - } - if err := m.Sub.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } 
- } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryListProjectsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryListProjectsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryListProjectsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subscription", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Subscription = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryListProjectsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift 
+= 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryListProjectsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryListProjectsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Projects", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Projects = append(m.Projects, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryListRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := 
int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryListRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryListRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryListResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryListResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryListResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SubsInfo", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SubsInfo = append(m.SubsInfo, ListInfoStruct{}) - if err := m.SubsInfo[len(m.SubsInfo)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return 
err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListInfoStruct) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListInfoStruct: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListInfoStruct: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Consumer", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Consumer = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Plan", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Plan = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DurationBought", wireType) - } - m.DurationBought = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DurationBought |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DurationLeft", wireType) - } - m.DurationLeft = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DurationLeft |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MonthExpiry", wireType) - } - m.MonthExpiry = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MonthExpiry |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MonthCuTotal", wireType) - } - m.MonthCuTotal = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MonthCuTotal |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 0 
{ - return fmt.Errorf("proto: wrong wireType = %d for field MonthCuLeft", wireType) - } - m.MonthCuLeft = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MonthCuLeft |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Cluster = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DurationTotal", wireType) - } - m.DurationTotal = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DurationTotal |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipQuery(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l 
{ - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthQuery - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupQuery - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthQuery - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") -) diff --git a/x/subscription/migrations/v5/subscription.pb.go b/x/subscription/migrations/v5/subscription.pb.go deleted file mode 100644 index aa00293c60..0000000000 --- a/x/subscription/migrations/v5/subscription.pb.go +++ /dev/null @@ -1,763 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: lavanet/lava/subscription/subscription.proto - -package v5 - -import ( - fmt "fmt" - proto "github.com/cosmos/gogoproto/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type Subscription struct { - Creator string `protobuf:"bytes,1,opt,name=creator,proto3" json:"creator,omitempty"` - Consumer string `protobuf:"bytes,2,opt,name=consumer,proto3" json:"consumer,omitempty"` - Block uint64 `protobuf:"varint,3,opt,name=block,proto3" json:"block,omitempty"` - PlanIndex string `protobuf:"bytes,4,opt,name=plan_index,json=planIndex,proto3" json:"plan_index,omitempty"` - PlanBlock uint64 `protobuf:"varint,5,opt,name=plan_block,json=planBlock,proto3" json:"plan_block,omitempty"` - DurationBought uint64 `protobuf:"varint,6,opt,name=duration_bought,json=durationBought,proto3" json:"duration_bought,omitempty"` - DurationLeft uint64 `protobuf:"varint,7,opt,name=duration_left,json=durationLeft,proto3" json:"duration_left,omitempty"` - MonthExpiryTime uint64 `protobuf:"varint,8,opt,name=month_expiry_time,json=monthExpiryTime,proto3" json:"month_expiry_time,omitempty"` - MonthCuTotal uint64 `protobuf:"varint,10,opt,name=month_cu_total,json=monthCuTotal,proto3" json:"month_cu_total,omitempty"` - MonthCuLeft uint64 `protobuf:"varint,11,opt,name=month_cu_left,json=monthCuLeft,proto3" json:"month_cu_left,omitempty"` - Cluster string `protobuf:"bytes,13,opt,name=cluster,proto3" json:"cluster,omitempty"` - DurationTotal uint64 `protobuf:"varint,14,opt,name=duration_total,json=durationTotal,proto3" json:"duration_total,omitempty"` -} - -func (m *Subscription) Reset() { *m = Subscription{} } -func (m *Subscription) String() string { return proto.CompactTextString(m) } -func (*Subscription) ProtoMessage() {} -func (*Subscription) Descriptor() ([]byte, []int) { - return 
fileDescriptor_c3bc5507ca237d79, []int{0} -} -func (m *Subscription) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Subscription) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Subscription.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Subscription) XXX_Merge(src proto.Message) { - xxx_messageInfo_Subscription.Merge(m, src) -} -func (m *Subscription) XXX_Size() int { - return m.Size() -} -func (m *Subscription) XXX_DiscardUnknown() { - xxx_messageInfo_Subscription.DiscardUnknown(m) -} - -var xxx_messageInfo_Subscription proto.InternalMessageInfo - -func (m *Subscription) GetCreator() string { - if m != nil { - return m.Creator - } - return "" -} - -func (m *Subscription) GetConsumer() string { - if m != nil { - return m.Consumer - } - return "" -} - -func (m *Subscription) GetBlock() uint64 { - if m != nil { - return m.Block - } - return 0 -} - -func (m *Subscription) GetPlanIndex() string { - if m != nil { - return m.PlanIndex - } - return "" -} - -func (m *Subscription) GetPlanBlock() uint64 { - if m != nil { - return m.PlanBlock - } - return 0 -} - -func (m *Subscription) GetDurationBought() uint64 { - if m != nil { - return m.DurationBought - } - return 0 -} - -func (m *Subscription) GetDurationLeft() uint64 { - if m != nil { - return m.DurationLeft - } - return 0 -} - -func (m *Subscription) GetMonthExpiryTime() uint64 { - if m != nil { - return m.MonthExpiryTime - } - return 0 -} - -func (m *Subscription) GetMonthCuTotal() uint64 { - if m != nil { - return m.MonthCuTotal - } - return 0 -} - -func (m *Subscription) GetMonthCuLeft() uint64 { - if m != nil { - return m.MonthCuLeft - } - return 0 -} - -func (m *Subscription) GetCluster() string { - if m != nil { - return m.Cluster - } - return "" -} - -func (m *Subscription) GetDurationTotal() uint64 { - if m 
!= nil { - return m.DurationTotal - } - return 0 -} - -func init() { - proto.RegisterType((*Subscription)(nil), "lavanet.lava.subscription.SubscriptionV5") -} - -func init() { - proto.RegisterFile("lavanet/lava/subscription/subscription.proto", fileDescriptor_c3bc5507ca237d79) -} - -var fileDescriptor_c3bc5507ca237d79 = []byte{ - // 370 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x92, 0xc1, 0x6e, 0xaa, 0x40, - 0x14, 0x86, 0xe5, 0x8a, 0x8a, 0x47, 0x51, 0xef, 0xe4, 0x2e, 0xe6, 0xde, 0xe4, 0x12, 0x63, 0xdb, - 0xd4, 0x34, 0x06, 0x17, 0x7d, 0x03, 0x9b, 0x36, 0xa9, 0xe9, 0xca, 0xba, 0xea, 0x86, 0x00, 0x8e, - 0x4a, 0x0a, 0x0c, 0x19, 0x66, 0x1a, 0x7d, 0x8b, 0xbe, 0x46, 0xdf, 0xa4, 0x4b, 0x97, 0x5d, 0x36, - 0xfa, 0x22, 0x0d, 0x07, 0x24, 0xba, 0x9a, 0x9c, 0xef, 0x7c, 0x3f, 0x87, 0x99, 0x1c, 0x18, 0x85, - 0xee, 0x9b, 0x1b, 0x33, 0x39, 0xce, 0xce, 0x71, 0xaa, 0xbc, 0xd4, 0x17, 0x41, 0x22, 0x03, 0x1e, - 0x9f, 0x15, 0x76, 0x22, 0xb8, 0xe4, 0xe4, 0x6f, 0x61, 0xdb, 0xd9, 0x69, 0x9f, 0x0a, 0x83, 0x8f, - 0x2a, 0xb4, 0x9f, 0x4f, 0x00, 0xa1, 0xd0, 0xf0, 0x05, 0x73, 0x25, 0x17, 0x54, 0xeb, 0x6b, 0xc3, - 0xe6, 0xec, 0x58, 0x92, 0x7f, 0x60, 0xf8, 0x3c, 0x4e, 0x55, 0xc4, 0x04, 0xfd, 0x85, 0xad, 0xb2, - 0x26, 0x7f, 0xa0, 0xe6, 0x85, 0xdc, 0x7f, 0xa5, 0xd5, 0xbe, 0x36, 0xd4, 0x67, 0x79, 0x41, 0xfe, - 0x03, 0x24, 0xa1, 0x1b, 0x3b, 0x41, 0xbc, 0x60, 0x1b, 0xaa, 0x63, 0xa6, 0x99, 0x91, 0xc7, 0x0c, - 0x94, 0xed, 0x3c, 0x59, 0xc3, 0x24, 0xb6, 0x27, 0x98, 0xbe, 0x86, 0xee, 0x42, 0x09, 0x37, 0xfb, - 0x2b, 0xc7, 0xe3, 0x6a, 0xb5, 0x96, 0xb4, 0x8e, 0x4e, 0xe7, 0x88, 0x27, 0x48, 0xc9, 0x05, 0x98, - 0xa5, 0x18, 0xb2, 0xa5, 0xa4, 0x0d, 0xd4, 0xda, 0x47, 0xf8, 0xc4, 0x96, 0x92, 0xdc, 0xc0, 0xef, - 0x88, 0xc7, 0x72, 0xed, 0xb0, 0x4d, 0x12, 0x88, 0xad, 0x23, 0x83, 0x88, 0x51, 0x03, 0xc5, 0x2e, - 0x36, 0xee, 0x91, 0xcf, 0x83, 0x88, 0x91, 0x4b, 0xe8, 0xe4, 0xae, 0xaf, 0x1c, 0xc9, 0xa5, 0x1b, - 0x52, 0xc8, 0xbf, 0x88, 0xf4, 0x4e, 0xcd, 0x33, 
0x46, 0x06, 0x60, 0x96, 0x16, 0x8e, 0x6d, 0xa1, - 0xd4, 0x2a, 0x24, 0x9c, 0x9a, 0xbd, 0x66, 0xa8, 0x52, 0xc9, 0x04, 0x35, 0x8b, 0xd7, 0xcc, 0x4b, - 0x72, 0x05, 0xe5, 0x35, 0x8a, 0x19, 0x1d, 0x8c, 0x97, 0x57, 0xc1, 0x21, 0x53, 0xdd, 0x68, 0xf6, - 0x60, 0xaa, 0x1b, 0xed, 0x9e, 0x39, 0x79, 0xf8, 0xdc, 0x5b, 0xda, 0x6e, 0x6f, 0x69, 0xdf, 0x7b, - 0x4b, 0x7b, 0x3f, 0x58, 0x95, 0xdd, 0xc1, 0xaa, 0x7c, 0x1d, 0xac, 0xca, 0xcb, 0x68, 0x15, 0xc8, - 0xb5, 0xf2, 0x6c, 0x9f, 0x47, 0xe3, 0xb3, 0xcd, 0xd8, 0x9c, 0xef, 0x86, 0xdc, 0x26, 0x2c, 0xf5, - 0xea, 0xb8, 0x15, 0xb7, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xc2, 0x89, 0x0d, 0x30, 0x45, 0x02, - 0x00, 0x00, -} - -func (m *Subscription) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Subscription) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Subscription) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.DurationTotal != 0 { - i = encodeVarintSubscription(dAtA, i, uint64(m.DurationTotal)) - i-- - dAtA[i] = 0x70 - } - if len(m.Cluster) > 0 { - i -= len(m.Cluster) - copy(dAtA[i:], m.Cluster) - i = encodeVarintSubscription(dAtA, i, uint64(len(m.Cluster))) - i-- - dAtA[i] = 0x6a - } - if m.MonthCuLeft != 0 { - i = encodeVarintSubscription(dAtA, i, uint64(m.MonthCuLeft)) - i-- - dAtA[i] = 0x58 - } - if m.MonthCuTotal != 0 { - i = encodeVarintSubscription(dAtA, i, uint64(m.MonthCuTotal)) - i-- - dAtA[i] = 0x50 - } - if m.MonthExpiryTime != 0 { - i = encodeVarintSubscription(dAtA, i, uint64(m.MonthExpiryTime)) - i-- - dAtA[i] = 0x40 - } - if m.DurationLeft != 0 { - i = encodeVarintSubscription(dAtA, i, uint64(m.DurationLeft)) - i-- - dAtA[i] = 0x38 - } - if m.DurationBought != 0 { - i = encodeVarintSubscription(dAtA, i, uint64(m.DurationBought)) - i-- - 
dAtA[i] = 0x30 - } - if m.PlanBlock != 0 { - i = encodeVarintSubscription(dAtA, i, uint64(m.PlanBlock)) - i-- - dAtA[i] = 0x28 - } - if len(m.PlanIndex) > 0 { - i -= len(m.PlanIndex) - copy(dAtA[i:], m.PlanIndex) - i = encodeVarintSubscription(dAtA, i, uint64(len(m.PlanIndex))) - i-- - dAtA[i] = 0x22 - } - if m.Block != 0 { - i = encodeVarintSubscription(dAtA, i, uint64(m.Block)) - i-- - dAtA[i] = 0x18 - } - if len(m.Consumer) > 0 { - i -= len(m.Consumer) - copy(dAtA[i:], m.Consumer) - i = encodeVarintSubscription(dAtA, i, uint64(len(m.Consumer))) - i-- - dAtA[i] = 0x12 - } - if len(m.Creator) > 0 { - i -= len(m.Creator) - copy(dAtA[i:], m.Creator) - i = encodeVarintSubscription(dAtA, i, uint64(len(m.Creator))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintSubscription(dAtA []byte, offset int, v uint64) int { - offset -= sovSubscription(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Subscription) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Creator) - if l > 0 { - n += 1 + l + sovSubscription(uint64(l)) - } - l = len(m.Consumer) - if l > 0 { - n += 1 + l + sovSubscription(uint64(l)) - } - if m.Block != 0 { - n += 1 + sovSubscription(uint64(m.Block)) - } - l = len(m.PlanIndex) - if l > 0 { - n += 1 + l + sovSubscription(uint64(l)) - } - if m.PlanBlock != 0 { - n += 1 + sovSubscription(uint64(m.PlanBlock)) - } - if m.DurationBought != 0 { - n += 1 + sovSubscription(uint64(m.DurationBought)) - } - if m.DurationLeft != 0 { - n += 1 + sovSubscription(uint64(m.DurationLeft)) - } - if m.MonthExpiryTime != 0 { - n += 1 + sovSubscription(uint64(m.MonthExpiryTime)) - } - if m.MonthCuTotal != 0 { - n += 1 + sovSubscription(uint64(m.MonthCuTotal)) - } - if m.MonthCuLeft != 0 { - n += 1 + sovSubscription(uint64(m.MonthCuLeft)) - } - l = len(m.Cluster) - if l > 0 { - n += 1 + l + sovSubscription(uint64(l)) - 
} - if m.DurationTotal != 0 { - n += 1 + sovSubscription(uint64(m.DurationTotal)) - } - return n -} - -func sovSubscription(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozSubscription(x uint64) (n int) { - return sovSubscription(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Subscription) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Subscription: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Subscription: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Creator", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSubscription - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSubscription - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Creator = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Consumer", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSubscription - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSubscription - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Consumer = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Block", wireType) - } - m.Block = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Block |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PlanIndex", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSubscription - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSubscription - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PlanIndex = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PlanBlock", wireType) - } - m.PlanBlock = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PlanBlock |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DurationBought", wireType) - } - m.DurationBought = 0 
- for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DurationBought |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DurationLeft", wireType) - } - m.DurationLeft = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DurationLeft |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MonthExpiryTime", wireType) - } - m.MonthExpiryTime = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MonthExpiryTime |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MonthCuTotal", wireType) - } - m.MonthCuTotal = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MonthCuTotal |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MonthCuLeft", wireType) - } - m.MonthCuLeft = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MonthCuLeft |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType) - } - var stringLen uint64 - for shift := uint(0); ; 
shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSubscription - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSubscription - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Cluster = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DurationTotal", wireType) - } - m.DurationTotal = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DurationTotal |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipSubscription(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSubscription - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipSubscription(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSubscription - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSubscription - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - 
for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSubscription - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthSubscription - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupSubscription - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthSubscription - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthSubscription = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowSubscription = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupSubscription = fmt.Errorf("proto: unexpected end of group") -) diff --git a/x/subscription/migrations/v6/subscription.pb.go b/x/subscription/migrations/v6/subscription.pb.go deleted file mode 100644 index 521214471f..0000000000 --- a/x/subscription/migrations/v6/subscription.pb.go +++ /dev/null @@ -1,1162 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: lavanet/lava/subscription/subscription.proto - -package v6 - -import ( - fmt "fmt" - proto "github.com/cosmos/gogoproto/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type Subscription struct { - Creator string `protobuf:"bytes,1,opt,name=creator,proto3" json:"creator,omitempty"` - Consumer string `protobuf:"bytes,2,opt,name=consumer,proto3" json:"consumer,omitempty"` - Block uint64 `protobuf:"varint,3,opt,name=block,proto3" json:"block,omitempty"` - PlanIndex string `protobuf:"bytes,4,opt,name=plan_index,json=planIndex,proto3" json:"plan_index,omitempty"` - PlanBlock uint64 `protobuf:"varint,5,opt,name=plan_block,json=planBlock,proto3" json:"plan_block,omitempty"` - DurationBought uint64 `protobuf:"varint,6,opt,name=duration_bought,json=durationBought,proto3" json:"duration_bought,omitempty"` - DurationLeft uint64 `protobuf:"varint,7,opt,name=duration_left,json=durationLeft,proto3" json:"duration_left,omitempty"` - MonthExpiryTime uint64 `protobuf:"varint,8,opt,name=month_expiry_time,json=monthExpiryTime,proto3" json:"month_expiry_time,omitempty"` - MonthCuTotal uint64 `protobuf:"varint,10,opt,name=month_cu_total,json=monthCuTotal,proto3" json:"month_cu_total,omitempty"` - MonthCuLeft uint64 `protobuf:"varint,11,opt,name=month_cu_left,json=monthCuLeft,proto3" json:"month_cu_left,omitempty"` - Cluster string `protobuf:"bytes,13,opt,name=cluster,proto3" json:"cluster,omitempty"` - DurationTotal uint64 `protobuf:"varint,14,opt,name=duration_total,json=durationTotal,proto3" json:"duration_total,omitempty"` - AutoRenewal bool `protobuf:"varint,15,opt,name=auto_renewal,json=autoRenewal,proto3" json:"auto_renewal,omitempty"` - FutureSubscription *FutureSubscription `protobuf:"bytes,16,opt,name=future_subscription,json=futureSubscription,proto3" json:"future_subscription,omitempty"` -} - -func (m *Subscription) Reset() { *m = Subscription{} } -func (m *Subscription) String() string { return proto.CompactTextString(m) } -func (*Subscription) ProtoMessage() {} -func (*Subscription) Descriptor() ([]byte, []int) { - return fileDescriptor_c3bc5507ca237d79, 
[]int{0} -} -func (m *Subscription) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Subscription) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Subscription.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Subscription) XXX_Merge(src proto.Message) { - xxx_messageInfo_Subscription.Merge(m, src) -} -func (m *Subscription) XXX_Size() int { - return m.Size() -} -func (m *Subscription) XXX_DiscardUnknown() { - xxx_messageInfo_Subscription.DiscardUnknown(m) -} - -var xxx_messageInfo_Subscription proto.InternalMessageInfo - -func (m *Subscription) GetCreator() string { - if m != nil { - return m.Creator - } - return "" -} - -func (m *Subscription) GetConsumer() string { - if m != nil { - return m.Consumer - } - return "" -} - -func (m *Subscription) GetBlock() uint64 { - if m != nil { - return m.Block - } - return 0 -} - -func (m *Subscription) GetPlanIndex() string { - if m != nil { - return m.PlanIndex - } - return "" -} - -func (m *Subscription) GetPlanBlock() uint64 { - if m != nil { - return m.PlanBlock - } - return 0 -} - -func (m *Subscription) GetDurationBought() uint64 { - if m != nil { - return m.DurationBought - } - return 0 -} - -func (m *Subscription) GetDurationLeft() uint64 { - if m != nil { - return m.DurationLeft - } - return 0 -} - -func (m *Subscription) GetMonthExpiryTime() uint64 { - if m != nil { - return m.MonthExpiryTime - } - return 0 -} - -func (m *Subscription) GetMonthCuTotal() uint64 { - if m != nil { - return m.MonthCuTotal - } - return 0 -} - -func (m *Subscription) GetMonthCuLeft() uint64 { - if m != nil { - return m.MonthCuLeft - } - return 0 -} - -func (m *Subscription) GetCluster() string { - if m != nil { - return m.Cluster - } - return "" -} - -func (m *Subscription) GetDurationTotal() uint64 { - if m != nil { - return m.DurationTotal 
- } - return 0 -} - -func (m *Subscription) GetAutoRenewal() bool { - if m != nil { - return m.AutoRenewal - } - return false -} - -func (m *Subscription) GetFutureSubscription() *FutureSubscription { - if m != nil { - return m.FutureSubscription - } - return nil -} - -type FutureSubscription struct { - Creator string `protobuf:"bytes,1,opt,name=creator,proto3" json:"creator,omitempty"` - PlanIndex string `protobuf:"bytes,2,opt,name=plan_index,json=planIndex,proto3" json:"plan_index,omitempty"` - PlanBlock uint64 `protobuf:"varint,3,opt,name=plan_block,json=planBlock,proto3" json:"plan_block,omitempty"` - DurationBought uint64 `protobuf:"varint,4,opt,name=duration_bought,json=durationBought,proto3" json:"duration_bought,omitempty"` -} - -func (m *FutureSubscription) Reset() { *m = FutureSubscription{} } -func (m *FutureSubscription) String() string { return proto.CompactTextString(m) } -func (*FutureSubscription) ProtoMessage() {} -func (*FutureSubscription) Descriptor() ([]byte, []int) { - return fileDescriptor_c3bc5507ca237d79, []int{1} -} -func (m *FutureSubscription) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *FutureSubscription) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_FutureSubscription.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *FutureSubscription) XXX_Merge(src proto.Message) { - xxx_messageInfo_FutureSubscription.Merge(m, src) -} -func (m *FutureSubscription) XXX_Size() int { - return m.Size() -} -func (m *FutureSubscription) XXX_DiscardUnknown() { - xxx_messageInfo_FutureSubscription.DiscardUnknown(m) -} - -var xxx_messageInfo_FutureSubscription proto.InternalMessageInfo - -func (m *FutureSubscription) GetCreator() string { - if m != nil { - return m.Creator - } - return "" -} - -func (m *FutureSubscription) GetPlanIndex() string { - if m != 
nil { - return m.PlanIndex - } - return "" -} - -func (m *FutureSubscription) GetPlanBlock() uint64 { - if m != nil { - return m.PlanBlock - } - return 0 -} - -func (m *FutureSubscription) GetDurationBought() uint64 { - if m != nil { - return m.DurationBought - } - return 0 -} - -func init() { - proto.RegisterType((*Subscription)(nil), "lavanet.lava.subscription.SubscriptionV6") - proto.RegisterType((*FutureSubscription)(nil), "lavanet.lava.subscription.FutureSubscriptionV6") -} - -func init() { - proto.RegisterFile("lavanet/lava/subscription/subscription.proto", fileDescriptor_c3bc5507ca237d79) -} - -var fileDescriptor_c3bc5507ca237d79 = []byte{ - // 452 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0xcf, 0x6e, 0xd3, 0x40, - 0x10, 0xc6, 0xb3, 0xad, 0xdb, 0x3a, 0x93, 0xbf, 0x2c, 0x1c, 0x16, 0x24, 0xac, 0x10, 0x40, 0x44, - 0xa8, 0x38, 0x12, 0xbc, 0x41, 0x10, 0x95, 0xa8, 0x38, 0x99, 0x9e, 0x38, 0x60, 0xd9, 0xee, 0xa6, - 0xb1, 0xb0, 0xbd, 0xd6, 0x7a, 0x16, 0xd2, 0xb7, 0xe0, 0xc2, 0x5b, 0xf0, 0x20, 0x1c, 0x7b, 0xe4, - 0x88, 0x92, 0x17, 0x41, 0x1e, 0x27, 0x56, 0xac, 0x42, 0xd4, 0xd3, 0x6a, 0x7e, 0xf3, 0x7d, 0x1e, - 0xef, 0xce, 0x0c, 0x9c, 0x26, 0xc1, 0xd7, 0x20, 0x93, 0x38, 0x2d, 0xcf, 0x69, 0x61, 0xc2, 0x22, - 0xd2, 0x71, 0x8e, 0xb1, 0xca, 0x1a, 0x81, 0x9b, 0x6b, 0x85, 0x8a, 0x3f, 0xdc, 0xa8, 0xdd, 0xf2, - 0x74, 0x77, 0x05, 0xe3, 0x9f, 0x16, 0x74, 0x3f, 0xee, 0x00, 0x2e, 0xe0, 0x24, 0xd2, 0x32, 0x40, - 0xa5, 0x05, 0x1b, 0xb1, 0x49, 0xdb, 0xdb, 0x86, 0xfc, 0x11, 0xd8, 0x91, 0xca, 0x0a, 0x93, 0x4a, - 0x2d, 0x0e, 0x28, 0x55, 0xc7, 0xfc, 0x01, 0x1c, 0x85, 0x89, 0x8a, 0xbe, 0x88, 0xc3, 0x11, 0x9b, - 0x58, 0x5e, 0x15, 0xf0, 0xc7, 0x00, 0x79, 0x12, 0x64, 0x7e, 0x9c, 0x5d, 0xca, 0xa5, 0xb0, 0xc8, - 0xd3, 0x2e, 0xc9, 0xfb, 0x12, 0xd4, 0xe9, 0xca, 0x79, 0x44, 0x4e, 0x4a, 0xcf, 0xc8, 0xfd, 0x02, - 0x06, 0x97, 0x46, 0x07, 0xe5, 0x5f, 0xf9, 0xa1, 0x32, 0x57, 0x0b, 0x14, 0xc7, 0xa4, 0xe9, 0x6f, - 0xf1, 0x8c, 0x28, 
0x7f, 0x0a, 0xbd, 0x5a, 0x98, 0xc8, 0x39, 0x8a, 0x13, 0x92, 0x75, 0xb7, 0xf0, - 0x83, 0x9c, 0x23, 0x7f, 0x09, 0xf7, 0x52, 0x95, 0xe1, 0xc2, 0x97, 0xcb, 0x3c, 0xd6, 0xd7, 0x3e, - 0xc6, 0xa9, 0x14, 0x36, 0x09, 0x07, 0x94, 0x78, 0x47, 0xfc, 0x22, 0x4e, 0x25, 0x7f, 0x06, 0xfd, - 0x4a, 0x1b, 0x19, 0x1f, 0x15, 0x06, 0x89, 0x80, 0xea, 0x8b, 0x44, 0xdf, 0x9a, 0x8b, 0x92, 0xf1, - 0x31, 0xf4, 0x6a, 0x15, 0x95, 0xed, 0x90, 0xa8, 0xb3, 0x11, 0x51, 0xd5, 0xf2, 0x35, 0x13, 0x53, - 0xa0, 0xd4, 0xa2, 0xb7, 0x79, 0xcd, 0x2a, 0xe4, 0xcf, 0xa1, 0xbe, 0xc6, 0xa6, 0x46, 0x9f, 0xec, - 0xf5, 0x55, 0xaa, 0x22, 0x4f, 0xa0, 0x1b, 0x18, 0x54, 0xbe, 0x96, 0x99, 0xfc, 0x16, 0x24, 0x62, - 0x30, 0x62, 0x13, 0xdb, 0xeb, 0x94, 0xcc, 0xab, 0x10, 0xff, 0x0c, 0xf7, 0xe7, 0x06, 0x8d, 0x96, - 0xfe, 0x6e, 0x67, 0xc5, 0x70, 0xc4, 0x26, 0x9d, 0xd7, 0xaf, 0xdc, 0xff, 0xf6, 0xde, 0x3d, 0x23, - 0xd7, 0x6e, 0xf7, 0x3d, 0x3e, 0xbf, 0xc5, 0xce, 0x2d, 0xbb, 0x3d, 0x84, 0x73, 0xcb, 0xee, 0x0e, - 0x7b, 0xe3, 0x1f, 0x0c, 0xf8, 0x6d, 0xdb, 0x9e, 0xa1, 0x69, 0x8e, 0xc0, 0xc1, 0xfe, 0x11, 0x38, - 0xbc, 0xc3, 0x08, 0x58, 0xff, 0x1a, 0x81, 0xd9, 0xd9, 0xaf, 0x95, 0xc3, 0x6e, 0x56, 0x0e, 0xfb, - 0xb3, 0x72, 0xd8, 0xf7, 0xb5, 0xd3, 0xba, 0x59, 0x3b, 0xad, 0xdf, 0x6b, 0xa7, 0xf5, 0xe9, 0xf4, - 0x2a, 0xc6, 0x85, 0x09, 0xdd, 0x48, 0xa5, 0xd3, 0xc6, 0xd2, 0x2c, 0x9b, 0x6b, 0x83, 0xd7, 0xb9, - 0x2c, 0xc2, 0x63, 0x5a, 0x98, 0x37, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xb1, 0xdc, 0x42, 0x8e, - 0x60, 0x03, 0x00, 0x00, -} - -func (m *Subscription) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Subscription) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Subscription) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.FutureSubscription != nil { - { - 
size, err := m.FutureSubscription.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintSubscription(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x82 - } - if m.AutoRenewal { - i-- - if m.AutoRenewal { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x78 - } - if m.DurationTotal != 0 { - i = encodeVarintSubscription(dAtA, i, uint64(m.DurationTotal)) - i-- - dAtA[i] = 0x70 - } - if len(m.Cluster) > 0 { - i -= len(m.Cluster) - copy(dAtA[i:], m.Cluster) - i = encodeVarintSubscription(dAtA, i, uint64(len(m.Cluster))) - i-- - dAtA[i] = 0x6a - } - if m.MonthCuLeft != 0 { - i = encodeVarintSubscription(dAtA, i, uint64(m.MonthCuLeft)) - i-- - dAtA[i] = 0x58 - } - if m.MonthCuTotal != 0 { - i = encodeVarintSubscription(dAtA, i, uint64(m.MonthCuTotal)) - i-- - dAtA[i] = 0x50 - } - if m.MonthExpiryTime != 0 { - i = encodeVarintSubscription(dAtA, i, uint64(m.MonthExpiryTime)) - i-- - dAtA[i] = 0x40 - } - if m.DurationLeft != 0 { - i = encodeVarintSubscription(dAtA, i, uint64(m.DurationLeft)) - i-- - dAtA[i] = 0x38 - } - if m.DurationBought != 0 { - i = encodeVarintSubscription(dAtA, i, uint64(m.DurationBought)) - i-- - dAtA[i] = 0x30 - } - if m.PlanBlock != 0 { - i = encodeVarintSubscription(dAtA, i, uint64(m.PlanBlock)) - i-- - dAtA[i] = 0x28 - } - if len(m.PlanIndex) > 0 { - i -= len(m.PlanIndex) - copy(dAtA[i:], m.PlanIndex) - i = encodeVarintSubscription(dAtA, i, uint64(len(m.PlanIndex))) - i-- - dAtA[i] = 0x22 - } - if m.Block != 0 { - i = encodeVarintSubscription(dAtA, i, uint64(m.Block)) - i-- - dAtA[i] = 0x18 - } - if len(m.Consumer) > 0 { - i -= len(m.Consumer) - copy(dAtA[i:], m.Consumer) - i = encodeVarintSubscription(dAtA, i, uint64(len(m.Consumer))) - i-- - dAtA[i] = 0x12 - } - if len(m.Creator) > 0 { - i -= len(m.Creator) - copy(dAtA[i:], m.Creator) - i = encodeVarintSubscription(dAtA, i, uint64(len(m.Creator))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m 
*FutureSubscription) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *FutureSubscription) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FutureSubscription) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.DurationBought != 0 { - i = encodeVarintSubscription(dAtA, i, uint64(m.DurationBought)) - i-- - dAtA[i] = 0x20 - } - if m.PlanBlock != 0 { - i = encodeVarintSubscription(dAtA, i, uint64(m.PlanBlock)) - i-- - dAtA[i] = 0x18 - } - if len(m.PlanIndex) > 0 { - i -= len(m.PlanIndex) - copy(dAtA[i:], m.PlanIndex) - i = encodeVarintSubscription(dAtA, i, uint64(len(m.PlanIndex))) - i-- - dAtA[i] = 0x12 - } - if len(m.Creator) > 0 { - i -= len(m.Creator) - copy(dAtA[i:], m.Creator) - i = encodeVarintSubscription(dAtA, i, uint64(len(m.Creator))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintSubscription(dAtA []byte, offset int, v uint64) int { - offset -= sovSubscription(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Subscription) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Creator) - if l > 0 { - n += 1 + l + sovSubscription(uint64(l)) - } - l = len(m.Consumer) - if l > 0 { - n += 1 + l + sovSubscription(uint64(l)) - } - if m.Block != 0 { - n += 1 + sovSubscription(uint64(m.Block)) - } - l = len(m.PlanIndex) - if l > 0 { - n += 1 + l + sovSubscription(uint64(l)) - } - if m.PlanBlock != 0 { - n += 1 + sovSubscription(uint64(m.PlanBlock)) - } - if m.DurationBought != 0 { - n += 1 + sovSubscription(uint64(m.DurationBought)) - } - if m.DurationLeft != 0 { - n += 1 + sovSubscription(uint64(m.DurationLeft)) - } - if 
m.MonthExpiryTime != 0 { - n += 1 + sovSubscription(uint64(m.MonthExpiryTime)) - } - if m.MonthCuTotal != 0 { - n += 1 + sovSubscription(uint64(m.MonthCuTotal)) - } - if m.MonthCuLeft != 0 { - n += 1 + sovSubscription(uint64(m.MonthCuLeft)) - } - l = len(m.Cluster) - if l > 0 { - n += 1 + l + sovSubscription(uint64(l)) - } - if m.DurationTotal != 0 { - n += 1 + sovSubscription(uint64(m.DurationTotal)) - } - if m.AutoRenewal { - n += 2 - } - if m.FutureSubscription != nil { - l = m.FutureSubscription.Size() - n += 2 + l + sovSubscription(uint64(l)) - } - return n -} - -func (m *FutureSubscription) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Creator) - if l > 0 { - n += 1 + l + sovSubscription(uint64(l)) - } - l = len(m.PlanIndex) - if l > 0 { - n += 1 + l + sovSubscription(uint64(l)) - } - if m.PlanBlock != 0 { - n += 1 + sovSubscription(uint64(m.PlanBlock)) - } - if m.DurationBought != 0 { - n += 1 + sovSubscription(uint64(m.DurationBought)) - } - return n -} - -func sovSubscription(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozSubscription(x uint64) (n int) { - return sovSubscription(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Subscription) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Subscription: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Subscription: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Creator", 
wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSubscription - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSubscription - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Creator = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Consumer", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSubscription - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSubscription - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Consumer = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Block", wireType) - } - m.Block = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Block |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PlanIndex", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - 
stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSubscription - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSubscription - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PlanIndex = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PlanBlock", wireType) - } - m.PlanBlock = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PlanBlock |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DurationBought", wireType) - } - m.DurationBought = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DurationBought |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DurationLeft", wireType) - } - m.DurationLeft = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DurationLeft |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MonthExpiryTime", wireType) - } - m.MonthExpiryTime = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MonthExpiryTime |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 10: - if wireType != 0 { - 
return fmt.Errorf("proto: wrong wireType = %d for field MonthCuTotal", wireType) - } - m.MonthCuTotal = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MonthCuTotal |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MonthCuLeft", wireType) - } - m.MonthCuLeft = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MonthCuLeft |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSubscription - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSubscription - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Cluster = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DurationTotal", wireType) - } - m.DurationTotal = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DurationTotal |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 15: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AutoRenewal", wireType) - } - var v int - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.AutoRenewal = bool(v != 0) - case 16: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FutureSubscription", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSubscription - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSubscription - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.FutureSubscription == nil { - m.FutureSubscription = &FutureSubscription{} - } - if err := m.FutureSubscription.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSubscription(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSubscription - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FutureSubscription) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FutureSubscription: wiretype end group for non-group") - } - if fieldNum <= 0 { - return 
fmt.Errorf("proto: FutureSubscription: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Creator", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSubscription - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSubscription - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Creator = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PlanIndex", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSubscription - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSubscription - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PlanIndex = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PlanBlock", wireType) - } - m.PlanBlock = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PlanBlock |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field 
DurationBought", wireType) - } - m.DurationBought = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DurationBought |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipSubscription(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSubscription - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipSubscription(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSubscription - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSubscription - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSubscription - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthSubscription - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupSubscription - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthSubscription - } - if depth == 0 { - 
return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthSubscription = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowSubscription = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupSubscription = fmt.Errorf("proto: unexpected end of group") -) diff --git a/x/subscription/migrations/v7/subscription.pb.go b/x/subscription/migrations/v7/subscription.pb.go deleted file mode 100644 index aabb342d2c..0000000000 --- a/x/subscription/migrations/v7/subscription.pb.go +++ /dev/null @@ -1,1175 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: lavanet/lava/subscription/subscription.proto - -package types - -import ( - fmt "fmt" - proto "github.com/cosmos/gogoproto/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type Subscription struct { - Creator string `protobuf:"bytes,1,opt,name=creator,proto3" json:"creator,omitempty"` - Consumer string `protobuf:"bytes,2,opt,name=consumer,proto3" json:"consumer,omitempty"` - Block uint64 `protobuf:"varint,3,opt,name=block,proto3" json:"block,omitempty"` - PlanIndex string `protobuf:"bytes,4,opt,name=plan_index,json=planIndex,proto3" json:"plan_index,omitempty"` - PlanBlock uint64 `protobuf:"varint,5,opt,name=plan_block,json=planBlock,proto3" json:"plan_block,omitempty"` - DurationBought uint64 `protobuf:"varint,6,opt,name=duration_bought,json=durationBought,proto3" json:"duration_bought,omitempty"` - DurationLeft uint64 `protobuf:"varint,7,opt,name=duration_left,json=durationLeft,proto3" json:"duration_left,omitempty"` - MonthExpiryTime uint64 `protobuf:"varint,8,opt,name=month_expiry_time,json=monthExpiryTime,proto3" json:"month_expiry_time,omitempty"` - MonthCuTotal uint64 `protobuf:"varint,10,opt,name=month_cu_total,json=monthCuTotal,proto3" json:"month_cu_total,omitempty"` - MonthCuLeft uint64 `protobuf:"varint,11,opt,name=month_cu_left,json=monthCuLeft,proto3" json:"month_cu_left,omitempty"` - Cluster string `protobuf:"bytes,13,opt,name=cluster,proto3" json:"cluster,omitempty"` - DurationTotal uint64 `protobuf:"varint,14,opt,name=duration_total,json=durationTotal,proto3" json:"duration_total,omitempty"` - FutureSubscription *FutureSubscription `protobuf:"bytes,16,opt,name=future_subscription,json=futureSubscription,proto3" json:"future_subscription,omitempty"` - AutoRenewalNextPlan string `protobuf:"bytes,17,opt,name=auto_renewal_next_plan,json=autoRenewalNextPlan,proto3" json:"auto_renewal_next_plan,omitempty"` -} - -func (m *Subscription) Reset() { *m = Subscription{} } -func (m *Subscription) String() string { return proto.CompactTextString(m) } -func (*Subscription) ProtoMessage() {} -func (*Subscription) Descriptor() ([]byte, []int) { - 
return fileDescriptor_c3bc5507ca237d79, []int{0} -} -func (m *Subscription) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Subscription) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Subscription.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Subscription) XXX_Merge(src proto.Message) { - xxx_messageInfo_Subscription.Merge(m, src) -} -func (m *Subscription) XXX_Size() int { - return m.Size() -} -func (m *Subscription) XXX_DiscardUnknown() { - xxx_messageInfo_Subscription.DiscardUnknown(m) -} - -var xxx_messageInfo_Subscription proto.InternalMessageInfo - -func (m *Subscription) GetCreator() string { - if m != nil { - return m.Creator - } - return "" -} - -func (m *Subscription) GetConsumer() string { - if m != nil { - return m.Consumer - } - return "" -} - -func (m *Subscription) GetBlock() uint64 { - if m != nil { - return m.Block - } - return 0 -} - -func (m *Subscription) GetPlanIndex() string { - if m != nil { - return m.PlanIndex - } - return "" -} - -func (m *Subscription) GetPlanBlock() uint64 { - if m != nil { - return m.PlanBlock - } - return 0 -} - -func (m *Subscription) GetDurationBought() uint64 { - if m != nil { - return m.DurationBought - } - return 0 -} - -func (m *Subscription) GetDurationLeft() uint64 { - if m != nil { - return m.DurationLeft - } - return 0 -} - -func (m *Subscription) GetMonthExpiryTime() uint64 { - if m != nil { - return m.MonthExpiryTime - } - return 0 -} - -func (m *Subscription) GetMonthCuTotal() uint64 { - if m != nil { - return m.MonthCuTotal - } - return 0 -} - -func (m *Subscription) GetMonthCuLeft() uint64 { - if m != nil { - return m.MonthCuLeft - } - return 0 -} - -func (m *Subscription) GetCluster() string { - if m != nil { - return m.Cluster - } - return "" -} - -func (m *Subscription) GetDurationTotal() uint64 { 
- if m != nil { - return m.DurationTotal - } - return 0 -} - -func (m *Subscription) GetFutureSubscription() *FutureSubscription { - if m != nil { - return m.FutureSubscription - } - return nil -} - -func (m *Subscription) GetAutoRenewalNextPlan() string { - if m != nil { - return m.AutoRenewalNextPlan - } - return "" -} - -type FutureSubscription struct { - Creator string `protobuf:"bytes,1,opt,name=creator,proto3" json:"creator,omitempty"` - PlanIndex string `protobuf:"bytes,2,opt,name=plan_index,json=planIndex,proto3" json:"plan_index,omitempty"` - PlanBlock uint64 `protobuf:"varint,3,opt,name=plan_block,json=planBlock,proto3" json:"plan_block,omitempty"` - DurationBought uint64 `protobuf:"varint,4,opt,name=duration_bought,json=durationBought,proto3" json:"duration_bought,omitempty"` -} - -func (m *FutureSubscription) Reset() { *m = FutureSubscription{} } -func (m *FutureSubscription) String() string { return proto.CompactTextString(m) } -func (*FutureSubscription) ProtoMessage() {} -func (*FutureSubscription) Descriptor() ([]byte, []int) { - return fileDescriptor_c3bc5507ca237d79, []int{1} -} -func (m *FutureSubscription) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *FutureSubscription) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_FutureSubscription.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *FutureSubscription) XXX_Merge(src proto.Message) { - xxx_messageInfo_FutureSubscription.Merge(m, src) -} -func (m *FutureSubscription) XXX_Size() int { - return m.Size() -} -func (m *FutureSubscription) XXX_DiscardUnknown() { - xxx_messageInfo_FutureSubscription.DiscardUnknown(m) -} - -var xxx_messageInfo_FutureSubscription proto.InternalMessageInfo - -func (m *FutureSubscription) GetCreator() string { - if m != nil { - return m.Creator - } - return "" -} - -func 
(m *FutureSubscription) GetPlanIndex() string { - if m != nil { - return m.PlanIndex - } - return "" -} - -func (m *FutureSubscription) GetPlanBlock() uint64 { - if m != nil { - return m.PlanBlock - } - return 0 -} - -func (m *FutureSubscription) GetDurationBought() uint64 { - if m != nil { - return m.DurationBought - } - return 0 -} - -func init() { - proto.RegisterType((*Subscription)(nil), "lavanet.lava.subscription.SubscriptionV7") - proto.RegisterType((*FutureSubscription)(nil), "lavanet.lava.subscription.FutureSubscriptionV7") -} - -func init() { - proto.RegisterFile("lavanet/lava/subscription/subscription.proto", fileDescriptor_c3bc5507ca237d79) -} - -var fileDescriptor_c3bc5507ca237d79 = []byte{ - // 468 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0xcd, 0x6e, 0xd3, 0x40, - 0x10, 0x8e, 0x5b, 0xb7, 0x4d, 0x26, 0x7f, 0xee, 0x16, 0xa1, 0x05, 0x09, 0x2b, 0x0a, 0x20, 0x22, - 0x54, 0x1c, 0x89, 0xbe, 0x41, 0x10, 0x95, 0x88, 0x10, 0x42, 0xa1, 0x27, 0x0e, 0x58, 0xb6, 0xbb, - 0x69, 0x2c, 0x6c, 0xaf, 0xb5, 0x9e, 0x05, 0xf7, 0x2d, 0xb8, 0xf0, 0x46, 0x1c, 0x38, 0xf6, 0xc8, - 0x11, 0x25, 0x2f, 0x82, 0x76, 0x9c, 0x58, 0x89, 0x0a, 0x15, 0x27, 0x6b, 0xbe, 0x1f, 0xcf, 0xee, - 0xce, 0x37, 0x70, 0x9a, 0x04, 0x5f, 0x82, 0x4c, 0xe0, 0xd8, 0x7c, 0xc7, 0x85, 0x0e, 0x8b, 0x48, - 0xc5, 0x39, 0xc6, 0x32, 0xdb, 0x29, 0xbc, 0x5c, 0x49, 0x94, 0xec, 0xc1, 0x5a, 0xed, 0x99, 0xaf, - 0xb7, 0x2d, 0x18, 0xfe, 0xb0, 0xa1, 0xf3, 0x61, 0x0b, 0x60, 0x1c, 0x8e, 0x22, 0x25, 0x02, 0x94, - 0x8a, 0x5b, 0x03, 0x6b, 0xd4, 0x9a, 0x6d, 0x4a, 0xf6, 0x10, 0x9a, 0x91, 0xcc, 0x0a, 0x9d, 0x0a, - 0xc5, 0xf7, 0x88, 0xaa, 0x6b, 0x76, 0x0f, 0x0e, 0xc2, 0x44, 0x46, 0x9f, 0xf9, 0xfe, 0xc0, 0x1a, - 0xd9, 0xb3, 0xaa, 0x60, 0x8f, 0x00, 0xf2, 0x24, 0xc8, 0xfc, 0x38, 0xbb, 0x14, 0x25, 0xb7, 0xc9, - 0xd3, 0x32, 0xc8, 0x1b, 0x03, 0xd4, 0x74, 0xe5, 0x3c, 0x20, 0x27, 0xd1, 0x13, 0x72, 0x3f, 0x83, - 0xfe, 0xa5, 0x56, 0x81, 0x39, 0x95, 0x1f, 0x4a, 0x7d, 0xb5, 
0x40, 0x7e, 0x48, 0x9a, 0xde, 0x06, - 0x9e, 0x10, 0xca, 0x1e, 0x43, 0xb7, 0x16, 0x26, 0x62, 0x8e, 0xfc, 0x88, 0x64, 0x9d, 0x0d, 0xf8, - 0x56, 0xcc, 0x91, 0x3d, 0x87, 0xe3, 0x54, 0x66, 0xb8, 0xf0, 0x45, 0x99, 0xc7, 0xea, 0xda, 0xc7, - 0x38, 0x15, 0xbc, 0x49, 0xc2, 0x3e, 0x11, 0xaf, 0x09, 0xbf, 0x88, 0x53, 0xc1, 0x9e, 0x40, 0xaf, - 0xd2, 0x46, 0xda, 0x47, 0x89, 0x41, 0xc2, 0xa1, 0xfa, 0x23, 0xa1, 0xaf, 0xf4, 0x85, 0xc1, 0xd8, - 0x10, 0xba, 0xb5, 0x8a, 0xda, 0xb6, 0x49, 0xd4, 0x5e, 0x8b, 0xa8, 0xab, 0x79, 0xcd, 0x44, 0x17, - 0x28, 0x14, 0xef, 0xae, 0x5f, 0xb3, 0x2a, 0xd9, 0x53, 0xa8, 0xaf, 0xb1, 0xee, 0xd1, 0x23, 0x7b, - 0x7d, 0x95, 0xaa, 0xc9, 0x27, 0x38, 0x99, 0x6b, 0xd4, 0x4a, 0xf8, 0xdb, 0x63, 0xe3, 0xce, 0xc0, - 0x1a, 0xb5, 0x5f, 0xbe, 0xf0, 0xfe, 0x39, 0x58, 0xef, 0x9c, 0x5c, 0xdb, 0xa3, 0x9d, 0xb1, 0xf9, - 0x2d, 0x8c, 0x9d, 0xc1, 0xfd, 0x40, 0xa3, 0xf4, 0x95, 0xc8, 0xc4, 0xd7, 0x20, 0xf1, 0x33, 0x51, - 0xa2, 0x6f, 0x66, 0xc0, 0x8f, 0xe9, 0xbc, 0x27, 0x86, 0x9d, 0x55, 0xe4, 0x3b, 0x51, 0xe2, 0xfb, - 0x24, 0xc8, 0xa6, 0x76, 0xb3, 0xe5, 0xc0, 0xd4, 0x6e, 0x76, 0x9c, 0xee, 0xd4, 0x6e, 0xf6, 0x1d, - 0x67, 0xf8, 0xdd, 0x02, 0x76, 0xbb, 0xe3, 0x1d, 0x61, 0xda, 0x8d, 0xc6, 0xde, 0xdd, 0xd1, 0xd8, - 0xff, 0x8f, 0x68, 0xd8, 0x7f, 0x8b, 0xc6, 0xe4, 0xfc, 0xe7, 0xd2, 0xb5, 0x6e, 0x96, 0xae, 0xf5, - 0x7b, 0xe9, 0x5a, 0xdf, 0x56, 0x6e, 0xe3, 0x66, 0xe5, 0x36, 0x7e, 0xad, 0xdc, 0xc6, 0xc7, 0xd3, - 0xab, 0x18, 0x17, 0x3a, 0xf4, 0x22, 0x99, 0x8e, 0x77, 0x96, 0xa9, 0xdc, 0x5d, 0x27, 0xbc, 0xce, - 0x45, 0x11, 0x1e, 0xd2, 0x22, 0x9d, 0xfd, 0x09, 0x00, 0x00, 0xff, 0xff, 0xad, 0xec, 0x4c, 0x89, - 0x78, 0x03, 0x00, 0x00, -} - -func (m *Subscription) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Subscription) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m 
*Subscription) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.AutoRenewalNextPlan) > 0 { - i -= len(m.AutoRenewalNextPlan) - copy(dAtA[i:], m.AutoRenewalNextPlan) - i = encodeVarintSubscription(dAtA, i, uint64(len(m.AutoRenewalNextPlan))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x8a - } - if m.FutureSubscription != nil { - { - size, err := m.FutureSubscription.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintSubscription(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x82 - } - if m.DurationTotal != 0 { - i = encodeVarintSubscription(dAtA, i, uint64(m.DurationTotal)) - i-- - dAtA[i] = 0x70 - } - if len(m.Cluster) > 0 { - i -= len(m.Cluster) - copy(dAtA[i:], m.Cluster) - i = encodeVarintSubscription(dAtA, i, uint64(len(m.Cluster))) - i-- - dAtA[i] = 0x6a - } - if m.MonthCuLeft != 0 { - i = encodeVarintSubscription(dAtA, i, uint64(m.MonthCuLeft)) - i-- - dAtA[i] = 0x58 - } - if m.MonthCuTotal != 0 { - i = encodeVarintSubscription(dAtA, i, uint64(m.MonthCuTotal)) - i-- - dAtA[i] = 0x50 - } - if m.MonthExpiryTime != 0 { - i = encodeVarintSubscription(dAtA, i, uint64(m.MonthExpiryTime)) - i-- - dAtA[i] = 0x40 - } - if m.DurationLeft != 0 { - i = encodeVarintSubscription(dAtA, i, uint64(m.DurationLeft)) - i-- - dAtA[i] = 0x38 - } - if m.DurationBought != 0 { - i = encodeVarintSubscription(dAtA, i, uint64(m.DurationBought)) - i-- - dAtA[i] = 0x30 - } - if m.PlanBlock != 0 { - i = encodeVarintSubscription(dAtA, i, uint64(m.PlanBlock)) - i-- - dAtA[i] = 0x28 - } - if len(m.PlanIndex) > 0 { - i -= len(m.PlanIndex) - copy(dAtA[i:], m.PlanIndex) - i = encodeVarintSubscription(dAtA, i, uint64(len(m.PlanIndex))) - i-- - dAtA[i] = 0x22 - } - if m.Block != 0 { - i = encodeVarintSubscription(dAtA, i, uint64(m.Block)) - i-- - dAtA[i] = 0x18 - } - if len(m.Consumer) > 0 { - i -= len(m.Consumer) - copy(dAtA[i:], m.Consumer) - i = 
encodeVarintSubscription(dAtA, i, uint64(len(m.Consumer))) - i-- - dAtA[i] = 0x12 - } - if len(m.Creator) > 0 { - i -= len(m.Creator) - copy(dAtA[i:], m.Creator) - i = encodeVarintSubscription(dAtA, i, uint64(len(m.Creator))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *FutureSubscription) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *FutureSubscription) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FutureSubscription) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.DurationBought != 0 { - i = encodeVarintSubscription(dAtA, i, uint64(m.DurationBought)) - i-- - dAtA[i] = 0x20 - } - if m.PlanBlock != 0 { - i = encodeVarintSubscription(dAtA, i, uint64(m.PlanBlock)) - i-- - dAtA[i] = 0x18 - } - if len(m.PlanIndex) > 0 { - i -= len(m.PlanIndex) - copy(dAtA[i:], m.PlanIndex) - i = encodeVarintSubscription(dAtA, i, uint64(len(m.PlanIndex))) - i-- - dAtA[i] = 0x12 - } - if len(m.Creator) > 0 { - i -= len(m.Creator) - copy(dAtA[i:], m.Creator) - i = encodeVarintSubscription(dAtA, i, uint64(len(m.Creator))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintSubscription(dAtA []byte, offset int, v uint64) int { - offset -= sovSubscription(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Subscription) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Creator) - if l > 0 { - n += 1 + l + sovSubscription(uint64(l)) - } - l = len(m.Consumer) - if l > 0 { - n += 1 + l + sovSubscription(uint64(l)) - } - if m.Block != 0 { - n += 1 + sovSubscription(uint64(m.Block)) - } - l = len(m.PlanIndex) - if l > 0 { - n += 1 + l + 
sovSubscription(uint64(l)) - } - if m.PlanBlock != 0 { - n += 1 + sovSubscription(uint64(m.PlanBlock)) - } - if m.DurationBought != 0 { - n += 1 + sovSubscription(uint64(m.DurationBought)) - } - if m.DurationLeft != 0 { - n += 1 + sovSubscription(uint64(m.DurationLeft)) - } - if m.MonthExpiryTime != 0 { - n += 1 + sovSubscription(uint64(m.MonthExpiryTime)) - } - if m.MonthCuTotal != 0 { - n += 1 + sovSubscription(uint64(m.MonthCuTotal)) - } - if m.MonthCuLeft != 0 { - n += 1 + sovSubscription(uint64(m.MonthCuLeft)) - } - l = len(m.Cluster) - if l > 0 { - n += 1 + l + sovSubscription(uint64(l)) - } - if m.DurationTotal != 0 { - n += 1 + sovSubscription(uint64(m.DurationTotal)) - } - if m.FutureSubscription != nil { - l = m.FutureSubscription.Size() - n += 2 + l + sovSubscription(uint64(l)) - } - l = len(m.AutoRenewalNextPlan) - if l > 0 { - n += 2 + l + sovSubscription(uint64(l)) - } - return n -} - -func (m *FutureSubscription) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Creator) - if l > 0 { - n += 1 + l + sovSubscription(uint64(l)) - } - l = len(m.PlanIndex) - if l > 0 { - n += 1 + l + sovSubscription(uint64(l)) - } - if m.PlanBlock != 0 { - n += 1 + sovSubscription(uint64(m.PlanBlock)) - } - if m.DurationBought != 0 { - n += 1 + sovSubscription(uint64(m.DurationBought)) - } - return n -} - -func sovSubscription(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozSubscription(x uint64) (n int) { - return sovSubscription(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Subscription) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - 
if wireType == 4 { - return fmt.Errorf("proto: Subscription: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Subscription: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Creator", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSubscription - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSubscription - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Creator = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Consumer", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSubscription - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSubscription - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Consumer = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Block", wireType) - } - m.Block = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Block |= uint64(b&0x7F) << shift - if b < 0x80 { - 
break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PlanIndex", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSubscription - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSubscription - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PlanIndex = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PlanBlock", wireType) - } - m.PlanBlock = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PlanBlock |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DurationBought", wireType) - } - m.DurationBought = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DurationBought |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DurationLeft", wireType) - } - m.DurationLeft = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DurationLeft |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field 
MonthExpiryTime", wireType) - } - m.MonthExpiryTime = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MonthExpiryTime |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MonthCuTotal", wireType) - } - m.MonthCuTotal = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MonthCuTotal |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MonthCuLeft", wireType) - } - m.MonthCuLeft = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MonthCuLeft |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSubscription - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSubscription - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Cluster = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DurationTotal", wireType) - } - m.DurationTotal = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { 
- return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DurationTotal |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 16: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FutureSubscription", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSubscription - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSubscription - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.FutureSubscription == nil { - m.FutureSubscription = &FutureSubscription{} - } - if err := m.FutureSubscription.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 17: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AutoRenewalNextPlan", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSubscription - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSubscription - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AutoRenewalNextPlan = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSubscription(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSubscription - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - 
} - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FutureSubscription) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FutureSubscription: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FutureSubscription: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Creator", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSubscription - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSubscription - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Creator = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PlanIndex", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSubscription - } - postIndex := iNdEx + intStringLen - if 
postIndex < 0 { - return ErrInvalidLengthSubscription - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PlanIndex = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PlanBlock", wireType) - } - m.PlanBlock = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PlanBlock |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DurationBought", wireType) - } - m.DurationBought = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscription - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DurationBought |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipSubscription(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSubscription - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipSubscription(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSubscription - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSubscription - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - 
case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSubscription - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthSubscription - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupSubscription - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthSubscription - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthSubscription = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowSubscription = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupSubscription = fmt.Errorf("proto: unexpected end of group") -) diff --git a/x/subscription/module.go b/x/subscription/module.go index 6a8f4fbdbe..493b61ee1c 100644 --- a/x/subscription/module.go +++ b/x/subscription/module.go @@ -133,34 +133,6 @@ func (am AppModule) RegisterServices(cfg module.Configurator) { migrator := keeper.NewMigrator(am.keeper) - // register v2 -> v3 migration - if err := cfg.RegisterMigration(types.ModuleName, 2, migrator.Migrate2to3); err != nil { - // panic:ok: at start up, migration cannot proceed anyhow - panic(fmt.Errorf("%s: failed to register migration to v3: %w", types.ModuleName, err)) - } - // register v3 -> v4 migration - if err := cfg.RegisterMigration(types.ModuleName, 3, migrator.Migrate3to4); err != nil { - // panic:ok: at start up, migration cannot proceed anyhow - panic(fmt.Errorf("%s: failed to register migration to v4: %w", types.ModuleName, err)) - } - // register v4 -> v5 migration - if err := cfg.RegisterMigration(types.ModuleName, 4, migrator.Migrate4to5); err != nil { - // panic:ok: at start up, migration cannot proceed 
anyhow - panic(fmt.Errorf("%s: failed to register migration to v5: %w", types.ModuleName, err)) - } - - // register v5 -> v6 migration - if err := cfg.RegisterMigration(types.ModuleName, 5, migrator.Migrate5to6); err != nil { - // panic:ok: at start up, migration cannot proceed anyhow - panic(fmt.Errorf("%s: failed to register migration to v6: %w", types.ModuleName, err)) - } - - // register v6 -> v7 migration - if err := cfg.RegisterMigration(types.ModuleName, 6, migrator.Migrate6to7); err != nil { - // panic:ok: at start up, migration cannot proceed anyhow - panic(fmt.Errorf("%s: failed to register migration to v6: %w", types.ModuleName, err)) - } - // register v7 -> v8 migration if err := cfg.RegisterMigration(types.ModuleName, 7, migrator.Migrate7to8); err != nil { // panic:ok: at start up, migration cannot proceed anyhow diff --git a/x/subscription/module_simulation.go b/x/subscription/module_simulation.go deleted file mode 100644 index c8047e4039..0000000000 --- a/x/subscription/module_simulation.go +++ /dev/null @@ -1,119 +0,0 @@ -package subscription - -import ( - "math/rand" - - "github.com/cosmos/cosmos-sdk/testutil/sims" - - "github.com/cosmos/cosmos-sdk/baseapp" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/module" - simtypes "github.com/cosmos/cosmos-sdk/types/simulation" - "github.com/cosmos/cosmos-sdk/x/simulation" - "github.com/lavanet/lava/v4/testutil/sample" - subscriptionsimulation "github.com/lavanet/lava/v4/x/subscription/simulation" - "github.com/lavanet/lava/v4/x/subscription/types" -) - -// avoid unused import issue -var ( - _ = sample.AccAddress - _ = subscriptionsimulation.FindAccount - _ = sims.StakePerAccount - _ = simulation.MsgEntryKind - _ = baseapp.Paramspace -) - -const ( - opWeightMsgBuy = "op_weight_msg_buy" - // TODO: Determine the simulation weight value - defaultWeightMsgBuy int = 100 - - opWeightMsgAddProject = "op_weight_msg_add_project" - // TODO: Determine the simulation weight value - 
defaultWeightMsgAddProject int = 100 - - opWeightMsgDelProject = "op_weight_msg_del_project" - // TODO: Determine the simulation weight value - defaultWeightMsgDelProject int = 100 - - opWeightMsgAutoRenewal = "op_weight_msg_auto_renewal" - // TODO: Determine the simulation weight value - defaultWeightMsgAutoRenewal int = 100 - - // this line is used by starport scaffolding # simapp/module/const -) - -// GenerateGenesisState creates a randomized GenState of the module -func (AppModule) GenerateGenesisState(simState *module.SimulationState) { - accs := make([]string, len(simState.Accounts)) - for i, acc := range simState.Accounts { - accs[i] = acc.Address.String() - } - subscriptionGenesis := types.GenesisState{ - Params: types.DefaultParams(), - // this line is used by starport scaffolding # simapp/module/genesisState - } - simState.GenState[types.ModuleName] = simState.Cdc.MustMarshalJSON(&subscriptionGenesis) -} - -// ProposalContents doesn't return any content functions for governance proposals -func (AppModule) ProposalContents(_ module.SimulationState) []simtypes.WeightedProposalMsg { - return nil -} - -// RegisterStoreDecoder registers a decoder -func (am AppModule) RegisterStoreDecoder(_ sdk.StoreDecoderRegistry) {} - -// WeightedOperations returns the all the gov module operations with their respective weights. 
-func (am AppModule) WeightedOperations(simState module.SimulationState) []simtypes.WeightedOperation { - operations := make([]simtypes.WeightedOperation, 0) - - var weightMsgBuy int - simState.AppParams.GetOrGenerate(simState.Cdc, opWeightMsgBuy, &weightMsgBuy, nil, - func(_ *rand.Rand) { - weightMsgBuy = defaultWeightMsgBuy - }, - ) - operations = append(operations, simulation.NewWeightedOperation( - weightMsgBuy, - subscriptionsimulation.SimulateMsgBuy(am.accountKeeper, am.bankKeeper, am.keeper), - )) - - var weightMsgAddProject int - simState.AppParams.GetOrGenerate(simState.Cdc, opWeightMsgAddProject, &weightMsgAddProject, nil, - func(_ *rand.Rand) { - weightMsgAddProject = defaultWeightMsgAddProject - }, - ) - operations = append(operations, simulation.NewWeightedOperation( - weightMsgAddProject, - subscriptionsimulation.SimulateMsgAddProject(am.accountKeeper, am.bankKeeper, am.keeper), - )) - - var weightMsgDelProject int - simState.AppParams.GetOrGenerate(simState.Cdc, opWeightMsgDelProject, &weightMsgDelProject, nil, - func(_ *rand.Rand) { - weightMsgDelProject = defaultWeightMsgDelProject - }, - ) - operations = append(operations, simulation.NewWeightedOperation( - weightMsgDelProject, - subscriptionsimulation.SimulateMsgDelProject(am.accountKeeper, am.bankKeeper, am.keeper), - )) - - var weightMsgAutoRenewal int - simState.AppParams.GetOrGenerate(simState.Cdc, opWeightMsgAutoRenewal, &weightMsgAutoRenewal, nil, - func(_ *rand.Rand) { - weightMsgAutoRenewal = defaultWeightMsgAutoRenewal - }, - ) - operations = append(operations, simulation.NewWeightedOperation( - weightMsgAutoRenewal, - subscriptionsimulation.SimulateMsgAutoRenewal(am.accountKeeper, am.bankKeeper, am.keeper), - )) - - // this line is used by starport scaffolding # simapp/module/operation - - return operations -} diff --git a/x/subscription/simulation/add_project.go b/x/subscription/simulation/add_project.go deleted file mode 100644 index a82d928431..0000000000 --- 
a/x/subscription/simulation/add_project.go +++ /dev/null @@ -1,29 +0,0 @@ -package simulation - -import ( - "math/rand" - - "github.com/cosmos/cosmos-sdk/baseapp" - sdk "github.com/cosmos/cosmos-sdk/types" - simtypes "github.com/cosmos/cosmos-sdk/types/simulation" - "github.com/lavanet/lava/v4/x/subscription/keeper" - "github.com/lavanet/lava/v4/x/subscription/types" -) - -func SimulateMsgAddProject( - ak types.AccountKeeper, - bk types.BankKeeper, - k keeper.Keeper, -) simtypes.Operation { - return func(r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string, - ) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { - simAccount, _ := simtypes.RandomAcc(r, accs) - msg := &types.MsgAddProject{ - Creator: simAccount.Address.String(), - } - - // TODO: Handling the AddProject simulation - - return simtypes.NoOpMsg(types.ModuleName, msg.Type(), "AddProject simulation not implemented"), nil, nil - } -} diff --git a/x/subscription/simulation/auto_renewal.go b/x/subscription/simulation/auto_renewal.go deleted file mode 100644 index 6fc8fde634..0000000000 --- a/x/subscription/simulation/auto_renewal.go +++ /dev/null @@ -1,29 +0,0 @@ -package simulation - -import ( - "math/rand" - - "github.com/cosmos/cosmos-sdk/baseapp" - sdk "github.com/cosmos/cosmos-sdk/types" - simtypes "github.com/cosmos/cosmos-sdk/types/simulation" - "github.com/lavanet/lava/v4/x/subscription/keeper" - "github.com/lavanet/lava/v4/x/subscription/types" -) - -func SimulateMsgAutoRenewal( - ak types.AccountKeeper, - bk types.BankKeeper, - k keeper.Keeper, -) simtypes.Operation { - return func(r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string, - ) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { - simAccount, _ := simtypes.RandomAcc(r, accs) - msg := &types.MsgAutoRenewal{ - Creator: simAccount.Address.String(), - } - - // TODO: Handling the AutoRenewal simulation - - return 
simtypes.NoOpMsg(types.ModuleName, msg.Type(), "AutoRenewal simulation not implemented"), nil, nil - } -} diff --git a/x/subscription/simulation/buy.go b/x/subscription/simulation/buy.go deleted file mode 100644 index 9f3eb3b304..0000000000 --- a/x/subscription/simulation/buy.go +++ /dev/null @@ -1,29 +0,0 @@ -package simulation - -import ( - "math/rand" - - "github.com/cosmos/cosmos-sdk/baseapp" - sdk "github.com/cosmos/cosmos-sdk/types" - simtypes "github.com/cosmos/cosmos-sdk/types/simulation" - "github.com/lavanet/lava/v4/x/subscription/keeper" - "github.com/lavanet/lava/v4/x/subscription/types" -) - -func SimulateMsgBuy( - ak types.AccountKeeper, - bk types.BankKeeper, - k keeper.Keeper, -) simtypes.Operation { - return func(r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string, - ) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { - simAccount, _ := simtypes.RandomAcc(r, accs) - msg := &types.MsgBuy{ - Creator: simAccount.Address.String(), - } - - // TODO: Handling the Buy simulation - - return simtypes.NoOpMsg(types.ModuleName, msg.Type(), "Buy simulation not implemented"), nil, nil - } -} diff --git a/x/subscription/simulation/del_project.go b/x/subscription/simulation/del_project.go deleted file mode 100644 index 574f434ed0..0000000000 --- a/x/subscription/simulation/del_project.go +++ /dev/null @@ -1,29 +0,0 @@ -package simulation - -import ( - "math/rand" - - "github.com/cosmos/cosmos-sdk/baseapp" - sdk "github.com/cosmos/cosmos-sdk/types" - simtypes "github.com/cosmos/cosmos-sdk/types/simulation" - "github.com/lavanet/lava/v4/x/subscription/keeper" - "github.com/lavanet/lava/v4/x/subscription/types" -) - -func SimulateMsgDelProject( - ak types.AccountKeeper, - bk types.BankKeeper, - k keeper.Keeper, -) simtypes.Operation { - return func(r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string, - ) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { - 
simAccount, _ := simtypes.RandomAcc(r, accs) - msg := &types.MsgDelProject{ - Creator: simAccount.Address.String(), - } - - // TODO: Handling the DelProject simulation - - return simtypes.NoOpMsg(types.ModuleName, msg.Type(), "DelProject simulation not implemented"), nil, nil - } -} diff --git a/x/subscription/simulation/simap.go b/x/subscription/simulation/simap.go deleted file mode 100644 index 92c437c0d1..0000000000 --- a/x/subscription/simulation/simap.go +++ /dev/null @@ -1,15 +0,0 @@ -package simulation - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - simtypes "github.com/cosmos/cosmos-sdk/types/simulation" -) - -// FindAccount find a specific address from an account list -func FindAccount(accs []simtypes.Account, address string) (simtypes.Account, bool) { - creator, err := sdk.AccAddressFromBech32(address) - if err != nil { - panic(err) - } - return simtypes.FindAccount(accs, creator) -}